From 2c8b962544daff20e57bc58cff38af6e1469cb3e Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Fri, 3 Sep 2021 20:07:19 +0530 Subject: [PATCH 01/64] Update symbl-elements.md Include horizontal line after metadata --- pre-built-ui/symbl-elements.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pre-built-ui/symbl-elements.md b/pre-built-ui/symbl-elements.md index d09286ed..3833d7b7 100644 --- a/pre-built-ui/symbl-elements.md +++ b/pre-built-ui/symbl-elements.md @@ -7,6 +7,8 @@ sidebar_label: Symbl React Elements import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- +
From 60333f512b1e271977533c87358c45b354c6e49d Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Fri, 3 Sep 2021 20:08:37 +0530 Subject: [PATCH 02/64] Update authentication.md Included horizontal line after metadata --- developer-tools/authentication.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/developer-tools/authentication.md b/developer-tools/authentication.md index b4895cde..f95973b5 100644 --- a/developer-tools/authentication.md +++ b/developer-tools/authentication.md @@ -7,6 +7,8 @@ sidebar_label: Authentication import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; + +--- Symbl uses the [OAuth2.0 Protocol](https://datatracker.ietf.org/doc/html/rfc6749) for Authentication. To begin, get your API Credentials from [Symbl Platform](https://platform.symbl.ai/). Using these credentials you can then generate the Access Token to invoke Symbl API calls. From a552a25a9e2b29ac98aea00d4d20f6e0e766fbdd Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Fri, 3 Sep 2021 20:13:49 +0530 Subject: [PATCH 03/64] Update subscribe-api.md Included horizontal line after metadata --- subscribe-api.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/subscribe-api.md b/subscribe-api.md index b3a71edf..b96801e9 100644 --- a/subscribe-api.md +++ b/subscribe-api.md @@ -8,6 +8,8 @@ slug: /subscribe-api import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + :::note In Beta Phase This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai. ::: @@ -151,4 +153,4 @@ Example of the `insight_response` object: } ] } -``` \ No newline at end of file +``` From 60884b763ff0998476ca14836a0a9350931a97f0 Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Sat, 4 Sep 2021 12:08:52 +0530 Subject: [PATCH 04/64] Update jobs-api.md Included horizontal line after metadata --- async-api/overview/jobs-api.md | 1 + 1 file changed, 1 insertion(+) diff --git a/async-api/overview/jobs-api.md b/async-api/overview/jobs-api.md index 3f59524c..1872fb20 100644 --- a/async-api/overview/jobs-api.md +++ b/async-api/overview/jobs-api.md @@ -6,6 +6,7 @@ sidebar_label: Get Job Status import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- The Job Status API is used to retrieve the status of an ongoing Async API request. You can use the Job ID (`jobId`) received in the successful response of the Async API. From 721f0dc156dd64abcd5d8eb74345fbd40c7d8141 Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Sat, 4 Sep 2021 12:10:12 +0530 Subject: [PATCH 05/64] Update introduction.md Included horizontal line after metadata --- telephony/introduction.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/telephony/introduction.md b/telephony/introduction.md index 89a25d44..08c19078 100644 --- a/telephony/introduction.md +++ b/telephony/introduction.md @@ -8,6 +8,8 @@ slug: /telephony/introduction import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + Based on PSTN and SIP protocols, the Telephony API provides an interface for the developers to have Symbl bridge/join VoIP calls and get the results back in real-time as well. Optionally, you can also trigger an email at the end of the conversation containing the URL to view the transcription, insights and topics in a single page Web Application. 
### Tutorials From 2f115c2ac7742cf81062238e788c9ffcb9c7fee0 Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Sat, 4 Sep 2021 12:11:42 +0530 Subject: [PATCH 06/64] Update reference.md Included horizontal line after metadata --- telephony/reference/reference.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/telephony/reference/reference.md b/telephony/reference/reference.md index 1f523150..f964f4a9 100644 --- a/telephony/reference/reference.md +++ b/telephony/reference/reference.md @@ -7,6 +7,8 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + ## Endpoint ### HTTP REQUEST From 6291030b2410c1fbc70631227b94aa6d61a4a808 Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Sat, 4 Sep 2021 17:05:49 +0530 Subject: [PATCH 07/64] Update introduction.md Included horizontal line after metadata --- telephony/introduction.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/telephony/introduction.md b/telephony/introduction.md index 89a25d44..08c19078 100644 --- a/telephony/introduction.md +++ b/telephony/introduction.md @@ -8,6 +8,8 @@ slug: /telephony/introduction import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + Based on PSTN and SIP protocols, the Telephony API provides an interface for the developers to have Symbl bridge/join VoIP calls and get the results back in real-time as well. Optionally, you can also trigger an email at the end of the conversation containing the URL to view the transcription, insights and topics in a single page Web Application. ### Tutorials From 8538242a09b0f4c58c24aaadf71fab10ae77c01b Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Sat, 4 Sep 2021 17:09:47 +0530 Subject: [PATCH 08/64] Update action-items.md Included horizontal line after metadata --- conversation-api/api-reference/action-items.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/conversation-api/api-reference/action-items.md b/conversation-api/api-reference/action-items.md index 12db28bf..f8aa97f7 100644 --- a/conversation-api/api-reference/action-items.md +++ b/conversation-api/api-reference/action-items.md @@ -7,6 +7,8 @@ slug: /conversation-api/action-items import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + This API returns a list of all the action items generated from the conversation. An action item is a specific outcome recognized in the conversation that requires one or more people in the conversation to act in the future. @@ -208,4 +210,4 @@ Field | Description You can also explore our Comprehensive Action Items API that is currently offered as a part of the Symbl Labs. Click [here](/docs/conversation-api/comprehensive-action-items) to read more. -::: \ No newline at end of file +::: From be61295d143495a33d3376f6e0bec4d2e56601a5 Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Sat, 4 Sep 2021 17:10:51 +0530 Subject: [PATCH 09/64] Update follow-ups.md Included horizontal line after metadata --- conversation-api/api-reference/follow-ups.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/conversation-api/api-reference/follow-ups.md b/conversation-api/api-reference/follow-ups.md index 1084a7d4..35395e1b 100644 --- a/conversation-api/api-reference/follow-ups.md +++ b/conversation-api/api-reference/follow-ups.md @@ -7,6 +7,8 @@ slug: /conversation-api/follow-ups import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + This API returns list of all the follow-ups generated from the conversation. 
From 6bbaf911ea9c13e49755e6780d4f3ef16695032c Mon Sep 17 00:00:00 2001 From: Gayathri Krishnaswamy Date: Mon, 6 Sep 2021 09:39:14 +0530 Subject: [PATCH 10/64] Update symbl-elements.md Included horizontal line after metadata --- pre-built-ui/symbl-elements.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pre-built-ui/symbl-elements.md b/pre-built-ui/symbl-elements.md index d09286ed..3833d7b7 100644 --- a/pre-built-ui/symbl-elements.md +++ b/pre-built-ui/symbl-elements.md @@ -7,6 +7,8 @@ sidebar_label: Symbl React Elements import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- +
From df2bfd9a89b842955cbd5c0d2dc7e036197c102b Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:17:32 +0530 Subject: [PATCH 11/64] Update trackers.md --- conversation-api/api-reference/trackers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/api-reference/trackers.md b/conversation-api/api-reference/trackers.md index aa636a14..5b48ff85 100644 --- a/conversation-api/api-reference/trackers.md +++ b/conversation-api/api-reference/trackers.md @@ -95,7 +95,7 @@ response = requests.request("GET", url, headers=headers) if response.status_code == 200: # Successful API execution - print("actionItems => " + str(response.json()['actionItems'])) # actionsItems object containing actionItem id, text, type, score, messageIds, phrases, definitive, entities, assignee + print("trackers => " + str(response.json())) # trackers object containing tracker id, name, matches (array of object containing messageRefs, type, value, insightRefs) elif response.status_code in responses.keys(): print(responses[response.status_code]) # Expected error occurred else: From 2430b8df0e529a682abeef608886322aabebc0a1 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:24:57 +0530 Subject: [PATCH 12/64] Update all-conversations.md --- conversation-api/api-reference/all-conversations.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/conversation-api/api-reference/all-conversations.md b/conversation-api/api-reference/all-conversations.md index 1e23659c..1bab1d4d 100644 --- a/conversation-api/api-reference/all-conversations.md +++ b/conversation-api/api-reference/all-conversations.md @@ -85,12 +85,7 @@ response = requests.request("GET", url, headers=headers) if response.status_code == 200: # Successful API execution - print("id => " + response.json()['id']) # conversationId. - print("type => " + response.json()['type']) # type of conversation, default is meeting - print("name => " + response.json()['name']) # name of conversation - print("startTime => " + response.json()['startTime']) # start time of conversation - print("endTime => " + response.json()['endTime']) # end time of conversation - print("members => " + str(response.json()['members'])) # members who were part of conversation + print("conversations => " + str(response.json())) # object containing id, type, name, startTime, endTime, members object, and metadata object elif response.status_code in responses.keys(): print(responses[response.status_code]) # Expected error occurred else: From e1d4d067e3ac08e2d27e43538e105e37e4865480 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:29:50 +0530 Subject: [PATCH 13/64] Update all-conversations.md --- conversation-api/api-reference/all-conversations.md | 1 + 1 file changed, 1 insertion(+) diff --git a/conversation-api/api-reference/all-conversations.md b/conversation-api/api-reference/all-conversations.md index 1e23659c..ecf61bc9 100644 --- a/conversation-api/api-reference/all-conversations.md +++ b/conversation-api/api-reference/all-conversations.md @@ -192,5 +192,6 @@ Field | Description ```startTime``` | DateTime value of when the conversation started. ```endTime``` | DateTime value of when the conversation ended. ```members``` | A list of member objects containing ID, name and email (if detected). 
+```metadata``` | Contains user-defined metadata key values which are used for labelling conversations. From d42b860b269ca257d1f6b86c7659cb78b9f048f5 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:47:03 +0530 Subject: [PATCH 14/64] Update topics.md --- conversation-api/api-reference/topics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/api-reference/topics.md b/conversation-api/api-reference/topics.md index 524209c1..55deb4b4 100644 --- a/conversation-api/api-reference/topics.md +++ b/conversation-api/api-reference/topics.md @@ -257,7 +257,7 @@ Parameter | Required | Value |Description| Field | Description ---------- | ------- | -```id``` | Unique conversation identifier. +```id``` | Unique topic identifier. ```text``` | Conversation text. ```type``` | Response type. Default is topics. ```score``` | Confidence score of the generated topic. value from 0 - 1. From 100b61cd566bbac2e91174ed155d7dbf92cbbb4c Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:48:27 +0530 Subject: [PATCH 15/64] Update topics.md --- conversation-api/api-reference/topics.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conversation-api/api-reference/topics.md b/conversation-api/api-reference/topics.md index 524209c1..7b1fc33e 100644 --- a/conversation-api/api-reference/topics.md +++ b/conversation-api/api-reference/topics.md @@ -258,8 +258,8 @@ Parameter | Required | Value |Description| Field | Description ---------- | ------- | ```id``` | Unique conversation identifier. -```text``` | Conversation text. -```type``` | Response type. Default is topics. +```text``` | Topic text. +```type``` | Response type. Default is topic. ```score``` | Confidence score of the generated topic. value from 0 - 1. ```messageIds``` | Unique message identifiers of the corresponding messages. ```parentRefs``` | This is enabled when `parentRefs` is set to true in request. From b1c84e74813f9ab8316f9b3c77062e07fc43c22b Mon Sep 17 00:00:00 2001 From: Sekhar V <37143160+sekharvth@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:50:00 +0530 Subject: [PATCH 16/64] some change --- conversation-api/concepts/comprehensive-action-items.md | 1 + 1 file changed, 1 insertion(+) diff --git a/conversation-api/concepts/comprehensive-action-items.md b/conversation-api/concepts/comprehensive-action-items.md index 0aabc1d3..95685fa9 100644 --- a/conversation-api/concepts/comprehensive-action-items.md +++ b/conversation-api/concepts/comprehensive-action-items.md @@ -13,6 +13,7 @@ import TabItem from '@theme/TabItem'; :::info Symbl Labs This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. +sdf You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials. 
From ca042170ca463e8b4f3ed64c6592fa3c98731ff3 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:53:52 +0530 Subject: [PATCH 17/64] Update questions.md --- conversation-api/api-reference/questions.md | 1 + 1 file changed, 1 insertion(+) diff --git a/conversation-api/api-reference/questions.md b/conversation-api/api-reference/questions.md index 2d361297..f4261712 100644 --- a/conversation-api/api-reference/questions.md +++ b/conversation-api/api-reference/questions.md @@ -146,4 +146,5 @@ Field | Description ```type``` | Response type. Default is `question`. ```score``` | Confidence score of the generated question. Value from 0 - 1. A score of 1 is likely an exact match, while a score of 0 means that no match was found. ```messageIds``` | Unique message identifiers of the corresponding messages. +```from``` | Object containing the userId, name and identifier of the speaker. From 45bc230f561fbd50accdba14b86a436d08116f79 Mon Sep 17 00:00:00 2001 From: Sekhar V <37143160+sekharvth@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:54:34 +0530 Subject: [PATCH 18/64] Slight changes in wording to improve readability --- conversation-api/concepts/comprehensive-action-items.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/conversation-api/concepts/comprehensive-action-items.md b/conversation-api/concepts/comprehensive-action-items.md index 95685fa9..165d5938 100644 --- a/conversation-api/concepts/comprehensive-action-items.md +++ b/conversation-api/concepts/comprehensive-action-items.md @@ -11,9 +11,7 @@ import TabItem from '@theme/TabItem'; --- :::info Symbl Labs -This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. - -sdf You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials. +This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. @@ -22,11 +20,11 @@ You can access the Labs features using your Symbl App Id and Secret. If you don For any queries or feedback, please contact us at labs@symbl.ai. ::: -The **Comprehensive Action Items API** is similar to the Action Items API except that the Comprehensive Action Items API returns a rephrased form of the original action item message that's enriched with its corresponding context. +The **Comprehensive Action Items API** is similar to the Action Items API, except that the Comprehensive Action Items API returns a rephrased form of the original action item message that's enriched with its corresponding context. While both are equally powerful in providing Action Items that relate to a discussion, the Comprehensive Action Items API is designed to provide more details such as references to speaker names, context in which the action item was mentioned and an overall comprehensive description of the action items. -You can use the Action Items API if you wish to relate a message one-to-one with an action item and use the exact sentence from a transcript or utilize the Comprehensive Action Items API if you require more context to be sent in the response in a comprehensive format.
You can use the Action Items API if you wish to relate a message one-to-one with an action item and use the exact sentence from a transcript, or utilize the Comprehensive Action Items API if you require more context to be sent along with the original action item, in a comprehensive format. #### Examples | | From 044bdfcfbfacd8c894264ee04620aa5eefdbfc22 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:55:34 +0530 Subject: [PATCH 19/64] Update questions.md --- conversation-api/api-reference/questions.md | 53 +++++++-------------- 1 file changed, 16 insertions(+), 37 deletions(-) diff --git a/conversation-api/api-reference/questions.md b/conversation-api/api-reference/questions.md index 2d361297..17303ac5 100644 --- a/conversation-api/api-reference/questions.md +++ b/conversation-api/api-reference/questions.md @@ -97,43 +97,22 @@ exit() ```json { "questions": [ { "id": "6673386714431488", "text": "It actually does not have speakers right?", "type": "question", "score": 0.9844425742283145, "messageIds": [ "5561105242914816" ], "from": { "id": "47c74881-6475-433f-a13a-52152a3cb5aa", "name": "Surbhi", "userId": "Surbhi@symbl.ai" } } ] } ``` From 8d07ca6b0af65dd26297d23e1927e9cba9102918 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 10:56:39 +0530 Subject: [PATCH 20/64] Update questions.md --- conversation-api/api-reference/questions.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conversation-api/api-reference/questions.md b/conversation-api/api-reference/questions.md index 2d361297..d9d62583 100644 --- a/conversation-api/api-reference/questions.md +++ b/conversation-api/api-reference/questions.md @@ -141,8 +141,8 @@ exit() Field | Description ---------- | ------- | -```id``` | Unique conversation identifier. -```text``` | Conversation text. +```id``` | Unique Question identifier. +```text``` | Question text. ```type``` | Response type. Default is `question`. ```score``` | Confidence score of the generated question. Value from 0 - 1. A score of 1 is likely an exact match, while a score of 0 means that no match was found. ```messageIds``` | Unique message identifiers of the corresponding messages.
From 56a97db41dd9a099b58f9dd683263cbb5a438240 Mon Sep 17 00:00:00 2001 From: Sekhar V <37143160+sekharvth@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:02:26 +0530 Subject: [PATCH 21/64] Wording changes for better understanding --- conversation-api/api-reference/comprehensive-action-items.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/conversation-api/api-reference/comprehensive-action-items.md b/conversation-api/api-reference/comprehensive-action-items.md index f71a4157..2e6fe155 100644 --- a/conversation-api/api-reference/comprehensive-action-items.md +++ b/conversation-api/api-reference/comprehensive-action-items.md @@ -10,7 +10,7 @@ import TabItem from '@theme/TabItem'; --- :::info Symbl Labs -This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. +This feature is a part of Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials. @@ -153,7 +153,7 @@ Field | Description ```id``` | Unique identifier of the comprehensive action item. ```text``` | Text of the comprehensive action item. ```type``` | Response type. Default is `action_item`. -```score``` | Confidence score of the generated action item. Value from 0 - 1. +```score``` | Confidence score of the detected action item. Value from 0 - 1. ```messageRefs.id``` | Unique identifiers of the corresponding messages from where the action item was derived. You may get multiple message IDs here as Symbl identifies all the relevant messages in the conversation and generates the required action item accordingly. ```entities``` | List of detected entity objects in the insight with `type` - entity type and `text` - corresponding text. ```definitive``` | Boolean indicating if the action item is definitive or not. From 27986d703659dce9ad5be80d5f0a5baced717f2e Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:03:24 +0530 Subject: [PATCH 22/64] Update conversation.md --- conversation-api/api-reference/conversation.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/conversation-api/api-reference/conversation.md b/conversation-api/api-reference/conversation.md index 92c49a74..4d6f98a0 100644 --- a/conversation-api/api-reference/conversation.md +++ b/conversation-api/api-reference/conversation.md @@ -144,4 +144,6 @@ Field | Description ```startTime``` | DateTime value of when the conversation started. ```endTime``` | DateTime value of when the conversation ended. ```members``` | A list of member objects containing ID, name and email (if detected). +```metadata``` | Contains user-defined metadata key values which are used for labelling conversations. 
+ From 2e8602850532acb328383c45b6cbb39783100350 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:06:28 +0530 Subject: [PATCH 23/64] Update conversation.md --- .../api-reference/conversation.md | 49 ++++++++++--------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/conversation-api/api-reference/conversation.md b/conversation-api/api-reference/conversation.md index 92c49a74..06a08b93 100644 --- a/conversation-api/api-reference/conversation.md +++ b/conversation-api/api-reference/conversation.md @@ -108,29 +108,34 @@ exit() ```javascript + { - "id": "5179649407582208", - "type": "meeting", - "name": "Project Meeting #2", - "startTime": "2020-02-12T11:32:08.000Z", - "endTime": "2020-02-12T11:37:31.134Z", - "members": [ - { - "id": "5118221462011904", - "name": "John", - "email": "John@example.com" - }, - { - "id": "50123212234535645", - "name": "Mary", - "email": "Mary@example.com" - }, - { - "id": "63475698234689238", - "name": "Roger", - "email": "Roger@example.com" - } - ] + "id": "4549300882243584", + "type": "meeting", + "name": "4549300882243584", + "startTime": "2021-07-23T16:31:29.798Z", + "endTime": "2021-07-23T16:32:20.826Z", + "members": [ + + { + "id": "5118221462011904", + "name": "John", + "email": "John@example.com" + }, + { + "id": "50123212234535645", + "name": "Mary", + "email": "Mary@example.com" + }, + { + "id": "63475698234689238", + "name": "Roger", + "email": "Roger@example.com" + } + ], + "metadata": { + "label": "Business" + } } ``` From 94898f54cc0fd119810efa8fd3ea836f78f2308d Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:17:52 +0530 Subject: [PATCH 24/64] Follow consistency in using "Example" or "For Example" The consistency is off for "Example" & "For Example" along with the capitalization. Following "For example" would be beneficial according to the Symbl Steyleguide. --- what-is-symbl.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/what-is-symbl.md b/what-is-symbl.md index d5289a13..0fd13f76 100644 --- a/what-is-symbl.md +++ b/what-is-symbl.md @@ -59,7 +59,7 @@ Symbl's Sentiment API works over Speech-to-Text sentences and Topics (or aspect) ### ❇️ Action Items An action item is a specific outcome recognized in the conversation that requires one or more people in the conversation to take a specific action showing a clear commitment. -Examples: +For example, *“This was a great conversation, I will summarize this meeting and send a follow-up to all the stakeholders”* @@ -77,7 +77,7 @@ While both are equally powerful in providing Action Items that relate to a discu ### ❇️ Follow-Ups Symbl can recognize if an action item has a connotation, which requires following up in general or by someone in particular - which usually includes setting up a calendar invite. Follow-ups have details of assignee, datetime ranges entities and is regenerated with speaker context with reference to the transcription or message. The Summary UI comes with an out-of-the-box calendar integration for this follow-up insight type. -Example: +For example, *“John, let’s set a time to discuss the board updates tomorrow evening”* @@ -87,7 +87,7 @@ Example: ### ❇️ Questions Any explicit question or request for information that comes up during the conversation, whether answered or not, is recognized as a question. 
-Examples: +For example, *“What features are most relevant for our use case?” “How are we planning to design the systems?”* @@ -98,7 +98,9 @@ Examples: ### ❇️ Trackers (Beta) When it comes to detecting specific or “contextually similar” occurrences of a particular context in any conversation, the most commonly faced challenge is when the speakers talk about the context in general but do not speak the exact phrases. The Trackers API will however detect both exact and similar phrases. -For example “I don’t have any money” is contextually similar to “I ran out of budget” as both represent similar inherent meaning. +For example, + +“I don’t have any money” is contextually similar to “I ran out of budget” as both represent similar inherent meaning. However, after listening/hearing the conversation, it is understood that it indeed has the context that was meant to be detected. From eb9ed21a0dce86cc009222906689ba0821d04c45 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:20:32 +0530 Subject: [PATCH 25/64] Update entities.md --- conversation-api/api-reference/entities.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/api-reference/entities.md b/conversation-api/api-reference/entities.md index 6f8d57bf..b8a6ec91 100644 --- a/conversation-api/api-reference/entities.md +++ b/conversation-api/api-reference/entities.md @@ -14,7 +14,7 @@ This is a Beta API - Undergoing further development. If you have any suggestions, idea or questions about this API please reach us at devrelations@symbl.ai. ::: -This API provides you with a functionality to extract entities(custom, location, person, date, number, organization,datetime,daterange, etc ) from the conversation. +This API provides you with a functionality to extract entities (custom, location, person, date, number, organization, datetime, daterange etc.) from the conversation. ### Detecting Entities From f9fddc739b0d67665b16e4e60290799a187cf1d3 Mon Sep 17 00:00:00 2001 From: rishabh-chaturvedi <76057705+rishabh-chaturvedi@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:29:05 +0530 Subject: [PATCH 26/64] Update topics.md --- conversation-api/api-reference/topics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/api-reference/topics.md b/conversation-api/api-reference/topics.md index 524209c1..572a8051 100644 --- a/conversation-api/api-reference/topics.md +++ b/conversation-api/api-reference/topics.md @@ -262,5 +262,5 @@ Field | Description ```type``` | Response type. Default is topics. ```score``` | Confidence score of the generated topic. value from 0 - 1. ```messageIds``` | Unique message identifiers of the corresponding messages. -```parentRefs``` | This is enabled when `parentRefs` is set to true in request. +```parentRefs``` | This is enabled when `parentRefs` is set to true in request. Object containing type (as topic) and text of parent topic. ```sentiment```| Shows the [sentiment](/docs/concepts/sentiment-analysis) polarity (the intensity of negativity or positivity of a sentence) and suggested sentiment type (positive, negative and neutral). 
From d59f80bfb491fe03a14c1ab8e49065e84b2804d9 Mon Sep 17 00:00:00 2001 From: Akanksha Bhasin <35089751+Akankshabhasin@users.noreply.github.com> Date: Mon, 6 Sep 2021 11:44:18 +0530 Subject: [PATCH 27/64] Updated the Documentation --- introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/introduction.md b/introduction.md index 16d17697..0b3b5e7e 100644 --- a/introduction.md +++ b/introduction.md @@ -9,7 +9,7 @@ slug: / 👋 Welcome to Symbl documentation! Whether you're looking to understand Symbl's capabilities or get started with -our APIs, we've got you covered! +our APIs or SDKs, we've got you covered!
From d84bec7699983bbbbf025b55419972438d6df6c Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Mon, 6 Sep 2021 12:14:49 +0530 Subject: [PATCH 28/64] Update follow-ups.md --- conversation-api/concepts/follow-ups.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/concepts/follow-ups.md b/conversation-api/concepts/follow-ups.md index 6fe9d3c6..82829193 100644 --- a/conversation-api/concepts/follow-ups.md +++ b/conversation-api/concepts/follow-ups.md @@ -12,7 +12,7 @@ import TabItem from '@theme/TabItem'; Symbl recognizes if an action item has a connotation or nature of language, which requires following up in general or by someone in a conversation. -#### Examples +#### For example, * *"I will sync up with my manager and find out the agreed dates with the vendor".* Here, a person needs to follow up with their manager in order to complete this action. From 23c002c5e2acc1304680fa6e02bb25f18634fcea Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Mon, 6 Sep 2021 12:17:45 +0530 Subject: [PATCH 29/64] Update questions.md --- conversation-api/concepts/questions.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/conversation-api/concepts/questions.md b/conversation-api/concepts/questions.md index 61f6c35d..a3a5d64b 100644 --- a/conversation-api/concepts/questions.md +++ b/conversation-api/concepts/questions.md @@ -12,7 +12,7 @@ import TabItem from '@theme/TabItem'; Any explicit question or request for information that comes up during the conversation, whether answered or not, is recognized as a question. -#### Examples +#### For example, - *"What features are most relevant for our use case?"* - *"How are we planning to design the systems?"* @@ -26,7 +26,9 @@ Any explicit question or request for information that comes up during the conver ## Questions API -To see the Questions API in action, you need to process a conversation using Symbl. After you process a meeting, you'll receive a **Conversation ID** which is passed in Conversation API. A Conversation ID is the key to receiving conversational insights from any conversation. As an example, here's a simple API call which grabs the detected questions from the conversation. +To see the Questions API in action, you need to process a conversation using Symbl. After you process a meeting, you'll receive a **Conversation ID** which is passed in Conversation API. A Conversation ID is the key to receiving conversational insights from any conversation. + +For example, here's a simple API call which grabs the detected questions from the conversation. 👉 [Questions API](/docs/conversation-api/questions) From 813c36e2c1db73bd70483cb8f270f188fb521a9b Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Mon, 6 Sep 2021 12:21:23 +0530 Subject: [PATCH 30/64] Update trackers.md --- conversation-api/concepts/trackers.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/conversation-api/concepts/trackers.md b/conversation-api/concepts/trackers.md index 441480ae..605f423e 100644 --- a/conversation-api/concepts/trackers.md +++ b/conversation-api/concepts/trackers.md @@ -23,8 +23,9 @@ Symbl removes that complexity by reading into contexts and returning messages th Using these insights you can introduce process improvements and strategies that align with your business goals.
-#### Examples -The tracker names used in the example below are only for explanatory purposes. You can create any tracker for your usecase and assign any name you like. +#### For example, + +Note: The tracker names used in the example below are only for explanatory purposes. You can create any tracker for your usecase and assign any name you like. - *Sandra is the training head of the Sales team in her company. She uses the insights from the **Intro Tracker** to look at how great sales agents start conversations and **Pricing Tracker** to understand how they share the pricing details. She then uses these insights to train other agents.* *The **Intro Tracker** Sandra uses tracks contextually similar phrases such as:*
@@ -50,7 +51,7 @@ The tracker names used in the example below are only for explanatory purposes. Y      *“Use case”*,
     *“Fits our requirement”*,
     *“Quotes”*,
-  *“Discounts”*. +     *“Discounts”*. ## Key Features @@ -88,4 +89,4 @@ Click [here](/docs/management-api/introduction) to read about the capabilities o ## Related Topics - [Viewing detected Trackers with Async API](/docs/async-api/code-snippets/track-phrases-in-a-conversation/#view-detected-trackers) -- [Detect Key Phrases with Streaming API](/docs/streamingapi/code-snippets/detect-key-phrases/#ontrackerresponse-json-response-example) \ No newline at end of file +- [Detect Key Phrases with Streaming API](/docs/streamingapi/code-snippets/detect-key-phrases/#ontrackerresponse-json-response-example) From e1d2498404fd9f67ba16f4116743c1b146afece3 Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Mon, 6 Sep 2021 12:23:08 +0530 Subject: [PATCH 31/64] Update topic-hierarchy.md --- conversation-api/concepts/topic-hierarchy.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/concepts/topic-hierarchy.md b/conversation-api/concepts/topic-hierarchy.md index 5b7e696d..c0360493 100644 --- a/conversation-api/concepts/topic-hierarchy.md +++ b/conversation-api/concepts/topic-hierarchy.md @@ -22,7 +22,7 @@ high-quality topic hierarchy. In any conversation, there can be multiple related topics that get discussed and it is possible to organize them in a hierarchy for better insights and consumption. Symbl's Topic Hierarchy algorithm finds a pattern in the conversation and creates parent (global) topics with each parent topic having multiple child topics nested within it. -#### Example +#### For example, *If in a meeting “Sales Conversation” was talked about and after that, discussions around “Activity Call logs”, “Salesforce”, “Draft”, “Custom Integration” and “Jira” took place, the Topic Hierarchy will make Sales Conversation as the parent topic and the rest of the topics as the child topics under it.* From fbfa63ed396aaca343abd832968f4d8f1acd9f39 Mon Sep 17 00:00:00 2001 From: stokes-christopher <86790554+stokes-christopher@users.noreply.github.com> Date: Wed, 13 Oct 2021 20:43:57 -0500 Subject: [PATCH 32/64] Update start-and-stop-streaming-api-connection.md --- .../start-and-stop-streaming-api-connection.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md b/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md index a3e40215..78127588 100644 --- a/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md +++ b/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md @@ -21,7 +21,7 @@ Use this code to start a connection to the Streaming API using Javascript WebSoc ```js -const {sdk} = require('symbl-node'); +const {sdk} = require('@symblai/symbl-js'); const uuid = require('uuid').v4; (async () => { @@ -91,7 +91,7 @@ const uuid = require('uuid').v4; #### Testing -Create a javascript file named `app.js` and copy this code into the file. Fill in the placeholder values with the proper values. Use npm to install the required libraries: `npm install symbl-node uuid`. Now in the terminal run +Create a javascript file named `app.js` and copy this code into the file. Fill in the placeholder values with the proper values. Use npm to install the required libraries: `npm install @symblai/symbl-js uuid`. 
Now in the terminal run ```bash $ node app.js @@ -183,4 +183,4 @@ ws.send(JSON.stringify({ :::info Termination due to elongated silence If the meeting is silent for more than 30 minutes, it will be automatically terminated. The charges towards the silent minutes apply. -::: \ No newline at end of file +::: From 8cb1e93949614bb763d160e9b0d4c15162865d94 Mon Sep 17 00:00:00 2001 From: stokes-christopher <86790554+stokes-christopher@users.noreply.github.com> Date: Wed, 13 Oct 2021 20:51:02 -0500 Subject: [PATCH 33/64] Update connect-to-zoom-with-telephony-api.md --- .../connect-to-zoom-with-telephony-api.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/telephony/tutorials/connect-to-zoom-with-telephony-api.md b/telephony/tutorials/connect-to-zoom-with-telephony-api.md index 5dd025a9..6ba87ab2 100644 --- a/telephony/tutorials/connect-to-zoom-with-telephony-api.md +++ b/telephony/tutorials/connect-to-zoom-with-telephony-api.md @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; This guide uses a **PSTN** connection to connect to Zoom. **PSTN** audio quality maxes out to 8KHz. You can also use a **[SIP-based connection](/docs/concepts/pstn-and-sip#sip-session-initiation-protocol)**, which captures audio at 16KHz and above. ::: -[Symbl’s Telephony API](https://docs.symbl.ai/?shell#telephony-api) allows you to connect to any conference call system using PSTN or SIP networks. In this guide, we will walk you through how to get a live transcription and real-time AI insights, such as [follow-ups](/docs/concepts/follow-ups), [action items](/docs/concepts/action-items), [topics](/docs/concepts/topics) and [questions](/docs/conversation-api/questions), of a Zoom call using a PSTN connection. This application uses the Symbl Javascript SDK which requires the `symbl-node` node package. You must have an active Zoom call (no one has to be in it but yourself) and whatever you speak in the Zoom call will be taken by our API and processed for conversational insights. +[Symbl’s Telephony API](https://docs.symbl.ai/?shell#telephony-api) allows you to connect to any conference call system using PSTN or SIP networks. In this guide, we will walk you through how to get a live transcription and real-time AI insights, such as [follow-ups](/docs/concepts/follow-ups), [action items](/docs/concepts/action-items), [topics](/docs/concepts/topics) and [questions](/docs/conversation-api/questions), of a Zoom call using a PSTN connection. This application uses the Symbl Javascript SDK which requires the `@symblai/symbl-js` node package. You must have an active Zoom call (no one has to be in it but yourself) and whatever you speak in the Zoom call will be taken by our API and processed for conversational insights. :::info You must make sure your Zoom call allows phone dial-in for this example to work correctly. @@ -44,12 +44,12 @@ Key | Type | Description To get started, you’ll need your account credentials and [Node.js](https://nodejs.org/en/download/) installed (> v8.x) on your machine. -We’ll use the [Symbl module for Node.js](https://www.npmjs.com/package/symbl-node) in this guide. Make sure you have a Node project set up. If you don’t have one, you can set one up using [npm init](https://docs.npmjs.com/cli/init). +We’ll use the [Symbl module for Node.js](https://www.npmjs.com/package/@symblai/symbl-js) in this guide. Make sure you have a Node project set up. If you don’t have one, you can set one up using [npm init](https://docs.npmjs.com/cli/init). 
-From the root directory of your project, run the following command to add `symbl-node` in your project dependencies. +From the root directory of your project, run the following command to add `@symblai/symbl-js` in your project dependencies. ```bash -npm i --save symbl-node +npm i --save @symblai/symbl-js ``` ### Retrieve your Symbl API credentials @@ -63,7 +63,7 @@ Your credentials include your App ID and App Secret, which you can find on the h Create a new file named index.js in your project and add the following lines to initialize the Symbl SDK: ```javascript -const {sdk, SpeakerEvent} = require("symbl-node"); +const {sdk, SpeakerEvent} = require("@symblai/symbl-js"); const appId = appId; const appSecret = appSecret; @@ -184,7 +184,7 @@ This is a category of action items with a connotation to follow-up a request or Here's the full Code Sample below: ```js -const {sdk, SpeakerEvent} = require("symbl-node"); +const {sdk, SpeakerEvent} = require("@symblai/symbl-js"); const appId = appId; const appSecret = appSecret; const phoneNumber = ""; // US Zoom Numbers are "+16465588656", or "+14086380968". @@ -267,4 +267,4 @@ And that's it! Once Symbl is connected to the call, the API will take in your co :::info Termination due to elongated silence If the meeting is silent for more than 30 minutes, it will be automatically terminated. The charges towards the silent minutes apply. -::: \ No newline at end of file +::: From adebf983beacb27713832d5b7abc812f17ae7848 Mon Sep 17 00:00:00 2001 From: vishalsharma Date: Mon, 29 Nov 2021 22:29:08 +0530 Subject: [PATCH 34/64] DOCS(PR-47): Updated Extension Documentation --- integrations/agora-sdk-plugin.md | 34 ++++++++++++++------------------ 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/integrations/agora-sdk-plugin.md b/integrations/agora-sdk-plugin.md index 268885a2..23a5a654 100644 --- a/integrations/agora-sdk-plugin.md +++ b/integrations/agora-sdk-plugin.md @@ -97,25 +97,21 @@ After activating the Symbl Conversation Intelligence Extension, click the **View This section walks you through the steps necessary to set up the Symbl Conversation Intelligence extension in your mobile application. -1. Download the [Symbl Extension](https://cdn-agora.symbl.ai/agora-symblai-filter-debug.aar) (if you haven't already). - -2. Add the `.aar` file as a dependency to your application. -![agora-creds](/img/agora-arr-files.png) - -3. Add the following information into your `build.gradle` module file: +1. Add the following information into your `build.gradle` module file: ```js implementation fileTree(include: ['*.jar'], dir: 'libs') implementation 'com.squareup.okhttp3:okhttp:3.10.0' implementation 'org.java-websocket:Java-WebSocket:1.5.1' +implementation 'ai.symbl:android.extension:0.0.1' ``` -4. Implement the interface io.agora.rtc2.IMediaExtensionObserver +2. Implement the interface io.agora.rtc2.IMediaExtensionObserver ```js public class MainActivity extends AppCompatActivity implements io.agora.rtc2.IMediaExtensionObserver { ``` -5. Add the following method to set all the necessary information to initialize the Symbl configuration. You can find description for the parameters used in the table below: +3. Add the following method to set all the necessary information to initialize the Symbl configuration. 
You can find a description of the parameters used in the table below: ```js private void setSymblPluginConfigs(JSONObject pluginParams) throws JSONException { @@ -311,23 +307,23 @@ import android.widget.FrameLayout; import android.widget.TextView; import com.google.gson.Gson; -import io.agora.extension.symblai.model.request.ApiConfig; -import io.agora.extension.symblai.model.request.RealtimeAPIConfig; -import io.agora.extension.symblai.model.request.RealtimeStartRequest; -import io.agora.extension.symblai.model.request.Redaction; -import io.agora.extension.symblai.model.request.Speaker; -import io.agora.extension.symblai.model.request.SpeechRecognition; -import io.agora.extension.symblai.model.request.SymblPluginConfig; -import io.agora.extension.symblai.model.request.Tracker; -import io.agora.extension.symblai.model.response.SymblResponse; +import ai.symbl.android.extension.model.request.ApiConfig; +import ai.symbl.android.extension.model.request.RealtimeAPIConfig; +import ai.symbl.android.extension.model.request.RealtimeStartRequest; +import ai.symbl.android.extension.model.request.Redaction; +import ai.symbl.android.extension.model.request.Speaker; +import ai.symbl.android.extension.model.request.SpeechRecognition; +import ai.symbl.android.extension.model.request.SymblPluginConfig; +import ai.symbl.android.extension.model.request.Tracker; +import ai.symbl.android.extension.model.response.SymblResponse; import org.json.JSONException; import org.json.JSONObject; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.UUID; -import io.agora.extension.symblai.ExtensionManager; -import io.agora.extension.symblai.SymblAIFilterManager; +import ai.symbl.android.extension.ExtensionManager; +import ai.symbl.android.extension.SymblAIFilterManager; import io.agora.rtc2.Constants; import io.agora.rtc2.IRtcEngineEventHandler; import io.agora.rtc2.RtcEngine; From 2cb53eb53cc6411d1c3c025157ec54d66b61fba4 Mon Sep 17 00:00:00 2001 From: Adam Voliva Date: Mon, 29 Nov 2021 14:44:04 -0800 Subject: [PATCH 35/64] Update post-audio.md --- async-api/overview/audio/post-audio.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/async-api/overview/audio/post-audio.md b/async-api/overview/audio/post-audio.md index 14884567..72044ac0 100644 --- a/async-api/overview/audio/post-audio.md +++ b/async-api/overview/audio/post-audio.md @@ -175,7 +175,7 @@ responses = { 500: 'Something went wrong! Please contact support@symbl.ai' } -response = requests.request("POST", url, headers=headers, data=json.dumps(payload), params=json.dumps(params)) +response = requests.request("POST", url, headers=headers, data=payload, params=json.dumps(params)) if response.status_code == 201: # Successful API execution @@ -352,4 +352,4 @@ Here value of `X` can be found in [FAQ](/docs/faq). :::caution You must wait for the job to complete processing before you proceed with getting the Conversation Intelligence. If you immediately make a GET request to Conversation API, it is possible that you'll receive incomplete insights. Therefore, ensure that you wait for the job to complete.
-::: \ No newline at end of file +::: From 768e8b51bbf5396c32b5baac78d3b8e41d4fe897 Mon Sep 17 00:00:00 2001 From: Adam Voliva Date: Mon, 29 Nov 2021 14:48:19 -0800 Subject: [PATCH 36/64] Update put-audio.md --- async-api/overview/audio/put-audio.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/async-api/overview/audio/put-audio.md b/async-api/overview/audio/put-audio.md index 344127cc..d1646847 100644 --- a/async-api/overview/audio/put-audio.md +++ b/async-api/overview/audio/put-audio.md @@ -176,7 +176,7 @@ responses = { 500: 'Something went wrong! Please contact support@symbl.ai' } -response = requests.request("PUT", url, headers=headers, data=json.dumps(payload), params=json.dumps(params)) +response = requests.request("PUT", url, headers=headers, data=payload, params=json.dumps(params)) if response.status_code == 201: # Successful API execution @@ -358,4 +358,4 @@ Here value of `X` can be found in [FAQ](/docs/faq). :::caution You must wait for the job to complete processing before you proceed with getting the Conversation Intelligence. If you immediately make a GET request to Conversation API, it is possible that you'll receive incomplete insights. Therefore, ensure that you wait for the job to complete. -::: \ No newline at end of file +::: From 06c34fc4415342d52c61e1d9d3c9fb2e0c1833f5 Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Tue, 30 Nov 2021 12:20:30 +0530 Subject: [PATCH 37/64] Action Items link fix --- conversation-api/api-reference/follow-ups.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/api-reference/follow-ups.md b/conversation-api/api-reference/follow-ups.md index 1084a7d4..8db866c0 100644 --- a/conversation-api/api-reference/follow-ups.md +++ b/conversation-api/api-reference/follow-ups.md @@ -10,7 +10,7 @@ import TabItem from '@theme/TabItem'; This API returns list of all the follow-ups generated from the conversation. -This is a category of [Action Items](action-items) with a connotation to follow-up a request or +This is a category of [Action Items](/docs/conversation-api/action-items) with a connotation to follow-up a request or a task like sending an email or making a phone call or booking an appointment or setting up a meeting. From 1126818332a5232a4af30802941347516f24cf16 Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Tue, 30 Nov 2021 12:43:36 +0530 Subject: [PATCH 38/64] Changes to Follow-ups and Pre-built UI --- .../generate-pre-built-ui-from-video-recordings.md | 6 +++--- conversation-api/api-reference/follow-ups.md | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md b/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md index d3c41f06..ceff85d8 100644 --- a/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md +++ b/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md @@ -9,6 +9,8 @@ import TabItem from '@theme/TabItem'; [Symbl's Async API](/docs/async-api/introduction) allows you to process audio, video or text data and transform them into AI insights such as Topics, Action Items, Questions, and more. In this guide, we will walk you through how to process a video recording and receive our [Pre-built UI](/docs/pre-built-ui/summary-ui). 
The Pre-built UI generates a UI which contains all the information and analysis from your conversation which can be shared through a shareable link. +![Video Summary UI](/img/video-summary-ui-new.png) + ## Contents * [Getting Started](#getting-started) @@ -331,9 +333,7 @@ Once we make that call to the Experience API you will get a response with the Vi ## Conclusion -In the response is a `url` field that contains the URL to the Video Summary UI. Opening up that page will bring us to our Pre-built UI. You can view an example of what the Video Summary UI will look like: - -![Video Summary UI](/img/video-summary-ui-new.png) +In the response is a `url` field that contains the URL to the Video Summary UI. Opening up that page will bring us to our Pre-built UI. diff --git a/conversation-api/api-reference/follow-ups.md b/conversation-api/api-reference/follow-ups.md index 8db866c0..d0c0f685 100644 --- a/conversation-api/api-reference/follow-ups.md +++ b/conversation-api/api-reference/follow-ups.md @@ -4,9 +4,11 @@ title: GET Follow-Ups sidebar_label: GET Follow-Ups slug: /conversation-api/follow-ups --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- This API returns list of all the follow-ups generated from the conversation. From a1bd6077c0809c4a72d9be6883d0bb81a1862d5b Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Tue, 30 Nov 2021 12:46:09 +0530 Subject: [PATCH 39/64] Update follow-ups.md --- conversation-api/api-reference/follow-ups.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/api-reference/follow-ups.md b/conversation-api/api-reference/follow-ups.md index d0c0f685..cb32f17d 100644 --- a/conversation-api/api-reference/follow-ups.md +++ b/conversation-api/api-reference/follow-ups.md @@ -12,7 +12,7 @@ import TabItem from '@theme/TabItem'; This API returns list of all the follow-ups generated from the conversation. -This is a category of [Action Items](/docs/conversation-api/action-items) with a connotation to follow-up a request or +This is a category of [Action Items](action-items) with a connotation to follow-up a request or a task like sending an email or making a phone call or booking an appointment or setting up a meeting. From c0525bc1a597e59b06155c6cfecb160d873649cb Mon Sep 17 00:00:00 2001 From: vishalsharma Date: Tue, 30 Nov 2021 14:10:29 +0530 Subject: [PATCH 40/64] Upgraded the version number --- integrations/agora-sdk-plugin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/agora-sdk-plugin.md b/integrations/agora-sdk-plugin.md index 23a5a654..adada83a 100644 --- a/integrations/agora-sdk-plugin.md +++ b/integrations/agora-sdk-plugin.md @@ -103,7 +103,7 @@ This section walks you through the steps necessary to set up the Symbl Conversat implementation fileTree(include: ['*.jar'], dir: 'libs') implementation 'com.squareup.okhttp3:okhttp:3.10.0' implementation 'org.java-websocket:Java-WebSocket:1.5.1' -implementation 'ai.symbl:android.extension:0.0.1' +implementation 'ai.symbl:android.extension:0.0.2' ``` 2.
Implement the interface io.agora.rtc2.IMediaExtensionObserver From d455d167dc2acb44a6e90e0bb12c1ea9881655cd Mon Sep 17 00:00:00 2001 From: vishalsharma Date: Tue, 30 Nov 2021 14:39:21 +0530 Subject: [PATCH 41/64] Removed Agora Specific Dependency --- integrations/agora-sdk-plugin.md | 1 - 1 file changed, 1 deletion(-) diff --git a/integrations/agora-sdk-plugin.md b/integrations/agora-sdk-plugin.md index adada83a..6bbb2806 100644 --- a/integrations/agora-sdk-plugin.md +++ b/integrations/agora-sdk-plugin.md @@ -100,7 +100,6 @@ This section walks you through the steps necessary to set up the Symbl Conversat 1. Add the following information into your `build.gradle` module file: ```js -implementation fileTree(include: ['*.jar'], dir: 'libs') implementation 'com.squareup.okhttp3:okhttp:3.10.0' implementation 'org.java-websocket:Java-WebSocket:1.5.1' implementation 'ai.symbl:android.extension:0.0.2' From 608cedb13d0d575ad394880eb533414d2c214051 Mon Sep 17 00:00:00 2001 From: pema-s <81958801+pema-s@users.noreply.github.com> Date: Tue, 30 Nov 2021 17:45:00 +0530 Subject: [PATCH 42/64] Adds trackers rename --- conversation-api/concepts/trackers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conversation-api/concepts/trackers.md b/conversation-api/concepts/trackers.md index 234f306d..0e4fda92 100644 --- a/conversation-api/concepts/trackers.md +++ b/conversation-api/concepts/trackers.md @@ -79,7 +79,7 @@ For step-by-step instructions on how to use Trackers API see the [Using Trackers Operation | Endpoint ---------- | ------- Create Tracker | [`POST` v1/manage/tracker](/management-api/trackers/create-tracker) -Create Trackers in Bulk | [`POST` v1/manage/tracker](/management-api/trackers/create-tracker#bulk-create-trackers-api) +Create Trackers in Bulk | [`POST` v1/manage/trackers](/management-api/trackers/create-tracker#bulk-create-trackers-api) Get Tracker with ID| [`GET`v1/manage/tracker/{trackerId}](/management-api/trackers/get-tracker#get-tracker-by-id) Get Tracker with name | [`GET` v1/manage/trackers?&name={trackerName}](/management-api/trackers/get-tracker#get-tracker) Update Tracker| [`PUT`v1/manage/tracker/{trackerId}](/management-api/trackers/update-tracker) From c276639826d73bf74c25e5594f3a085d1d43a4a1 Mon Sep 17 00:00:00 2001 From: pema-s <81958801+pema-s@users.noreply.github.com> Date: Tue, 30 Nov 2021 17:56:00 +0530 Subject: [PATCH 43/64] adds new changes-bug fixes --- .../tutorials/generate-pre-built-ui-from-video-recordings.md | 2 +- conversation-api/api-reference/action-items.md | 2 ++ conversation-api/api-reference/follow-ups.md | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md b/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md index cea12c6d..e6bb3fb6 100644 --- a/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md +++ b/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md @@ -9,7 +9,7 @@ import TabItem from '@theme/TabItem'; [Symbl's Async API](/docs/async-api/introduction) allows you to process audio, video or text data and transform them into AI insights such as Topics, Action Items, Questions, and more. In this guide, we will walk you through how to process a video recording and receive our [Pre-built UI](/docs/pre-built-ui/summary-ui). The Pre-built UI generates a UI which contains all the information and analysis from your conversation which can be shared through a shareable link. 
-![Video Summary UI](/img/video-summary-ui-new.png) +![Video Summary UI](/img/summary-ui-intro.png) ## Contents diff --git a/conversation-api/api-reference/action-items.md b/conversation-api/api-reference/action-items.md index 59f3e925..5658f286 100644 --- a/conversation-api/api-reference/action-items.md +++ b/conversation-api/api-reference/action-items.md @@ -7,6 +7,8 @@ slug: /conversation-api/action-items import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + This API returns a list of all the action items generated from the conversation. An action item is a specific outcome recognized in the conversation that requires one or more people in the conversation to act in the future. diff --git a/conversation-api/api-reference/follow-ups.md b/conversation-api/api-reference/follow-ups.md index cb32f17d..d0c0f685 100644 --- a/conversation-api/api-reference/follow-ups.md +++ b/conversation-api/api-reference/follow-ups.md @@ -12,7 +12,7 @@ import TabItem from '@theme/TabItem'; This API returns a list of all the follow-ups generated from the conversation. -This is a category of [Action Items](action-items) with a connotation to follow up a request or +This is a category of [Action Items](/docs/conversation-api/action-items) with a connotation to follow up a request or a task like sending an email or making a phone call or booking an appointment or setting up a meeting. From 51089936c2770bd018f735e742ea44fe6c914166 Mon Sep 17 00:00:00 2001 From: pema-s <81958801+pema-s@users.noreply.github.com> Date: Fri, 3 Dec 2021 14:48:12 +0530 Subject: [PATCH 44/64] deletes orphan pages --- .../concepts/outbound-integrations.md | 19 - developer-tools/asr-eval.md | 37 -- developer-tools/media-converter.md | 55 -- getting-started.md | 8 - how-tos/analyze-audio.md | 239 -------- how-tos/analyze-text.md | 218 ------- how-tos/analyze-video.md | 243 -------- how-tos/get-call-analysis-js-sdk.md | 387 ------------- how-tos/make-telephony-call.md | 207 ------- javascript-sdk/overview/complete-example.md | 203 ------- javascript-sdk/overview/initialise.md | 75 --- nextjs/introduction.md | 73 --- ...-to-text-multiple-audio-files-directory.md | 37 -- .../get-topics-and-action-items-from-call.md | 24 - python-sdk/get-video-analysis-on-email.md | 39 -- ...udio-transcript-using-system-microphone.md | 18 - resources/testing.md | 20 - resources/usecases.md | 77 --- tools/cheat-sheet-curl.md | 541 ------------------ 19 files changed, 2520 deletions(-) delete mode 100644 conversation-api/concepts/outbound-integrations.md delete mode 100644 developer-tools/asr-eval.md delete mode 100644 developer-tools/media-converter.md delete mode 100644 getting-started.md delete mode 100644 how-tos/analyze-audio.md delete mode 100644 how-tos/analyze-text.md delete mode 100644 how-tos/analyze-video.md delete mode 100644 how-tos/get-call-analysis-js-sdk.md delete mode 100644 how-tos/make-telephony-call.md delete mode 100644 javascript-sdk/overview/complete-example.md delete mode 100644 javascript-sdk/overview/initialise.md delete mode 100644 nextjs/introduction.md delete mode 100644 python-sdk/get-speech-to-text-multiple-audio-files-directory.md delete mode 100644 python-sdk/get-topics-and-action-items-from-call.md delete mode 100644 python-sdk/get-video-analysis-on-email.md delete mode 100644 python-sdk/live-audio-transcript-using-system-microphone.md delete mode 100644 resources/testing.md delete mode 100644 resources/usecases.md delete mode 100644 tools/cheat-sheet-curl.md diff --git
a/conversation-api/concepts/outbound-integrations.md b/conversation-api/concepts/outbound-integrations.md deleted file mode 100644 index a8aab5d2..00000000 --- a/conversation-api/concepts/outbound-integrations.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -id: outbound-integrations -title: Outbound Integrations -sidebar_label: Outbound Integrations ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -The platform currently offers email as an out-of-the-box integration with the Node.js SDK and calendar integration on the pre-built post conversation summary UI. However, this can be extended to any work tool using extensible webhooks, where the actionable insights need to be pushed to enhance productivity and reduce the time taken by users to manually enter information from conversations. The same integrations can be enabled as suggestive actions to make this even quicker. - -Some examples of work tools that can be integrated using the extensible webhooks are: - -* Sales platforms such as Salesforce, Copper -* Task management solutions such as Trello, Google Tasks -* Calendars -* Project Management Tools such as Monday, Asana -* Collaboration platforms such as Slack, Flock diff --git a/developer-tools/asr-eval.md b/developer-tools/asr-eval.md deleted file mode 100644 index e3278bc1..00000000 --- a/developer-tools/asr-eval.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: asr-evaluation -title: Automatic Speech Recognition (ASR) Evaluation -sidebar_label: Automatic Speech Recognition (ASR) Evaluation ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Automatic Speech Recognition (ASR) Evaluation -This is a simple utility to perform a quick evaluation of the results generated by any Speech-to-text (STT) or Automatic Speech Recognition (ASR) system. - -This utility can calculate the following metrics: - -* Word Error Rate (WER), which is the most common metric for measuring the performance of a Speech Recognition or Machine Translation system - -* Levenshtein Distance calculated at word level. - -* Number of Word level insertions, deletions and mismatches between the original file and the generated file. - -* Number of Phrase level insertions, deletions and mismatches between the original file and the generated file. - -* Color Highlighted text Comparison to visualize the differences. - -* General Statistics about the original and generated files (bytes, characters, words, new lines etc.) - - -### Installation - -`$ npm install -g speech-recognition-evaluation` - -### Usage -The simplest way to run your first evaluation is by passing the original and generated options to the asr-eval command, where original is a plain text file containing the original transcript to be used as a reference (usually produced by a human) and generated is a plain text file containing the transcript generated by the STT/ASR system. - - -`$ asr-eval --original ./original-file.txt --generated ./generated-file.txt` - -For more information please visit [this](https://github.com/symblai/speech-recognition-evaluation).
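Since the deleted page leans on Word Error Rate without spelling out the computation, here is a minimal word-level WER sketch; it is illustrative only and not part of the `speech-recognition-evaluation` package, which computes this and the other metrics internally. WER is the word-level Levenshtein distance divided by the number of reference words:

```js
// Word-level Levenshtein distance; WER = distance / reference word count.
function wordErrorRate(original, generated) {
  const ref = original.trim().split(/\s+/)
  const hyp = generated.trim().split(/\s+/)
  // dp[i][j] = edit distance between the first i reference words and the first j generated words
  const dp = Array.from({ length: ref.length + 1 }, (_, i) =>
    Array.from({ length: hyp.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
  )
  for (let i = 1; i <= ref.length; i++) {
    for (let j = 1; j <= hyp.length; j++) {
      const cost = ref[i - 1] === hyp[j - 1] ? 0 : 1
      dp[i][j] = Math.min(
        dp[i - 1][j] + 1, // deletion
        dp[i][j - 1] + 1, // insertion
        dp[i - 1][j - 1] + cost // substitution or match
      )
    }
  }
  return dp[ref.length][hyp.length] / ref.length
}

// One substitution over four reference words -> WER = 0.25
console.log(wordErrorRate('the quick brown fox', 'the quick brown box'))
```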
diff --git a/developer-tools/media-converter.md b/developer-tools/media-converter.md deleted file mode 100644 index ac89c520..00000000 --- a/developer-tools/media-converter.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -id: media-convertor -title: Media converter -sidebar_label: Media converter - ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Currently this utility only supports one feature: - -* Transcode Audio file - -This utility can be used as a library in your Node.js code. You can simply install it in your local project. - - -`$ npm install symbl-media --save` - - -Use the transcode command to transcode the file. - - -`$ media transcode -i ./my-input-file.wav -o ./my-output-file.mp3 -f mp3` - - -For more information please visit [this](https://github.com/symblai/symbl-media). - -> You can quickly transcode an audio file using the transcodeMediaFile method. - - - - - - - - -```js -const {transcodeMediaFile} = require('symbl-media'); -(async () => { - try { - const result = await transcodeMediaFile('./my-input-file.wav', 'my-output-file.mp3','mp3'); - console.log('Successfully transcoded to: ', result.outPath); - } catch (e) { - console.error(e); - } -})(); -``` - - - diff --git a/getting-started.md b/getting-started.md deleted file mode 100644 index da32e617..00000000 --- a/getting-started.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -id: getting-started -title: Getting Started -sidebar_label: Getting Started -slug: /getting-started ---- - - \ No newline at end of file diff --git a/how-tos/analyze-audio.md b/how-tos/analyze-audio.md deleted file mode 100644 index 6970993f..00000000 --- a/how-tos/analyze-audio.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -id: analyze-audio -title: How to transcribe and analyze recorded audio files or URLs (Next.js) -sidebar_label: Audio Processing - ---- - -In this How to guide we will dive into how we can use the Symbl AI Async API to retrieve insights on an audio file or URL. -This guide is somewhat specific to the React ecosystem and you can see the full code of how it's implemented within the Next.js demo -app [here](https://github.com/symblai/nextjs-symblai-demo). - -The main flow that we want to achieve is that you can upload an audio file or paste a URL within an input field and -click ![](/img/submitbutton.png). - - -This is how the UI of the demo app will look: - -![audio page screenshot](/img/audio.png) - -When audio is sent for processing, you will see in the UI a bunch of JSON data that contributes to audio processing, and once processing is finished you will get transcripts for the audio. You can also query the Symbl [Conversation API](https://docs.symbl.ai/#conversation-api) to get different insights on the audio. - -![audio processed](/img/audioprocessed.png) -![audio processed](/img/audioconversational.png) - -## Get Started - -We won't dive into all details of implementing a Next.js app from scratch, so you can always check the [demo app code](https://github.com/symblai/nextjs-symblai-demo) for more info. Also feel free to open issues asking questions or writing suggestions. - -#### Retrieve your credentials - -In order to use the Symbl API, you need to sign up and get your credentials. They include your `appId` and `appSecret`. You can find them on the home page of the platform. - -![](https://docs.symbl.ai/images/credentials-faf6f434.png) - -Add the credentials to the `next-config.js` file, filling in the `APP_ID` and `APP_SECRET` variables.
- -```javascript -module.exports = { - env: { - APP_ID: '', - APP_SECRET: '', - }, -} -``` - -In order to see the demo app in action, you can clone the repo, run `yarn` and then `yarn dev`. - -The page you are looking for in this tutorial is `/audio` or this [file](https://github.com/symblai/nextjs-symblai-demo/blob/master/pages/audio/index.tsx) - -At first you will see the following. ![](/img/notlogged.png) - -In order to see the app we will need to log in. - -### Authenticating - -When using the REST API, we need to pass the auth token in a header. For that we've created the `ProtectedPage` component. This component executes a Symbl-specific REST endpoint to retrieve the auth token and store it in context. Later on we can retrieve this token from the helper hook `useAuth`. - -This is how we would retrieve the token: - -```javascript -async function loginToSymbl() { - const response = await fetch('https://api.symbl.ai/oauth2/token:generate', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - mode: 'cors', - body: JSON.stringify({ - type: 'application', - appId: process.env.APP_ID, - appSecret: process.env.APP_SECRET, - }), - }) - const json = await response.json() - console.log('Access Token is: ', json) - setToken(json.accessToken) -} -``` - -As you can see there is a specific endpoint to generate the token by providing the correct `APP_ID` and `APP_SECRET`. - -Later on, in any place in the application, we can use `const { token } = useAuth()` to get the token. - -### Using Symbl audio Async API - -The Symbl Audio async API is used to process an audio file or URL and return a `jobId`, which we can monitor for completion using the [Job API](https://docs.symbl.ai/#job-api) by polling and checking the job status. Once the job is completed, we will get a `conversationId` which we can use to retrieve data with the help of the [Conversation API](https://docs.symbl.ai/#conversation-api). - -#### File Upload and processing. - -If you look at the `pages/audio/index.tsx` file, you will notice that for file upload we are using - -```javascript - -``` - -We won't dive into how it's implemented but basically the idea is to toggle between URL or file input. Once you click on the button ![](/img/submitbutton.png), it will call the `onAudioSubmit` function, passing either the URL or the file. - -```javascript -const onAudioSubmit = async (data: FileOrUrlInputSubmitType) => { - if (data.type === 'file') { - setFile(data?.file as any) - } else { - setAudioUrl(data?.url as string) - } -} -``` - -This will set our state. -All the processing is done within the `useAudioAsyncAPI` hook. This hook monitors the state change - -```javascript -const { jobStatus, sentForProcessing } = useAudioAsyncAPI( - audioUrl !== '' ? audioUrl : file, - asyncApiParams.query -) -``` - -This hook monitors the state change using the dependency array of `useEffect`, and when the state changes it will start the flow. - -1. #### Get Request parameters for either file or URL - -First we need to get the relevant parameters for the audio URL or file. The difference is mainly the `Content-Type`.
- -You will see the following function defined within `useEffect` - -```javascript -async function getFileOrUrlOptions() { - if (isFile) { - const file = data - const requestOptionsAudio = { - method: 'POST', - headers: { - 'Authorization': `Bearer ${authToken}`, - 'Content-Type': MIME_TYPES['mp3'], - }, - body: file, - } - return requestOptionsAudio - } else { - const url = data - const requestOptionsAudio = { - method: 'POST', - headers: { - 'Authorization': `Bearer ${authToken}`, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - url: url, - confidenceThreshold: 0.6, - timezoneOffset: 0, - }), - } - return requestOptionsAudio - } -} -``` - -Here we define a function to conditionally get the different request parameters that we need to send in our REST call. After all, there is a difference depending on whether we send a file or a URL. - -2. Getting Symbl async Audio API endpoint. - -The Async API endpoint for audio processing will change based on whether it's a URL or a file: - -```javascript -const urlAudio = isFile - ? `https://api.symbl.ai/v1/process/audio${query}` - : `https://api.symbl.ai/v1/process/audio/url${query}` -``` - -We will use this endpoint to send an audio URL or audio file for processing. This endpoint will also accept a query string which we will use later on to pass parameters. - -3. #### Use Job API to poll for job status. - -Previously we mentioned that there should be some kind of polling mechanism to check whether the job is finished or not. Symbl has the [Job API](https://docs.symbl.ai/#job-api) that we can use for that. - -```javascript -async function check(jobId: string) { - const checkJob = await fetch( - `https://api.symbl.ai/v1/job/${jobId}`, - requestOptions - ) - const checkJobJson = await checkJob.json() - setJobStatus(checkJobJson) - if (checkJobJson.status === 'in_progress') { - check(jobId) - return - } else { - setSentForProcessing(false) - } -} -``` - -In this function we are polling the Job API endpoint passing a Job Id, which we've received as a response from the audio async API. We will poll until we get a `completed` status - -4. Send audio to processing - -```javascript -//Transcode and get Parameters -const requestOptionsAudio = await getFileOrUrlOptions() -// Execute the request -const processingResponse = await fetch(urlAudio, requestOptionsAudio) -const processingResponseJson = await processingResponse.json() -// Check Job Status -check(processingResponseJson.jobId) -``` - -After the job is finished we can get data from the [Conversation API](https://docs.symbl.ai/#conversation-api). We will do that by using ``component. - -On this page you will also see this component `` - -This is a prebuilt component from the `@symblai/react-elements` package. As soon as you provide it with a `conversationId`, it will nicely render conversation transcripts. There is also a `` component that will do the same but for topics. - -### Passing additional Async API parameters to get more granular insights - -We can pass various parameters to improve the processing of audio files. These parameters can be passed as a query string. -Such parameters include: - -- `customVocabulary` - Contains a list of words and phrases that provide hints to the speech recognition task. -- `detectPhrases` - Accepted values are true & false. It shows Actionable Phrases in each sentence of the conversation. These sentences can be found in the Conversation's [Messages API](https://docs.symbl.ai/#get-messages-transcript). - -Diarization parameters.
You can read more about them [here](https://docs.symbl.ai/#audio-api) - -## Summary - -In this How To we've briefly walked you through the key points and flows that should be implemented in order to send your audio for processing and get insights on what the conversation in the audio was about. Even though the code shared in this How To is React specific, the general flow can be used with any framework: - -1. Authenticate and get the token -2. Send the audio URL/file to the correct endpoint - -```javascript -const endpoint = isFile - ? `https://api.symbl.ai/v1/process/audio${query}` - : `https://api.symbl.ai/v1/process/audio/url${query}` -``` - -3. Get a `jobId` as a response -4. Poll the Job API endpoint to get the processing status - `https://api.symbl.ai/v1/job/${jobId}` -5. Get the `conversationId` back when processing is completed -6. Use the [Conversation API](https://docs.symbl.ai/#conversation-api) to get insights about the audio. diff --git a/how-tos/analyze-text.md b/how-tos/analyze-text.md deleted file mode 100644 index a4ef3e38..00000000 --- a/how-tos/analyze-text.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -id: analyze-text -title: How to analyze chat, messages, emails, or transcripts in batch mode -sidebar_label: Text Processing ---- -## Introduction - -In this How to guide we will dive into how we can use the Symbl AI Async API to retrieve insights about a conversation. The conversation should be in a format compliant with the [Messages API](https://docs.symbl.ai/#get-messages-transcript). This guide is somewhat specific to the React ecosystem and you can see the full code of how it's implemented within the Next.js demo app [here](https://github.com/symblai/nextjs-symblai-demo). - -The main flow that we want to achieve is that you can submit text in JSON format in an input field and click ![](/img/submitbutton.png). - -This is how the UI of the demo app will look: - -![text page screenshot](/img/text.png) - -On the left side there is a text field where you can paste JSON in the correct format, but for convenience in this demo we've created a UI that helps you construct messages in the correct format by writing a name and a message. userId and conversation type are optional. - -You also have a flag `detectPhrases`. When true is passed, it shows Actionable Phrases in each sentence of the conversation. - -![text page screenshot](/img/text2.png) - -When the text is sent for processing, you will see in the UI a bunch of JSON data that contributes to text processing, and once processing is finished you can check out the text transcripts, rendered using the `@symblai/react-elements` package. - -![text processed](/img/text3.png) - -## Get Started - -We won't dive into all details of implementing a Next.js app from scratch, so you can always check the [demo app code](https://github.com/symblai/nextjs-symblai-demo) for more info. Also feel free to open issues asking questions or writing suggestions. - -#### Retrieve your credentials - -In order to use the Symbl API, you need to sign up and get your credentials. They include your `appId` and `appSecret`. You can find them on the home page of the platform. - -![](https://docs.symbl.ai/images/credentials-faf6f434.png) - -Add the credentials to the `next-config.js` file, filling in the `APP_ID` and `APP_SECRET` variables. - -```javascript -module.exports = { - env: { - APP_ID: '', - APP_SECRET: '', - }, -} -``` - -In order to see the demo app in action, you can clone the repo, run `yarn` and then `yarn dev`.
- -The page you are looking for in this tutorial is `/text` or this [file](https://github.com/symblai/nextjs-symblai-demo/blob/master/pages/text/index.tsx) - -At first you will see the following. ![](/img/notlogged.png) - -In order to see the app we will need to log in. - -### Authenticating - -When using the REST API, we need to pass the auth token in a header. For that we've created the `ProtectedPage` component. This component executes a Symbl-specific REST endpoint to retrieve the auth token and store it in context. - -Later on we can retrieve this token from the helper hook `useAuth`, which is basically a wrapper for `useContext`. - -`export const useAuth = () => useContext(AuthContext)` - -This is how we would retrieve the token: - -```javascript -async function loginToSymbl() { - const response = await fetch('https://api.symbl.ai/oauth2/token:generate', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - mode: 'cors', - body: JSON.stringify({ - type: 'application', - appId: process.env.APP_ID, - appSecret: process.env.APP_SECRET, - }), - }) - const json = await response.json() - console.log('Access Token is: ', json) - setToken(json.accessToken) -} -``` - -Later on, in any place in the application, we can use `const { token } = useAuth()` to get the token. - -### Using Symbl Text Async API - -The Async Text API allows you to process any text payload to get the transcription and conversational insights. It can be useful in any use case where you have access to the textual content of a type of conversation, and you want to extract the insightful items supported by the Conversation API. If you want to add more content to the same conversation, use the [PUT Async Text API](https://docs.symbl.ai/#text-api). - -The Symbl Text async API will return a `jobId` which we can monitor for completion by using a polling mechanism. Once the job is completed, we will get a `conversationId` which we can use to retrieve data with the Conversation API. - -#### Text upload and processing - -The Text async API is built for a particular format of message list that you can upload and parse to get conversational insights. The format should look something like this: - -```json -[ - { - "payload": { - "content": "Hi Mike, Natalia here. Hope you don’t mind me reaching out. Who would be the best possible person to discuss internships and student recruitment at ABC Corp? Would you mind pointing me toward the right person and the best way to reach them? Thanks in advance for your help, I really appreciate it!" - }, - "from": { - "userId": "natalia@example.com", - "name": "Natalia" - }, - "duration": { - "startTime": "2020-07-21T16:02:19.01Z", - "endTime": "2020-07-21T16:04:19.99Z" - } - }, - { - "payload": { - "content": "Hey Natalia, thanks for reaching out. I am connecting you with Steve who handles recruitments for us." - }, - "from": { - "userId": "mike@abccorp.com", - "name": "Mike" - }, - "duration": { - "startTime": "2020-07-21T16:04:19.99Z", - "endTime": "2020-07-21T16:04:20.99Z" - } - } -] -``` - -So we will have a textarea on the page where you can paste your content. Then in order to process it, you need to click the ![](/img/submitbutton.png) button. -For convenience we will also have a "chat like" UI which we can use to build the JSON in the format mentioned above and submit it for processing. - -![](/img/chat.png) - -Once you submit the text for processing, several things happen. - -#### 1.
Get Relevant params for Text - -You will see the following params defined within `useEffect` - -```typescript -const requestTextOptions = { - method: 'POST', - headers: { - 'Authorization': `Bearer ${authToken}`, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - messages: JSON.parse(data), - }), -} - -const requestOptions = { - method: 'GET', - headers: { - 'Authorization': `Bearer ${authToken}`, - }, -} -``` - -2. #### Use Job API to poll for job status. - -Previously we mentioned that there should be some kind of polling mechanism to check whether the job is finished or not. Symbl has the [Job API](https://docs.symbl.ai/#job-api) that we can use for that. - -```typescript -async function check(jobId: string) { - const checkJob = await fetch( - `https://api.symbl.ai/v1/job/${jobId}`, - requestOptions - ) - const checkJobJson = await checkJob.json() - setJobStatus(checkJobJson) - if (checkJobJson.status === 'in_progress') { - check(jobId) - return - } else { - setSentForProcessing(false) - } -} -``` - -In this function we are polling the Job API endpoint passing a Job Id, which we've received as a response from the text async API. We will poll until we get a `completed` status. - -3. Send text to processing - -```typescript -// Execute the request -const processingResponse = await fetch( - 'https://api.symbl.ai/v1/process/text', - requestTextOptions -) -const processingResponseJson = await processingResponse.json() -// Check Job Status -check(processingResponseJson.jobId) -``` - -After the job is finished we can get data from the Conversation API. We will do that by using ``component. - -On this page you will also see this component `` - -This is a prebuilt component from the `@symblai/react-elements` package. As soon as you provide it with a `conversationId`, it will nicely render conversation transcripts. There is also a `` component that will do the same but for topics. - -## Summary - -In this How To we've briefly walked you through the key points and flows that should be implemented in order to send your text for processing and get insights on what the conversation was about. The conversation should be sent in the message format of the [Messages API](https://docs.symbl.ai/#get-messages-transcript). For that we've built a helper UI that helps create messages in this format. It also supports pasting JSON. -Even though the code shared in this How To is React specific, the general flow can be used with any framework: - -1. Authenticate and get the token -2. Send the text in the correct format to the correct endpoint - -```javascript -const endpoint = 'https://api.symbl.ai/v1/process/text' -``` - -3. Get a `jobId` as a response -4. Poll the Job API endpoint to get the processing status - `https://api.symbl.ai/v1/job/${jobId}` -5. Get the `conversationId` back when processing is completed -6. Use the [Conversation API](https://docs.symbl.ai/#conversation-api) to get insights about the text. - -You can read more about the Async Text API [here](https://docs.symbl.ai/#text-api) diff --git a/how-tos/analyze-video.md b/how-tos/analyze-video.md deleted file mode 100644 index 1bd41ac4..00000000 --- a/how-tos/analyze-video.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -id: analyze-video -title: How to transcribe and analyze video conversations asynchronously (Next.js) -sidebar_label: Video Processing using Next.js ---- - -In this How to guide we will dive into how we can use the Symbl AI Async API to retrieve insights on a video file or URL.
This guide is somewhat specific to the React ecosystem and you can see the full code of how it's implemented within the Next.js demo app [here](https://github.com/symblai/nextjs-symblai-demo). - -The main flow that we want to achieve is that you can upload a video file or paste a URL within an input field and click ![](/img/submitbutton.png). - -This is how the UI of the demo app will look: - -![video page screenshot](/img/video.png) - -When the video is sent for processing, you will see in the UI a bunch of JSON data that contributes to video processing, and once processing is finished you can check out the video transcripts and even navigate these transcripts to move to a specific part of the video by clicking on the transcript. - -![video processed](/img/videoprocessed.png) - -## Get Started - -We won't dive into all details of implementing a Next.js app from scratch, so you can always check the [demo app code](https://github.com/symblai/nextjs-symblai-demo) for more info. Also feel free to open issues asking questions or writing suggestions. - -#### Retrieve your credentials - -In order to use the Symbl API, you need to sign up and get your credentials. They include your `appId` and `appSecret`. You can find them on the home page of the platform. - -![](https://docs.symbl.ai/images/credentials-faf6f434.png) - -Add the credentials to the `next-config.js` file, filling in the `APP_ID` and `APP_SECRET` variables. - -```javascript -module.exports = { - env: { - APP_ID: '', - APP_SECRET: '', - }, -} -``` - -In order to see the demo app in action, you can clone the repo, run `yarn` and then `yarn dev`. - -The page you are looking for in this tutorial is `/video` or this [file](https://github.com/symblai/nextjs-symblai-demo/blob/master/pages/video/index.tsx) - -At first you will see the following. ![](/img/notlogged.png) - -In order to see the app we will need to log in. - -### Authenticating - -When using the REST API, we need to pass the auth token in a header. For that we've created the `ProtectedPage` component. This component executes a Symbl-specific REST endpoint to retrieve the auth token and store it in context. - -Later on we can retrieve this token from the helper hook `useAuth`, which is basically a wrapper for `useContext`. - -`export const useAuth = () => useContext(AuthContext)` - -This is how we would retrieve the token: - -```javascript -const { token, setToken } = useAuth() - -async function loginToSymbl() { - const response = await fetch('https://api.symbl.ai/oauth2/token:generate', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - mode: 'cors', - body: JSON.stringify({ - type: 'application', - appId: process.env.APP_ID, - appSecret: process.env.APP_SECRET, - }), - }) - const json = await response.json() - console.log('Access Token is: ', json) - setToken(json.accessToken) -} -``` - -As you can see there is a specific endpoint to generate the token by providing the correct `APP_ID` and `APP_SECRET`. - -Later on, in any place in the application, we can use `const { token } = useAuth()` to get the token. - -### Using Symbl Video Async API - -The Symbl Video async API is used to process a video file or URL and return a `jobId`, which we can monitor for completion using the [Job API](https://docs.symbl.ai/#job-api) by polling and checking the job status. Once the job is completed, we will get a `conversationId` which we can use to retrieve data with the help of the [Conversation API](https://docs.symbl.ai/#conversation-api). - -#### File Upload and processing.
- -If you look at the `pages/video/index.tsx` file, you will notice that for file upload we are using - -```typescript - -``` - -We won't dive into how it's implemented but basically the idea is to toggle between URL or file input. Once you click on the button ![](/img/submitbutton.png), it will call the `onVideoSubmit` function, passing either the URL or the file. - -```typescript -const onVideoSubmit = async (data: FileOrUrlInputSubmitType) => { - setConversationData(null) - if (data.type === 'file') { - setFile(data?.file as any) - } else { - setVideoUrl(data?.url as string) - } -} -``` - -This will set our state. -All the processing is done within the `useAsyncVideoApi` hook. - -```typescript -const { jobStatus, sentForProcessing } = useAsyncVideoApi( - videoUrl !== '' ? videoUrl : file, - asyncApiParams.query -) -``` - -This hook monitors the state change using the dependency array of `useEffect`, and when the state changes it will start the flow. - -#### 1. Get Relevant params for Video URL or file - -First we need to get the relevant parameters for the video URL or file. The difference is mainly the `Content-Type`. - -You will see the following function defined within `useEffect` - -```typescript -async function getFileOrUrlOptions() { - if (isFile) { - const file = data - const requestOptionsVideo = { - method: 'POST', - headers: { - 'Authorization': `Bearer ${authToken}`, - 'Content-Type': 'video/mp4', - }, - body: file, - json: true, - } - return requestOptionsVideo - } else { - const url = data - const requestOptionsVideo = { - method: 'POST', - headers: { - 'Authorization': `Bearer ${authToken}`, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - url: url, - confidenceThreshold: 0.6, - timezoneOffset: 0, - }), - } - return requestOptionsVideo - } -} -``` - -Here we define a function to conditionally get the different request parameters that we need to send in our REST call. After all, there is a difference depending on whether we send a file or a URL. - -2. Getting Symbl async Video API endpoint. - -The Async API endpoint for video processing will change based on whether it's a URL or a file: - -```javascript -const urlVideo = isFile - ? `https://api.symbl.ai/v1/process/video${query}` - : `https://api.symbl.ai/v1/process/video/url${query}` -``` - -We will use this endpoint to send a video URL or video file for processing. This endpoint will also accept a query string which we will use later on to pass parameters. - -3. #### Use Job API to poll for job status. - -Previously we mentioned that there should be some kind of polling mechanism to check whether the job is finished or not. Symbl has the [Job API](https://docs.symbl.ai/#job-api) that we can use for that. - -```typescript -async function check(jobId: string) { - const checkJob = await fetch( - `https://api.symbl.ai/v1/job/${jobId}`, - requestOptions - ) - const checkJobJson = await checkJob.json() - setJobStatus(checkJobJson) - if (checkJobJson.status === 'in_progress') { - check(jobId) - return - } else { - setSentForProcessing(false) - } -} -``` - -In this function we are polling the Job API endpoint passing a Job Id, which we've received as a response from the video async API. We will poll until we get a `completed` status - -4.
Send video to processing - -```typescript -//Get request Parameters -const requestOptionsVideo = await getFileOrUrlOptions() -// Execute the request -const processingResponse = await fetch(urlVideo, requestOptionsVideo) -const processingResponseJson = await processingResponse.json() -// Check Job Status -check(processingResponseJson.jobId) -``` - -After the job is finished we can get data from the Conversation API. We will do that by using ``component. - -On this page you will also see this component `` - -This is a prebuilt component from the `@symblai/react-elements` package. As soon as you provide it with a `conversationId`, it will nicely render conversation transcripts. There is also a `` component that will do the same but for topics, which Symbl will recognize as the main topics within the conversation. - -### Passing additional Async API parameters to get more granular insights - -We can pass various parameters to improve the processing of video files. These parameters can be passed as a query string to the video processing endpoint. -Such parameters may include: - -- `customVocabulary` - Contains a list of words and phrases that provide hints to the speech recognition task. -- `detectPhrases` - Accepted values are true & false. It shows Actionable Phrases in each sentence of the conversation. These sentences can be found in the Conversation's [Messages API](https://docs.symbl.ai/#get-messages-transcript). - -Diarization parameters. You can read more about them [here](https://docs.symbl.ai/#audio-api) - -## Summary - -In this How To we've briefly walked you through the key points and flows that should be implemented in order to send your video for processing and get insights on what the conversation in the video was about. Even though the code shared in this How To is React specific, the general flow can be used with any framework: - -1. Authenticate and get the token -2. Send the video URL/file to the correct endpoint - -```javascript -const endpoint = isFile - ? `https://api.symbl.ai/v1/process/video${query}` - : `https://api.symbl.ai/v1/process/video/url${query}` -``` - -3. Get a `jobId` as a response -4. Poll the Job API endpoint to get the processing status - `https://api.symbl.ai/v1/job/${jobId}` -5. Get the `conversationId` back when processing is completed -6. Use the [Conversation API](https://docs.symbl.ai/#conversation-api) to get insights about the video. - -You can read more about the Async Video API [here](https://docs.symbl.ai/#video-api) diff --git a/how-tos/get-call-analysis-js-sdk.md b/how-tos/get-call-analysis-js-sdk.md deleted file mode 100644 index a44fb12a..00000000 --- a/how-tos/get-call-analysis-js-sdk.md +++ /dev/null @@ -1,387 +0,0 @@ ---- -id: get-call-analysis-js-sdk -title: Using Symbl Javascript SDK through Next.js app for Live and Post Call Analysis -sidebar_label: Live & Post Call Analysis - ---- - diff --git a/how-tos/make-telephony-call.md b/how-tos/make-telephony-call.md deleted file mode 100644 index b08b1b3f..00000000 --- a/how-tos/make-telephony-call.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -id: make-telephony-call -title: Next.js Telephony Rest Call -sidebar_label: Next.js Telephony Rest Call ---- - -In this How to guide, we will see how we can use the Symbl [telephony API](https://docs.symbl.ai/#real-time-telephony-api) to call a phone number or meeting URL and get live transcription in a Next.js app.
Symbl comes with a Node.js SDK, so we can get the same results by using the SDK on the server side, since there are some use cases you would want to handle on the server side. For example, you may want to make business logic decisions based on the data received from Symbl. - -The main flow that we want to achieve is that you can enter your phone number in the UI and hit the ![](/img/call.png) button. - -This is how the UI of the demo app will look: - -![text page screenshot](/img/nodeui.png) - -In addition to the phone number, you can select whether you want to use the Public Switched Telephone Network (PSTN) or the Session Initiation Protocol (SIP). You will also be able to add advanced params such as a DTMF code or a summary email address, where the insights summary is sent once the conversation is finished. - -## Get Started - -We won't dive into all details of implementing a Next.js app from scratch, so you can always check the [demo app code](https://github.com/symblai/nextjs-symblai-demo) for more info. Also feel free to open issues asking questions or writing suggestions. - -#### Retrieve your credentials - -In order to use the Symbl API, you need to sign up and get your credentials. They include your `appId` and `appSecret`. You can find them on the home page of the platform. - -![](https://docs.symbl.ai/images/credentials-faf6f434.png) - -Add the credentials to the `next-config.js` file, filling in the `APP_ID` and `APP_SECRET` variables. - -```javascript -module.exports = { - env: { - APP_ID: '', - APP_SECRET: '', - }, -} -``` - -In order to see the demo app in action, you can clone the repo, run `yarn` and then `yarn dev`. - -The page you are looking for in this tutorial is `/phone` or this [file](https://github.com/symblai/nextjs-symblai-demo/blob/master/pages/phone/index.tsx) - -At first you will see the following. ![](/img/notlogged.png) - -In order to see the app we will need to log in. - -### Authenticating - -When using the REST API, we need to pass the auth token in a header. For that we've created the `ProtectedPage` component. This component executes a Symbl-specific REST endpoint to retrieve the auth token and store it in context. - -Later on we can retrieve this token from the helper hook `useAuth`, which is basically a wrapper for `useContext`. - -`export const useAuth = () => useContext(AuthContext)` - -This is how we would retrieve the token: - -```javascript -async function loginToSymbl() { - const response = await fetch('https://api.symbl.ai/oauth2/token:generate', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - mode: 'cors', - body: JSON.stringify({ - type: 'application', - appId: process.env.APP_ID, - appSecret: process.env.APP_SECRET, - }), - }) - const json = await response.json() - console.log('Access Token is: ', json) - setToken(json.accessToken) -} -``` - -Later on, in any place in the application, we can use `const { token } = useAuth()` to get the token. - -## Calling phone or meeting using REST API - -First of all, what we need to do is collect parameters from the user to send to the Symbl Telephony REST API, so Symbl will know how to initiate the call and how to process the conversation. - -- `type` - Symbl supports two meeting types: `PSTN` and `SIP` -- `phoneNumber` - Naturally we need to have a phone number to call, but in case of the `SIP` type we will need a URL to call, something like `sip:124@domain.com`. For simplicity we will pass this data under the `phoneNumber` parameter.
- `dtmf` - This is the meeting code, which we need to pass in case the meeting has one -- `summaryEmail` - you can pass a list of emails which will receive the call summary email. -- `insightTypes` - there are several insight types that we can pass to Symbl. - -Collecting these parameters is basically a set of React controlled input fields. You can check the `PhoneConfigurations.tsx` component if you are curious how it's implemented. - -This component has two different behaviours. One is calling the `api/call` route, which we've covered in the [how-to-nextjs-node-sdk](./how-to-nextjs-node-sdk.md) guide. The other version of it, instead of calling `api/call` with user-entered parameters, is to call - -`https://api.symbl.ai/v1/endpoint:connect` - -This will look like this: - -```typescript -const phoneNumberOrUri = - type === 'sip' ? { uri: _phoneNumber } : { phoneNumber: _phoneNumber } -const res = await fetch('https://api.symbl.ai/v1/endpoint:connect', { - method: 'POST', - headers: { - 'Authorization': `Bearer ${authToken}`, - 'Content-Type': 'application/json', - }, - mode: 'cors', - body: JSON.stringify({ - operation: 'start', - endpoint: { - type, - ...phoneNumberOrUri, - dtmf, - }, - insightTypes, - actions: [ - { - invokeOn: 'stop', - name: 'sendSummaryEmail', - parameters: { - emails: summaryEmails.split(','), - }, - }, - ], - data: { - session: { - name: 'Call from Next.js Phone(Client only)', - }, - }, - }), -}) -``` - -When we execute the `fetch` call to the `endpoint:connect` endpoint, you will get a phone call either on your phone or `sip` URL, according to what you've chosen in the UI. - -What we also need to do is actually get the response of the API call and set our connectionId in the app. We will do that by calling the `connectionResponseHandler` callback function that we pass to the `PhoneConfigurations.tsx` component. There we will set our response as `conversationData` in our app. - -## Connecting via websockets to get live transcriptions - -Now that we've got our phone call, we need to have a way to get real-time insights on the ongoing conversation. What we will do is monitor `conversationData` for changes, and if it changes, we will start our websocket. - -```typescript -useEffect(() => { - let ws: any = null - - //Check if we have conversationData and if we haven't already started websocket connection - if (conversationData && !ws) { - ws = new WebSocket( - `wss://api.symbl.ai/session/subscribe/${connectionId}?access_token=${token}` - ) - ws.onmessage = (event: MessageEvent) => { - const data = JSON.parse(event.data) - if (data.type === 'message_response') { - setMessages(data.messages) - } - if (data.type === 'transcript_response') { - setLiveTranscript(data) - } - setRealTimeData(data) - } - } - - // cleanup method which will be called before the next execution, in our case unmount. - return () => { - ws && ws.close() - } -}, [conversationData]) -``` - -Here we will subscribe to the Symbl websocket URL `wss://api.symbl.ai/session/subscribe/${connectionId}?access_token=${token}` to get real-time insights on the ongoing conversation. - -We will get either a `transcript_response` type or `message_response` type or `insight_response` type.
For example `message_response` will look like this: - -```json -{ - "type": "message_response", - "messages": [ - { - "from": {}, - "payload": { - "content": "Yeah, tell us sir.", - "contentType": "text/plain" - }, - "duration": { - "startTime": "2020-10-22T15:32:14.500Z", - "endTime": "2020-10-22T15:32:16.000Z" - } - } - ] -} -``` - -Now what is left is to render the UI based on the data we get. - -## Summary - -In this How To we've briefly walked you through the key points and flows that should be implemented in order to use the [Symbl Telephony API](https://docs.symbl.ai/#real-time-telephony-api) to start a phone call and subscribe to real-time updates. -Even though the code shared in this How To is React specific, the general flow can be used with any framework: - -1. Authenticate and get the token -2. Call the Telephony API REST endpoint with the correct parameters - -```javascript -const endpoint = 'https://api.symbl.ai/v1/endpoint:connect' -``` - -3. Get a `conversationId` as a response -4. Use the [Conversation API](https://docs.symbl.ai/#conversation-api) to get insights about the conversation. - -You can read more about the Telephony API [here](https://docs.symbl.ai/#real-time-telephony-api) diff --git a/javascript-sdk/overview/complete-example.md b/javascript-sdk/overview/complete-example.md deleted file mode 100644 index 6008be4d..00000000 --- a/javascript-sdk/overview/complete-example.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -id: complete-example -title: Complete Example -sidebar_label: Complete Example ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import {Redirect} from '@docusaurus/router'; - - - - - - - - - -```js -const { - sdk, - SpeakerEvent -} = require('symbl-node'); - -sdk.init({ - // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai - appId: 'APP_ID', - appSecret: 'APP_SECRET', - basePath: 'https://api.symbl.ai' -}).then(() => { - - console.log('SDK Initialized'); - sdk.startEndpoint({ - endpoint: { - type: 'pstn', - phoneNumber: 'PHONE_NUMBER', // Use international code.
- dtmf: 'DTMF_MEETING_ID' // if password protected, use "dtmf": "#,##" - } - }).then(connection => { - - const connectionId = connection.connectionId; - console.log('Successfully connected.', connectionId); - const speakerEvent = new SpeakerEvent({ - type: SpeakerEvent.types.startedSpeaking, - user: { - userId: 'john@example.com', - name: 'John' - } - }); - - setTimeout(() => { - speakerEvent.timestamp = new Date().toISOString(); - sdk.pushEventOnConnection( - connectionId, - speakerEvent.toJSON(), - (err) => { - if (err) { - console.error('Error during push event.', err); - } else { - console.log('Event pushed!'); - } - } - ); - }, 2000); - - setTimeout(() => { - speakerEvent.type = SpeakerEvent.types.stoppedSpeaking; - speakerEvent.timestamp = new Date().toISOString(); - - sdk.pushEventOnConnection( - connectionId, - speakerEvent.toJSON(), - (err) => { - if (err) { - console.error('Error during push event.', err); - } else { - console.log('Event pushed!'); - } - } - ); - }, 12000); - - // Scheduling stop endpoint call after 90 seconds - setTimeout(() => { - sdk.stopEndpoint({ - connectionId: connection.connectionId - }).then(() => { - console.log('Stopped the connection'); - console.log('Summary Info:', connection.summaryInfo); - console.log('Conversation ID:', connection.conversationId); - }).catch(err => console.error('Error while stopping the connection.', err)); - }, 90000); - - }).catch(err => console.error('Error while starting the connection', err)); - -}).catch(err => console.error('Error in SDK initialization.', err)); -``` - - - - - - -#### Below is a quick simulated speaker event example that: - -1. Initializes the SDK. -2. Initiates a connection with an endpoint. -3. Sends a speaker event of type `startedSpeaking` for user John. -4. Sends a speaker event of type `stoppedSpeaking` for user John. -5. Ends the connection with the endpoint. - -Strictly for illustration and understanding purposes, the code below pushes events by simply using the `setTimeout()` method periodically, but -in real usage they should be pushed as they occur. - - - - - -```js -const { - sdk, - SpeakerEvent -} = require('symbl-node'); - -sdk.init({ - // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai - appId: 'APP_ID', - appSecret: 'APP_SECRET', - basePath: 'https://api.symbl.ai' -}).then(() => { - console.log('SDK Initialized'); - sdk.startEndpoint({ - endpoint: { - type: 'pstn', - phoneNumber: 'PHONE_NUMBER', // Use international code.
- dtmf: 'DTMF_MEETING_ID' // if password protected, use "dtmf": "#,##" - }, - actions: [{ - "invokeOn": "stop", - "name": "sendSummaryEmail", - "parameters": { - "emails": [ - "EMAIL_ADDRESS" - ] - } - }], - data: { - session: { - name: 'My Meeting Name' // Title of the Meeting - }, - users: [{ - user: { - name: "John", - userId: "john@example.com", - role: "organizer" - } - }, - { - user: { - name: "Mary", - userId: "mary@example.com" - } - }, - { - user: { - name: "Jennifer", - userId: "jennifer@example.com" - } - } - ] - } - }).then((connection) => { - console.log('Successfully connected.'); - - // Events pushed in between - setTimeout(() => { - // After a successful stop endpoint call, an email with the summary will be sent to the addresses listed in "emails" above - sdk.stopEndpoint({ - connectionId: connection.connectionId - }).then(() => { - console.log('Stopped the connection'); - console.log('Summary Info:', connection.summaryInfo); - console.log('Conversation ID:', connection.conversationId); - }).catch(err => console.error('Error while stopping the connection.', err)); - }, 30000); - - }).catch(err => console.error('Error while starting the connection', err)); - -}).catch(err => console.error('Error in SDK initialization.', err)); -``` - - - - diff --git a/javascript-sdk/overview/initialise.md b/javascript-sdk/overview/initialise.md deleted file mode 100644 index 5ff02c57..00000000 --- a/javascript-sdk/overview/initialise.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -id: initialise -title: Initialize the SDK -sidebar_label: Initialize the SDK ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import {Redirect} from '@docusaurus/router'; - - - - - -## Installation - -First start by installing our Javascript SDK: - -```bash -$ npm install --save symbl-node -``` - - -## Initialize - - - - - -```js - sdk.init({ - // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai - appId: APP_ID, - appSecret: APP_SECRET, - basePath: 'https://api.symbl.ai' - }) - .then(() => console.log('SDK Initialized.')) - .catch(err => console.error('Error in initialization.', err)); - ``` - - - - - Import the SDK using the ES5 or ES6 way: - - - - - -```js - -var sdk = require('symbl-node').sdk; -``` - - - - - -```js - -import { sdk } from 'symbl-node'; -``` - - - diff --git a/nextjs/introduction.md b/nextjs/introduction.md deleted file mode 100644 index f1f99bc7..00000000 --- a/nextjs/introduction.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -id: introduction -title: NextJS App -sidebar_label: NextJS App ---- - -Symbl is a comprehensive suite of APIs for analyzing natural human conversations - both for your team’s internal conversations and of course the conversations you are having with your customers. Built on our Contextual Conversation Intelligence (C2I) technology, the APIs enable you to rapidly incorporate human-level understanding that goes beyond simple natural language processing of voice and text conversations. - -# Getting started - -To get started, you’ll need your account credentials and Node.js installed (> v8.x) on your machine. - -Your credentials include your appId and appSecret. You can find them on the home page of the platform. - -![App ID](https://docs.symbl.ai/images/credentials-faf6f434.png) - -After you retrieve them, you need to set them up under the `.env` file in this [repo](https://github.com/symblai/nextjs-symblai-demo) - -``` -APP_ID= -APP_SECRET= -```
- -This app is using [ReactJS](https://reactjs.org/), [Typescript](https://www.typescriptlang.org/) and [NextJS](https://nextjs.org/). Let's take a look at folder structure - -- `pages` In Next.js, a page is a React Component exported from a `.js`, `.jsx`, `.ts`, or `.tsx` file in the pages directory. Each page is associated with a route based on its file name. You can read more about how NextJS works with pages [here](https://nextjs.org/docs/basic-features/pages) - -- `pages/api` here reside NextJS API routes. Here we have `call` and `subscribeToPhoneEvents` routes that will be translated to `/api/call` and `/api/subscribeToPhoneEvents` paths in the browser. In this routes we showcase how you can use Symbl Node SDK to get real-time data from API. You can read more about NodeSDK [here](https://docs.symbl.ai/#symbl-sdk-node-js) or read a how to guide [here](https://docs.symbl.ai/#get-live-transcription-phone-call-node-js-telephony) - -### Index page - -On the first page of the app we showcase how you can call to your phone number (or meeting number) by providing the phone and multiple parameters. You will get conversational insights, transcriptions, messages, which `Symbl.ai` gets from this call. Architecture looks similar to this: - -![](https://docs.symbl.ai/images/tutorial_phone_integration-f54ba415.png) - -### REST API - -Symbl has REST API to do majority of things like getting insights, processing audio, video and text, get conversation data and much more. Rest of the pages of the app are focused on this API and showcase different use cases of Symbl from getting tasks and questions from existing conversation to uploading and processing video and audio files. You can even get transcriptions from the video and by clicking on these transcriptions, navigate to specific parts of the video. You can see this behavior on `/video` page. - -# Running app locally - -add credentials to `next-config.js` file filling in `APP_ID` and `APP_SECRET` variables. - -```javascript -module.exports = { - env: { - APP_ID: '', - APP_SECRET: '', - }, -} -``` - -run `yarn` or `npm install`. 
To run the app, use `yarn dev` - -Relevant docs section: - -- [Getting started with Symbl](https://docs.symbl.ai/#getting-started) -- [API overview using Postman](https://docs.symbl.ai/#postman) -- [Authentication](https://docs.symbl.ai/#authentication) - -How Tos are available [here](https://docs.symbl.ai/#how-tos) - -In this app represented are the following - -- Symbl Node SDK (Check out `api/call` file) -- REST Telephony API (`/phone` page) -- Conversational API (`/conversations` page) -- Async Audio API (`/audio` page) -- Async Video API (`/video` page) -- Async Text API (`/text` page) - Comming soon -- Symbl react elements package available [here](https://www.npmjs.com/package/@symblai/react-elements) diff --git a/python-sdk/get-speech-to-text-multiple-audio-files-directory.md b/python-sdk/get-speech-to-text-multiple-audio-files-directory.md deleted file mode 100644 index 23c14e16..00000000 --- a/python-sdk/get-speech-to-text-multiple-audio-files-directory.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -id: python-sdk-tutorials-speech-to-text -title: Get Speech-to-Text of Multiple Audio Files in a Directory -sidebar_label: Get Speech-to-Text of Multiple Audio Files in a Directory -slug: /python-sdk/python-sdk-tutorials-speech-to-text ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -```python -import symbl -from os import listdir -from os.path import isfile, join - -# returns lambda function with fileName which is under processing -def save_transcriptions_in_file(fileName): - return lambda conversation: on_success(conversation, fileName) - -# returns actual callback to save the transcriptions of a conversation in a file -def on_success(conversation, fileName): - transcriptions = conversation.messages() - - file = open(fileName + ".txt","w+") - file.write(str(transcriptions)) - file.close() - -# Look [here][unicodeerror], if you're getting unicode error -directory_path = r'' - -files = [join(directory_path, file) for file in listdir(directory_path) if isfile(join(directory_path, file))] - -# Process audio files in the above mentioned directory -for file in files: - job = symbl.Audio.process_file( - # credentials={app_id: , app_secret: }, #Optional, Don't add this parameter if you have symbl.conf file in your home directory - file_path=file, wait=False).on_complete(save_transcriptions_in_file(file)) -``` \ No newline at end of file diff --git a/python-sdk/get-topics-and-action-items-from-call.md b/python-sdk/get-topics-and-action-items-from-call.md deleted file mode 100644 index 9d05de8a..00000000 --- a/python-sdk/get-topics-and-action-items-from-call.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -id: python-sdk-tutorials-topics-and-action-items -title: Get Topics and Action Items from Call -sidebar_label: Get Topics and Action Items from Call -slug: /python-sdk/python-sdk-tutorials-topics-and-action-items ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -Use the code given below to get Topics and Action Items using minimal lines of code: - -```py -import symbl - -# Process audio file -conversation = symbl.Audio.process_file( - # credentials={app_id: , app_secret: }, #Optional, Don't add this parameter if you have symbl.conf file in your home directory - file_path=) - -# Printing topics and actions -print("Topics are = " + str(conversation.topics())) - -print("Action Items = " + str(conversation.actions())) -``` \ No newline at end of file diff --git a/python-sdk/get-video-analysis-on-email.md b/python-sdk/get-video-analysis-on-email.md 
deleted file mode 100644
index c587a9ef..00000000
--- a/python-sdk/get-video-analysis-on-email.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-id: video-analysis-email
-title: Get Video Conferencing Analysis on your Email
-sidebar_label: Get Video Conferencing Analysis on your Email
-slug: /python-sdk/tutorials/video-analysis-email
----
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-Symbl joins your video conferencing call (e.g., Zoom or Google Hangouts) and sends an analysis to your email after the call has ended.
-
-```python
-import symbl
-
-connection = symbl.Telephony.start_pstn(
-    # credentials={app_id: , app_secret: }, #Optional, Don't add this parameter if you have symbl.conf file in your home directory
-    phone_number=phoneNumber,
-    dtmf = ",,{}#,,{}#".format(meetingId, password),
-    actions = [
-      {
-        "invokeOn": "stop",
-        "name": "sendSummaryEmail",
-        "parameters": {
-          "emails": [
-            emailId
-          ],
-        },
-      },
-    ]
-  )
-
-print(connection)
-```
-
-Parameter | Required | Description | Value
------------ | ------- | ------- | ------- |
-`phoneNumber` | Mandatory | Phone number including country code. If you are dialing in to a conference tool (e.g., Zoom, Google Hangouts) via phone, use the dial-in numbers provided. | `"+11234567890"`
-`dtmf`| Optional | The DTMF details for dialing into your conference tool, in the format `",,{}#,,{}#".format(meetingId, password)`. | `meetingId` - The meeting ID of your conference tool. Example: `"12345"`. `password` - The meeting password of your conference tool. Example: `"A1B2C3D4"`.
-`emailId` | Mandatory | The email ID where you'd like to receive the analysis. This should be sent as a part of the `actions`. | `philson@example.com`
\ No newline at end of file
diff --git a/python-sdk/live-audio-transcript-using-system-microphone.md b/python-sdk/live-audio-transcript-using-system-microphone.md
deleted file mode 100644
index 60689f8a..00000000
--- a/python-sdk/live-audio-transcript-using-system-microphone.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-id: python-sdk-tutorials-live-transcript
-title: Get Live Audio Transcript using System Microphone
-sidebar_label: Get Live Audio Transcript using System Microphone
-slug: /python-sdk/python-sdk-tutorials-live-transcript
----
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-```py
-import symbl
-
-connection = symbl.Streaming.start_connection()
-
-connection.subscribe({'transcript_response': lambda response: print('got this response from callback', response)})
-
-connection.send_audio_from_mic()
-```
\ No newline at end of file
diff --git a/resources/testing.md b/resources/testing.md
deleted file mode 100644
index 695b4ce2..00000000
--- a/resources/testing.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-id: testing
-title: Testing
-sidebar_label: Testing
----
-
-Now that you've built out an integration using either the Voice SDK or Voice API, let's test to make sure your integration is working as expected.
-
-1. If you are dialed in with your phone number, try speaking the following sentences to see the generated output:
-
-* "Hey, it was nice meeting you yesterday. Let's catch up again next week over coffee sometime in the evening. I would love to discuss the next steps in our strategic roadmap with you."
-
-* "I will set up a meeting with Roger to discuss our budget plan for the next quarter and then plan out how much we can set aside for marketing efforts.
I also need to sit down with Jess to talk about the status of the current project. I'll set up a meeting with her probably tomorrow before our standup."
-
-2. If you are dialed into a meeting, try running any of the following videos with your meeting platform open and view the summary email that gets generated:
-
-* Scrum Meeting
-* Progress Meeting
-
-3. Try tuning your summary page with query parameters to customize your output.
diff --git a/resources/usecases.md b/resources/usecases.md
deleted file mode 100644
index 4bad4ae6..00000000
--- a/resources/usecases.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-id: usecases
-title: Symbl Usecases
-sidebar_label: Symbl Usecases
-
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-
-Using Symbl, you can build use cases for support, sales, and collaboration apps, as well as workflow automation across single or multiple conversations, to identify real-time growth opportunities, create indexed knowledge from conversations, and drive productivity.
-
-### Meetings & UCaaS
-Applying primarily to unified communication and collaboration (UCaaS) platforms, you can add real-time recommendations of action items and next steps as part of your existing workflow. This would meaningfully improve meeting productivity by surfacing the things that matter as the meeting occurs. Beyond real-time action items, take advantage of automated meeting summaries delivered to your preferred channel, like email, chat, Slack, calendar, etc.
-
-Use real-time contextual recommendations to enable participants to drive efficiencies in their note-taking, save time, and focus more on the meeting itself. Action items are surfaced contextually and in real time, and can be automated to trigger your existing workflows.
-
-Post-meeting summaries are helpful for users who like to get more involved in the conversation as it happens and prefer revisiting information and action items after the meeting.
- -#### Benefits: - -* Humans tend to be slightly subjective when taking notes. Remove bias from notes with an objective conversation intelligence service that contextually surfaces what matters.
-* Humans are actually not that great at multitasking. Increase participation and engagement by adding a highly accurate note-taking AI service to the meeting.
-* Humans share… most of it. Access and search through complete meeting transcripts, meeting notes, action items, summaries, insights, contextual topics, questions, signals, etc.
-* Humans create culture. Understand patterns and trends in your organization’s meeting culture - sentiment, talk ratios, most productive conversations, etc.
-
-### Customer Care & CCaaS
-As we understand it, customer care performance can be measured by three proxy metrics: customer satisfaction, time spent on call, and the number of calls serviced.
-
-What if the introduction of a real-time passive conversation intelligence service into each call were to improve all three metrics at once? Real-time contextual understanding leads to suggested actions that a customer care agent can act upon during the call, enabling the agent to:
-
-1. Focus on the human connection with the customer.
-2. Come to a swifter resolution thanks to task automation.
-3. Serve more customers with an elevated experience during a shift.
-
-Further, the [Symbl.ai](https://symbl.ai) platform is also capable of automating post-call data collection. This enables analysis of support conversations over time, agents, shifts, and groups, which leads to a better understanding of pain points, topics of customer support conversations, etc.
-
-#### Benefits: Support Organization
-* Improved customer experience thanks to more engaged support conversations.
-* Reduced average call handling time thanks to automated real-time actions.
-* Better data for coaching and benchmarking support staff.
-* High-level understanding of topics and summaries of support conversations.
-* Emotional analysis of conversational data.
-
-### Sales Enablement & CRM
-Digital communication platforms used for sales engagements and customer interactions need to capture conversational data for benchmarking performance, improving net sales, and identifying and replicating the best-performing sales scripts.
-
-Use [Symbl.ai](https://symbl.ai) to identify top-performing pitches by leveraging real-time insights. Accelerate the sales cycle by automating suggested action items in real time, such as scheduling tasks and follow-ups via outbound work tool integrations. Keep your CRM up to date by automating the post-call entry with useful summaries.
-
-#### Benefits: Sales Agent
-* Real-time suggested actions.
-* Real-time analysis & insights from the conversation.
-* Auto-scheduling of tasks and follow-ups through outbound work tool integrations.
-* Full, searchable transcripts of sales conversations.
-* Automated post-call entry into your CRM.
-
-#### Benefits: Sales Enablement / VP of Sales
-* A high-level performance view of the sales function.
-* Customizable dashboard to view calls with different filters.
-* Understand what works best in a sales call: topics, questions, competitor mentions, etc.
-* Replicate the best-performing scripts to train and coach your whole team to success.
-
-### Social Media Conversations
-Customers interact a lot with brands on social media and other digital channels. These interactions include feedback, reviews, complaints, and many other mentions. This is valuable data that, if used properly, can yield insights for the business.
-
-Symbl's APIs can be used along with social listening tools to extract and categorize all of this into actionable insights. For example, topics can be very helpful in abstracting data from product reviews, threads of conversation, and social media comments. Questions and requests from social interactions and forums can be identified to build a knowledge base and direct the customer conversations to the right resources.
-
-With the right integrations to CRM and knowledge base tools, insights from social conversations can lead to a better understanding of customer sentiment towards the brand and more efficient customer service on social channels.
-
-#### Benefits for Brands
-* Extract topics from reviews based on different levels of ratings and identify what leads to good/bad ratings.
-* Evaluate influencers and affiliates to work with the brand and ensure the right messaging throughout the campaign.
-* Understand the customer's voice from comments and live interactions on Facebook and YouTube channels.
-* Identify and document questions and requests from specific customers on product forums, comments, and replies to social media posts.
-* Guide customers to relevant knowledge base articles or support streams based on their complaints and queries on social media.
-* Enrich customer data in your CRM based on insights identified from customer-specific social interactions.
diff --git a/tools/cheat-sheet-curl.md b/tools/cheat-sheet-curl.md
deleted file mode 100644
index ee87af8b..00000000
--- a/tools/cheat-sheet-curl.md
+++ /dev/null
@@ -1,541 +0,0 @@
----
-id: cheat-sheet-for-curl-commands
-title: Cheat Sheet for `cURL` Commands
-sidebar_label: cheat sheet
----
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-The following cheat sheet of `cURL` commands for Symbl.ai's APIs provides pre-formatted code for you to run in your terminal. The cheat sheet provides commands for:
-
-
-* [Authentication](#authentication)
-* [Telephony API](#telephony-api)
-  * [Connect a PSTN Call](#connect-a-pstn-call)
-  * [Connect a SIP Call](#Connect-a-SIP-Call)
-  * [Stop a Call](#Stop-a-Call)
-
-
-### Authentication
-
-POST request with which the Authentication API takes the `appId` and the `appSecret` and returns a JSON Web Token.
-
-Here is the API endpoint:
-`https://api.symbl.ai/oauth2/token:generate`
-
-```bash
-curl --location --request POST 'https://api.symbl.ai/oauth2/token:generate' \
---header 'Content-Type: application/json' \
---data-raw '{
-    "type": "application",
-    "appId": "",
-    "appSecret": ""
-}'
-```
-The output is the following:
-
-```bash
-{"accessToken":"","expiresIn":86400}
-```
-
-## Telephony API
-
-The Telephony API has three API endpoints.
-
-### Connect a PSTN Call
-
-POST request with which to initiate a PSTN call.
-
-Here is the API endpoint:
-`https://api.symbl.ai/v1/endpoint:connect`
-
-```bash
-curl --location --request POST 'https://api.symbl.ai/v1/endpoint:connect' \
---header 'x-api-key: $AUTH_TOKEN' \
---header 'Content-Type: application/json' \
---data-raw '{
-  "operation": "start",
-  "endpoint": {
-    "type" : "pstn",
-    "phoneNumber": "+12532158782",
-    "dtmf": "6671425093"
-  },
-  "actions": [{
-    "invokeOn": "stop",
-    "name": "sendSummaryEmail",
-    "parameters": {
-      "emails": [
-        "__email_address__"
-      ]
-    }
-  }],
-  "data" : {
-    "session": {
-      "name" : "__name_of_this_call__"
-    }
-  }
-}'
-```
-
-### Connect a SIP Call
-
-POST request with which to initiate a SIP call.
-
-Here is the API endpoint:
-`https://api.symbl.ai/v1/endpoint:connect`
-
-```bash
-curl --location --request POST 'https://api.symbl.ai/v1/endpoint:connect' \
---header 'x-api-key: $AUTH_TOKEN' \
---header 'Content-Type: application/json' \
---data-raw '{
-  "operation": "start",
-  "endpoint": {
-    "type" : "sip",
-
-    // Replace with a valid SIP URI, accessible over internet. See more - https://tools.ietf.org/html/rfc5630
-    // SIP with RTP and SIPS with SRTP for secure VoIP traffic are supported
-    // URI parameters can be passed in
-    // include country code, example - "sip:john.99332@example.com"
-    "uri": "__sip_uri__"
-  },
-  "actions": [{
-    "invokeOn": "stop",
-    "name": "sendSummaryEmail",
-    "parameters": {
-      "emails": [
-        // list of valid email addresses.
-        // If added, an email will be sent at the end of the call
-        "__email_address__"
-      ]
-    }
-  }],
-  "data" : {
-    "session": {
-      // Give a name to your meeting/call
-      "name" : "__name_of_this_session__"
-      // Optionally, send participants in the meeting/call, Uncomment following '\''users'\'' array.
-      // "users": [{
-      //   "user": {
-      //     "name": "John",
-      //     "userId": "john@example.com",
-      //     "role": "organizer"
-      //   }
-      // },
-      // {
-      //   "user": {
-      //     "name": "Mary",
-      //     "userId": "mary@example.com"
-      //   }
-      // }]
-    }
-  }
-}'
-```
-
-### Stop a Call
-
-POST request with which to stop a call of either type.
- -Here is the API endpoint: -`https://api.symbl.ai/v1/endpoint:connect` - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/endpoint:connect' \ ---header 'Content-Type: application/json' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---data-raw '{ - "operation": "stop", - "connectionId": "" -}' -``` - -## Async API - -### Process Text - -POST request with which to process Text. - -Here is the API endpoint: -`https://api.symbl.ai/v1/process/text` - - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/process/text' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "messages": [ - { - "payload": { - "content": "Hello. So this is a live demo that we are trying to give very we are going to show how the platform detects various insights can do transcription in real time and also the different topics of discussions, which would be generated after the call is over, and they will be an email that will be sent to the inbox. So that is the idea. So I am going to do a quick conversation. I would say where I will demonstrate all of this great catching up. Thanks for calling good to hear. From you. And I would love to hear more about what you have to offer? I will set up a time and appointment probably sometime tomorrow evening where we can go over the documents that you'\''re providing. I love all the plants. I just need to discuss with my family in terms of which one will we go forward with it? It very excited to hear from you and the discount and look forward to talking sharply. I have a quick question though. Is there basically website? Where I can go to and look at all these details myself. It will be very helpful. Can you also share the quotation to me on email so that I can go ahead and talk about it with my other kind of folks in the family? That'\''s it. Thanks a lot. Thanks for calling good catching up. Talk soon.", - "contentType": "text/plain" - }, - "from": { - "name": "John", - "userId": "john@example.com" - } - } - ] -}' - -``` - -POST request with which to create a conversation from email. - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/process/text' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---data-raw '{ - "messages": [ - { - "payload": { - "content": "Hi Mike, Natalia here. Hope you don’t mind me reaching out. Who would be the best possible person to discuss internships and student recruitment at ABC Corp? Would you mind pointing me toward the right person and the best way to reach them? Thanks in advance for your help, I really appreciate it!" - }, - "from": { - "userId": "natalia@example.com", - "name": "Natalia" - } - }, - { - "payload": { - "content": "Hey Natalia, thanks for reaching out. I am connecting you with Steve who handles recruitements for us." - }, - "from": { - "userId": "mike@abccorp.com", - "name": "Mike" - } - }, - { - "payload": { - "content": "Thanks Mike! Great to connect with you Steve. I would really like to learn more about your recruitment needs. A quick call would be helpful. What time works best for you?" - }, - "from": { - "userId": "natalia@example.com", - "name": "Natalia" - } - }, - { - "payload": { - "content": "Hi Natalia, great to connect with you as well. I am pretty open tomorrow after 2 pm." - }, - "from": { - "userId": "steve@abccorp.com", - "name": "Steve" - } - }, - { - "payload": { - "content": "That'\''s perfect. I will block some time at 2pm tomorrow to discuss further. Looking forward to talk to you. Looking forward to talk to you." 
- }, - "from": { - "userId": "natalia@example.com", - "name": "Natalia" - } - } - ] -}' -``` - -PUT request with which to append messages to an existing conversation. - -```bash -curl --location --request PUT 'https://api.symbl.ai/v1/process/text/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "messages": [ - { - "payload": { - "content": "Hi Steve, it was great chatting with you! I was really impressed with your recruitment philosophy. I will set up meetings with two students for the internship positions that match your screening criteria over next week. Hope you have a great weekend." - }, - "from": { - "userId": "natalia@example.com", - "name": "Natalia" - } - }, - { - "payload": { - "content": "Thanks Natalia. It was great talking with you as well. Looking forward." - }, - "from": { - "userId": "steve@abccorp.com", - "name": "Steve" - } - } - ] -}' - -``` -### Audio Processing with a File - -POST request with which to submit a new file for transcription. The file must conform to a codec for audio. - -Here is the API endpoint: -`https://api.symbl.ai/v1/process/audio` - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/process/audio' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: audio/wav' \ ---data-binary '@' -``` - -PUT request with which to append to an existing file whose codec conforms to audio. - -`https://api.symbl.ai/v1/process/audio/{{conversation_id}}` - -```bash -curl --location --request PUT 'https://api.symbl.ai/v1/process/audio/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: audio/mpeg' \ ---data-binary '@' -``` - -### Audio Processing with a URL - -POST request with which to submit a new file for a new `conversationId`. - -Here is the API endpoint: -`https://api.symbl.ai/v1/process/audio/url` - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/process/audio/url' \ ---header 'x-api-key:' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "url": "", - "confidenceThreshold": 0.6, - "timezoneOffset": 0 -}' -``` -PUT request with which to append to an existing file whose codec conforms to audio. - -```bash -curl --location --request PUT 'https://api.symbl.ai/v1/process/audio/url/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "url": "", - "confidenceThreshold": 0.6, - "timezoneOffset": 0 -}' -``` -### Video Processing with a File - -POST request with which to submit a new file whose codec conforms to video. - -Here is the API endpoint: -`https://api.symbl.ai/v1/process/video` - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/process/video' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: video/mp4' \ ---data-binary '@' -``` -PUT request with which to append to a file whose codec conforms to video. - -```bash -curl --location --request PUT 'https://api.symbl.ai/v1/process/video/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: video/mp4' \ ---data-binary '@' -``` - -### Video Processing with URL - -POST request with which to post a file whose codec conforms to a video through a url. 
- -Here is the API endpoint: -`https://api.symbl.ai/v1/process/video/url` - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/process/video/url' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "url": "", - "confidenceThreshold": 0.6, - "timezoneOffset": 0 -}' -``` - -PUT request with which to append to a file whose codec conforms to video through a url. - -```bash - -curl --location --request PUT 'https://api.symbl.ai/v1/process/video/url/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "url": "", - "confidenceThreshold": 0.6, - "timezoneOffset": 0 -}' -``` - -## Conversation API - -### Conversation - -GET request with which to get a conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Messages (Transcripts) - -GET request with which to get messages. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/messages` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/messages' \ ---header 'x-api-key: $AUTH_TOKEN' \ - -``` - -### Members (Participants / Attendees) - -GET request with which to get members from a conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/members` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/members' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -PUT request with which to update an existing list of members with new members. - -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/members/{{member_id}}` - -```bash -curl --location --request PUT 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/members/' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "id": "", - "email": "john@example.com", - "name": "John" -}' -``` - -### Topics -GET request with which to get topics from a conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/topics?parentRefs=true` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/6335358679646208/topics?parentRefs=true' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Questions -GET request with which to get questions from a conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/questions` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/questions' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Follow Ups -GET request with which to request follow ups. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/follow-ups` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/follow-ups' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Action Items -GET request with which to get action items. 
- -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/action-items` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/action-items' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Analytics -GET request with which to get action items from a conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/action-items` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/action-items' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Entities -GET request with which to get entities. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/entities` - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/entities' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -### Delete -DEL request with which to delete a conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}` - -```bash -curl --location --request DELETE 'https://api.symbl.ai/v1/conversations/{{conversation_id}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ -``` - -## Experience - -POST request for returning a summary of details on your conversation. - -Here is the API endpoint: -`https://api.symbl.ai/v1/conversations/{{conversation_id}}/experiences` - -```bash -curl --location --request POST 'https://api.symbl.ai/v1/conversations/{{conversation_id}}/experiences' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '{ - "name": "verbose-text-summary" -}' -``` - -## Job - -GET request with which to check the status of a job. - -Here is the API endpoint: - -```bash -curl --location --request GET 'https://api.symbl.ai/v1/job/{{jobId}}' \ ---header 'x-api-key: $AUTH_TOKEN' \ ---header 'Content-Type: application/json' \ ---data-raw '' -``` From 3e89e92f32a487cf9ecd547d099f337e605d6586 Mon Sep 17 00:00:00 2001 From: Adam Voliva Date: Fri, 3 Dec 2021 21:25:30 -0800 Subject: [PATCH 45/64] Update reference.md --- javascript-sdk/reference/reference.md | 68 +++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/javascript-sdk/reference/reference.md b/javascript-sdk/reference/reference.md index 78a50db7..120507a4 100644 --- a/javascript-sdk/reference/reference.md +++ b/javascript-sdk/reference/reference.md @@ -282,6 +282,12 @@ handlers: { onTopicResponse: (data) => { console.log('onTopicResponse', JSON.stringify(data, null, 2)) } + /** + * When Symbl detects a tracker word, this callback will be called. + */ + onTrackerResponse: (data) => { + console.log('onTrackerResponse', JSON.stringify(data.trackers, null, 2)); + } } ``` @@ -461,3 +467,65 @@ This callback provides you with any of the detected topics in real-time as they "type": "topic" }] ``` + +### onTrackerResponse + +This callback provides you with any of the detected trackers in real-time as they are detected. As with the [`onMessageCallback`](#onmessagecallback) this would also return every tracker in case of multiple streams. 
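To make the shape of this payload concrete, below is a minimal sketch that walks the `tracker_response` example shown in the next section. It assumes `data` is the object delivered to the `onTrackerResponse` handler; the helper name is illustrative and not part of the SDK.

```js
// Illustrative helper (not part of the SDK): walk each detected tracker,
// its matched values, and the messages the matches were detected in.
const logTrackers = (data) => {
  (data.trackers || []).forEach((tracker) => {
    console.log(`Tracker detected: ${tracker.name}`);
    tracker.matches.forEach((match) => {
      console.log(`  Matched value: ${match.value}`);
      match.messageRefs.forEach((ref) => {
        console.log(`    In message ${ref.id}: "${ref.text}"`);
      });
    });
  });
};
```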
+ +#### onTrackerResponse JSON Response Example + +```json +{ + "type": "tracker_response", + "isFinal": true, + "trackers": [ + { + "name": "Goodness", + "matches": [ + { + "type": "vocabulary", + "value": "This is awesome", + "messageRefs": [ + { + "id": "fa93aa64-0e8d-4697-bb52-e2916ca63192", + "text": "This is awesome.", + "offset": 0 + } + ], + "insightRefs": [] + }, + { + "type": "vocabulary", + "value": "Hello world", + "messageRefs": [ + { + "id": "8e720656-fed7-4b11-b359-3931c53bbcec", + "text": "Hello world.", + "offset": 0 + } + ], + "insightRefs": [] + } + ] + }, + { + "name": "Goodness", + "matches": [ + { + "type": "vocabulary", + "value": "I like it", + "messageRefs": [ + { + "id": "193dc144-2b55-4214-b211-ab83bd3e4a2e", + "text": "I love it.", + "offset": -1 + } + ], + "insightRefs": [] + } + ] + } + ], + "sequenceNumber": 1 +} +``` From d7a542a96e0f63be8190f5d30ee95a0130d4693c Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Tue, 7 Dec 2021 15:02:50 +0530 Subject: [PATCH 46/64] Receive Trackers in Spanish --- .../receive-trackers-in-spanish.md | 326 ++++++++++++++++++ 1 file changed, 326 insertions(+) create mode 100644 streamingapi/code-snippets/receive-trackers-in-spanish.md diff --git a/streamingapi/code-snippets/receive-trackers-in-spanish.md b/streamingapi/code-snippets/receive-trackers-in-spanish.md new file mode 100644 index 00000000..81fb2b79 --- /dev/null +++ b/streamingapi/code-snippets/receive-trackers-in-spanish.md @@ -0,0 +1,326 @@ +--- +id: receive-trackers-in-spanish +title: Receive Trackers in Spanish +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This tutorial goes over how you can use the Streaming API to receive Trackers of a conversation in the Spanish language. This example uses native JavaScript. + +:::note +Currently, we only support the English and Spanish language in Trackers for Streaming & Async API. Please feel free to reach out to us at support@symbl.ai for any queries. +::: + +**Prerequisite**: +1. **API Credentials**: You must have your Symbl API App ID and Secret. To grab your API Credentials, follow these steps given in the [Authentication](/docs/developer-tools/authentication) section. + +## Step 1: Define Spanish language in the Start Request Config + +When you're connecting to the [Web Socket](/docs/concepts/websockets) with Streaming API, you can define which language you wish to use in the `start_request` configuration. +For the Spanish language, we will set the `languageCode` key to Spanish (`es-ES`) in the `config` object as shown below: + +### Config + +```js +config: { + meetingTitle: "Spanish Trackers", + confidenceThreshold: 0.8, + timezoneOffset: 480, // Offset in minutes from UTC + languageCode: "es-ES", //Spanish language code + sampleRateHertz, + trackers: { + interimResults: true + } + }, +``` + +You can view the complete list of all the supported parameters passed in the configuration object [here](/docs/streaming-api/code-snippets/consume-trackers-with-streaming-api#parameter-description). + +## Step 2: Pass Tracker object in Spanish + +Now, define the keywords and phrases you wish to track in the `trackers` object as shown in the code snippet below. + +You must then pass these Spanish Trackers in the Streaming API request to receive tracked messages in Spanish. 
+ +```js +// Spanish Trackers + trackers: [ + { + name: 'deseos', + vocabulary: [ + "Hasta luego", + "Muchas gracias y que eres increíble", + "Gracias", + "Bueno", + "Realmente lindo", + "Ese fue uno de los más", + "De acuerdo, gracias", + "Así que, a la inversa, gracias por estar presente", + ], + }, { + name: 'dificil', + vocabulary: [ + 'Sí, no me amo por completo en este momento y creo que hay mucho trabajo por hacer y estoy luchando', + 'Sé que estás pasando por tu propia pequeña lucha', + 'He tenido otras luchas más grandes en la vida porque el módulo de ahí arriba, mantén la vida en la parte inferior del cuerpo', + 'Los problemas son luchas internas', + ] + }, { + name: 'dinero', + vocabulary: [ + "Si alguien te quita todo lo que quieres hoy, todo tu dinero, tu casa, y te pone en camino", + "Ahora, sea lo que sea, poseo más de la mitad del dinero", + "Tengo dinero", + ] + }, + ], +``` + +### Full Code Snippet +View the full-code snippet for getting Trackers in Spanish with Streaming API below: + +:::note +In the sample below, we have generated the Spanish Trackers using our JavaScript SDK. +::: + +```js +const { sdk } = require('@symblai/symbl-js') +const uuid = require('uuid').v4 +// In this example, we are using mic to get audio from the microphone and passing it on to the WebSocket connection. +const mic = require('mic') +const sampleRateHertz = 16000 +const micInstance = mic({ + rate: sampleRateHertz, + channels: '1', + debug: false, + exitOnSilence: 6, +}); +(async () => { + try { + // Initialize the SDK + await sdk.init({ + appId: appId, + appSecret: appSecret, + basePath: 'https://api-labs.symbl.ai', + }) + // Add your unique ID here + const id = uuid() + const connection = await sdk.startRealtimeRequest({ + id, + insightTypes: ['action_item', 'question'], + customVocabulary: ['John', 'Symbl'], // Custom Vocabulary + noConnectionTimeout: 100, //No Connection Timeout + // This shows the Spanish Trackers + trackers: [ + { + name: 'deseos', + vocabulary: [ + "Hasta luego", + "Muchas gracias y que eres increíble", + "Gracias", + "Bueno", + "Realmente lindo", + "Ese fue uno de los más", + "De acuerdo, gracias", + "Así que, a la inversa, gracias por estar presente", + ], + }, { + name: 'dificil', + vocabulary: [ + 'Sí, no me amo por completo en este momento y creo que hay mucho trabajo por hacer y estoy luchando', + 'Sé que estás pasando por tu propia pequeña lucha', + 'He tenido otras luchas más grandes en la vida porque el módulo de ahí arriba, mantén la vida en la parte inferior del cuerpo', + 'Los problemas son luchas internas', + ] + }, { + name: 'dinero', + vocabulary: [ + "Si alguien te quita todo lo que quieres hoy, todo tu dinero, tu casa, y te pone en camino", + "Ahora, sea lo que sea, poseo más de la mitad del dinero", + "Tengo dinero", + ] + }, + { + name: "covid", + vocabulary: [ + "wear mask", + "coughing", + "fever", + "cold", + "trouble breathing" + ] + } + ], + config: { + meetingTitle: "Spanish Trackers", + confidenceThreshold: 0.8, + timezoneOffset: 480, // Offset in minutes from UTC + languageCode: "es-ES", //Spanish language code + sampleRateHertz, + trackers: { + interimResults: true + } + }, + speaker: { + // Optional, if not specified, will simply not send an email in the end. 
userId: "john@example.com", // Update with valid email
        name: "John",
      },
      handlers: {
        onSpeechDetected: (data) => {
          if (data) {
            const { punctuated } = data
            console.log('Live: ', punctuated && punctuated.transcript)
            console.log('');
          }
          console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
        },
        onMessageResponse: (data) => {
          console.log('onMessageResponse', JSON.stringify(data, null, 2))
        },
        onInsightResponse: (data) => {
          console.log('onInsightResponse', JSON.stringify(data, null, 2))
        },
        onTopicResponse: (data) => {
          console.log('onTopicResponse', JSON.stringify(data, null, 2))
        },
        onTrackerResponse: (data) => {
          // When a tracker is detected in real-time
          console.log('onTrackerResponse', JSON.stringify(data, null, 2));
          if (!!data) {
            data.forEach((tracker) => {
              console.log(`Detected Tracker Name: ${tracker.name}`);
              console.log(`Detected Matches`);
              tracker.matches.forEach((match) => {
                console.log(`Tracker Value: ${match.value}`);
                console.log(`Messages detected against this Tracker`);
                match.messageRefs.forEach((messageRef) => {
                  console.log(`Message ID: ${messageRef.id}`);
                  console.log(`Message text for which the match was detected: ${messageRef.text}`);
                  console.log(`\n`);
                });
                console.log(`\n\n`);
                console.log(`Insights detected against this Tracker`);
                match.insightRefs.forEach((insightRef) => {
                  console.log(`Insight ID: ${insightRef.id}`);
                  console.log(`Insight text for which the match was detected: ${insightRef.text}`);
                  console.log(`Insight Type: ${insightRef.type}`);
                  console.log(`\n`);
                });
                console.log(`\n\n`);
              });
            });
          }
        },
      },
    });
    console.log('Successfully connected. Conversation ID: ', connection.conversationId);
    const micInputStream = micInstance.getAudioStream()
    /** Raw audio stream */
    micInputStream.on('data', (data) => {
      // Push audio from Microphone to websocket connection
      connection.sendAudio(data)
    })
    micInputStream.on('error', function (err) {
      console.log('Error in Input Stream: ' + err)
    })
    micInputStream.on('startComplete', function () {
      console.log('Started listening to Microphone.')
    })
    micInputStream.on('silence', function () {
      console.log('Got SIGNAL silence')
    })
    micInstance.start()
    setTimeout(async () => {
      // Stop listening to microphone
      micInstance.stop()
      console.log('Stopped listening to Microphone.')
      try {
        // Stop connection
        await connection.stop()
        console.log('Connection Stopped.')
      } catch (e) {
        console.error('Error while stopping the connection.', e)
      }
    }, 60 * 1000) // Stop connection after 1 minute i.e. 60 secs
  } catch (e) {
    console.error('Error: ', e)
  }
})();
```

### Connect Mic
After you connect, you will want to connect your device's microphone.
This code is inserted after the `connection` is established and before the closing of the `try...catch` block.

```js
const micInputStream = micInstance.getAudioStream()
/** Raw audio stream */
micInputStream.on('data', (data) => {
  // Push audio from Microphone to websocket connection
  connection.sendAudio(data)
})

micInputStream.on('error', function (err) {
  console.log('Error in Input Stream: ' + err)
})

micInputStream.on('startComplete', function () {
  console.log('Started listening to Microphone.')
})

micInputStream.on('silence', function () {
  console.log('Got SIGNAL silence')
})

micInstance.start()

setTimeout(async () => {
  // Stop listening to microphone
  micInstance.stop()
  console.log('Stopped listening to Microphone.')
  try {
    // Stop connection
    await connection.stop()
    console.log('Connection Stopped.')
  } catch (e) {
    console.error('Error while stopping the connection.', e)
  }
}, 60 * 1000) // Stop connection after 1 minute i.e. 60 secs
```

### Testing

To verify that you have connected the mic, run the following commands:

1. Create a JavaScript file named `app.js`.
2. Copy the above [code](/docs/streamingapi/code-snippets/receive-trackers-in-spanish/#connect-mic) into the file.
3. Replace the placeholder values with the values that you must use.
4. Use `npm` to install the required libraries:

```bash
$ npm install @symblai/symbl-js uuid mic
```
5. Now, in the terminal, run:

```bash
$ node app.js
```

If successful, you should receive a response in the console.

### Handlers Reference (Symbl SDK)

Read more about the supported Event Handlers in the following sections:

👉 [onSpeechDetected](/docs/javascript-sdk/reference/#onspeechdetected)
👉 [onMessageResponse](/docs/javascript-sdk/reference/#onmessageresponse)
👉 [onInsightResponse](/docs/javascript-sdk/reference/#oninsightresponse)
👉 [onTopicResponse](/docs/javascript-sdk/reference/#ontopicresponse)




From 11ce94952ec2a065c20fedd24c488c42950b7e38 Mon Sep 17 00:00:00 2001
From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com>
Date: Wed, 8 Dec 2021 14:04:40 +0530
Subject: [PATCH 47/64] Sentiment analysis on Messages and Topics. Updates to
 Receive Trackers in Spanish

---
 .../sentiment-analysis-on-messages.md         | 191 ++++++++++
 .../sentiment-analysis-on-topics.md           | 205 +++++++++++
 .../receive-trackers-in-spanish.md            | 326 ++++++++++++++++++
 3 files changed, 722 insertions(+)
 create mode 100644 async-api/code-snippets/sentiment-analysis-on-messages.md
 create mode 100644 async-api/code-snippets/sentiment-analysis-on-topics.md
 create mode 100644 streamingapi/code-snippets/receive-trackers-in-spanish.md

diff --git a/async-api/code-snippets/sentiment-analysis-on-messages.md b/async-api/code-snippets/sentiment-analysis-on-messages.md
new file mode 100644
index 00000000..ed82ec62
--- /dev/null
+++ b/async-api/code-snippets/sentiment-analysis-on-messages.md
@@ -0,0 +1,191 @@
---
id: sentiment-analysis-on-messages
title: Sentiment Analysis on Messages
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

---

Sentiment Analysis is the process of determining whether each message/line in a conversation is positive, negative, or neutral. Sentiment analysis on messages combines natural language processing and machine learning procedures to allot sentiment scores to each message.

:::note
Currently, en-US (English US) is the only language supported for Sentiment Analysis.
+::: + +For Sentiment Analysis on Messages, you need to process your conversation with Symbl and then use conversationId you received in Messages APIs and pass sentiment parameter to get the transcript with the sentiment, polarity, and score. + +To get Sentiment Analysis on Messages, follow the detailed steps given below: + +## Prerequisites +1. Your Symbl API Credentials. You can get these on the [Symbl Platform](https://platform.symbl.ai/#/login). If you're new to the platform, [create an account](https://platform.symbl.ai/?_ga=2.201716818.257081579.1638006100-1289486206.1635166797#/signup) to get started. If you get stuck somewhere, [Slack us](https://symbldotai.slack.com/join/shared_invite/zt-4sic2s11-D3x496pll8UHSJ89cm78CA#/). +2. You have generated your access token. To learn how to do the same, go to our [Authentication](/docs/developer-tools/authentication/#step-2-generate-the-access-token) page. + +## Step 1: Process conversation with Symbl +--- + +The first step is to use Symbl (text, audio, and video) Async APIs to process your conversation. + +In this example, we will process an audio file (in .wav format) via Async Audio APIs. You can do the same for text conversations and video conversations as well. + +To view the complete reference, see [Async API documentation](/docs/async-api/introduction). + +### Example API Endpoint + +`POST https://api.symbl.ai/v1/process/audio` + +### Request Headers + +Ensure that you use your Bearer token in the Authorization header to make the API request. It is mandatory to create your Bearer token to invoke Symbl API calls. + +To learn more about the Authorization Header, go to [Async POST Audio API](/docs/async-api/overview/audio/post-audio#request-headers) page. + +```shell +curl --location --request POST "https://api.symbl.ai/v1/process/audio \ +--header 'Content-Type: audio/wav' \ +--header "Authorization: Bearer $AUTH_TOKEN" \ +--data-binary '@/file/location/audio.wav' +``` + +Header Name | Required | Value +----------- | ------- | ------- | +```Authorization``` | Mandatory | `Bearer ` The token you get from our [authentication process](/docs/developer-tools/authentication). +```Content-Length```| Mandatory | This should correctly indicate the length of the request body in bytes. +```Content-Type``` | Optional | This is OPTIONAL field which describes the format and codec of the provided audio data. Accepted values are `audio/wav`, `audio/mpeg`, `audio/mp3` and `audio/wave` only. If your audio is in a format other than these, do not use this field. +```x-api-key``` | Optional | DEPRECATED. The JWT token you get from our [authentication process](/docs/developer-tools/authentication). + +:::note +Headers requirements vary based on the conversation “content-type”. Therefore, refer to the respective [API Reference](/docs/async-api/introduction) for the exact Header values to be passed based on your requirements. +::: + +### Response Body + +```js +{ + "conversationId": "5006340993843200", + "jobId": "f98171b5-0f24-4582-92bc-325c3fa9473b" +} +``` + +This Conversation ID is a unique identifier of the conversation and will be applied in the Messages API to receive the Sentiment Analysis. This is provided in details in Step 2. + +The Job ID is a unique identifier of the processing job(ongoing API request) that you can use to get the job status. Learn more about how to get the Job Status on the [JOB API](/docs/async-api/overview/jobs-api/) page. + +:::note +You must wait for he processing job to be completed before you can proceed with the next step. 
The time taken for the job to complete depends on the file size.
:::

## Step 2: Use `conversationId` in the Messages API and pass `sentiment=true`
---

The Conversation ID you received in Step 1 should be passed in the [Messages API](/docs/concepts/speech-to-text). Also, ensure that you pass `sentiment=true` as a query parameter.

### Example API Endpoint

`GET https://api.symbl.ai/v1/conversations/{conversationId}/messages?sentiment=true`

### Request Headers

Ensure that you use your Bearer token in the Authorization header to make the API request. It is mandatory to create your Bearer token to invoke Symbl API calls.

To learn more about the Authorization Header, go to the [Async POST Audio API](/docs/async-api/overview/audio/post-audio#request-headers) page.

```shell
curl --location --request GET \
"https://api.symbl.ai/v1/conversations/{conversationId}/messages?sentiment=true" \
--header "Authorization: Bearer $AUTH_TOKEN"
```

Header Name | Required | Value
----------- | ------- | ------- |
```Authorization``` | Mandatory | `Bearer ` The token you get from our [authentication process](/docs/developer-tools/authentication).
```x-api-key``` | Optional | DEPRECATED. The JWT token you get from our [authentication process](/docs/developer-tools/authentication).

:::note
Header requirements vary based on the conversation "content-type". Therefore, refer to the respective [API Reference](/docs/async-api/introduction) for the exact Header values to be passed based on your requirements.
:::

### Response

```js
{
  "messages":[
    {
      "id":"6131375637790720",
      "text":"Okay, so you're talking about that file, which I am sending you.",
      "from":{

      },
      "startTime":"2021-04-12T22:10:39.881Z",
      "endTime":"2021-04-12T22:10:43.981Z",
      "conversationId":"6320529160011776",
      "phrases":[
        {
          "type":"action_phrase",
          "text":"sending I you"
        }
      ],
      "sentiment":{
        "polarity":{
          "score":-0.201
        },
        "suggested":"neutral"
      }
    },
    {
      "id":"6605033355345920",
      "text":"Ah There is way I don't think there is way too now.",
      "from":{

      },
      "startTime":"2021-04-12T22:10:46.681Z",
      "endTime":"2021-04-12T22:10:53.281Z",
      "conversationId":"6320529160011776",
      "phrases":[
        {
          "type":"action_phrase",
          "text":"think there is way too now"
        }
      ],
      "sentiment":{
        "polarity":{
          "score":-0.201
        },
        "suggested":"neutral"
      }
    },
    ...
  ]
}
```

The body parameters are defined in the operation's parameters section and include objects that describe the body data type and structure. Learn more about Response Body Parameters [here](/docs/conversation-api/messages/#response-object).

### Polarity and Score
Sentiment Analysis is the interpretation of the general thought, feeling, or sense of an object or a situation. In the Response, you'll notice the `sentiment` field, which shows the sentiment type (positive, negative, or neutral). It is scored using "polarity" values, which show the intensity of the sentiment. It ranges from -1.0 to 1.0, where -1.0 is the most negative sentiment and 1.0 is the most positive sentiment.
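As a concrete illustration of the thresholds in the table below, here is a small sketch that maps a polarity score to a suggested sentiment. The helper is illustrative only and not part of the API; the API returns the `suggested` value for you.

```js
// Illustrative mapping from a polarity score to a suggested sentiment,
// following the threshold table below.
const suggestSentiment = (score) => {
  if (score > 0.3) return 'positive';
  if (score >= -0.3) return 'neutral';
  return 'negative';
};

console.log(suggestSentiment(0.6)); // positive
```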
### Sample Code Snippet:

```js
{
  "sentiment": {
    "polarity": {
      "score": 0.6
    }
  }
}
```

| polarity | Suggested Sentiment |
|------------------|---------------------|
| -1.0 <= x < -0.3 | negative |
| -0.3 <= x <= 0.3 | neutral |
| 0.3 < x <= 1.0 | positive |

:::info
We have chosen the above polarity ranges for each sentiment type; they cover a wide range of conversations. Polarity sentiment may vary for your use case. We recommend that you define a threshold that works for you, and then adjust the threshold after testing and verifying the results.
:::
diff --git a/async-api/code-snippets/sentiment-analysis-on-topics.md b/async-api/code-snippets/sentiment-analysis-on-topics.md
new file mode 100644
index 00000000..7e226de4
--- /dev/null
+++ b/async-api/code-snippets/sentiment-analysis-on-topics.md
@@ -0,0 +1,205 @@
---
id: sentiment-analysis-on-topics
title: Sentiment Analysis on Topics
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

---

Topics are key drivers of the conversation. They're the most important keywords or phrases used. The topics algorithm provides a framework for the user to calibrate and precisely model the relationships among the concepts in a conversation and to understand what the discussion was really about. Sentiment Analysis on Topics determines whether the Topics resulting from the conversation are positive, negative, or neutral.

:::note
Currently, en-US (English US) is the only language supported for Sentiment Analysis.
:::

To get Sentiment Analysis on Topics, follow the steps given below:

## Prerequisites
1. Your Symbl API Credentials. You can get these on the [Symbl Platform](https://platform.symbl.ai/#/login). If you're new to the platform, [create an account](https://platform.symbl.ai/?_ga=2.201716818.257081579.1638006100-1289486206.1635166797#/signup) to get started. If you get stuck somewhere, [Slack us](https://symbldotai.slack.com/join/shared_invite/zt-4sic2s11-D3x496pll8UHSJ89cm78CA#/).
2. You have generated your access token. To learn how to do the same, check out our [Authentication](/docs/developer-tools/authentication/#step-2-generate-the-access-token) page.

## Step 1: Process conversation with Symbl
---

The first step is to use Symbl (text, audio, and video) Async APIs to process your conversation.

In this example, we will process an audio file (in .wav format) via the Async Audio API. You can do the same for text conversations and video conversations as well.

To view the complete reference, see the [Async API documentation](/docs/async-api/introduction).

### Example API Endpoint

`POST https://api.symbl.ai/v1/process/audio`

### Request Headers

Ensure that you use your Bearer token in the Authorization header to make the API request. It is mandatory to create your Bearer token to invoke Symbl API calls.

To learn more about the Authorization Header, go to the [Async POST Audio API](/docs/async-api/overview/audio/post-audio#request-headers) page.
```shell
curl --location --request POST "https://api.symbl.ai/v1/process/audio" \
--header 'Content-Type: audio/wav' \
--header "Authorization: Bearer $AUTH_TOKEN" \
--data-binary '@/file/location/audio.wav'
```

Header Name | Required | Value
----------- | ------- | ------- |
```Authorization``` | Mandatory | `Bearer ` The token you get from our [authentication process](/docs/developer-tools/authentication).
```Content-Length```| Mandatory | This should correctly indicate the length of the request body in bytes.
```Content-Type``` | Optional | This is an OPTIONAL field that describes the format and codec of the provided audio data. Accepted values are `audio/wav`, `audio/mpeg`, `audio/mp3` and `audio/wave` only. If your audio is in a format other than these, do not use this field.
```x-api-key``` | Optional | DEPRECATED.

:::note
Header requirements vary based on the conversation "content-type". Therefore, refer to the respective [API Reference](/docs/async-api/introduction) for the exact Header values to be passed based on your requirements.
:::

### Response Body
```js
{
  "conversationId": "5006340993843200",
  "jobId": "f98171b5-0f24-4582-92bc-325c3fa9473b"
}
```

This Conversation ID is a unique identifier of the conversation and will be used in the Topics API to receive the Sentiment Analysis. This is described in detail in Step 2.

The Job ID is a unique identifier of the processing job (ongoing API request) that you can use to get the job status. Learn more about how to get the Job Status on the [Job API](/docs/async-api/overview/jobs-api/) page.

:::note
You must wait for the processing job to be completed before you can proceed with the next step. The time taken for the job to complete depends on the file size.
:::

## Step 2: Use `conversationId` in the Topics API and pass `sentiment=true`
---

The Conversation ID you received in Step 1 should be passed in the [Topics API](/docs/concepts/topics). Also, ensure that you pass `sentiment=true` as a query parameter.

### Example API Endpoint

`GET https://api.symbl.ai/v1/conversations/{conversationId}/topics?sentiment=true`

### Request Headers

Ensure that you use your Bearer token in the Authorization header to make the API request. It is mandatory to create your Bearer token to invoke Symbl API calls.

To learn more about the Authorization Header, go to the [Async POST Audio API](/docs/async-api/overview/audio/post-audio#request-headers) page.

```shell
curl --location --request GET \
"https://api.symbl.ai/v1/conversations/{conversationId}/topics?sentiment=true" \
--header "Authorization: Bearer $AUTH_TOKEN"
```

Header Name | Required | Value
----------- | ------- | ------- |
```Authorization``` | Mandatory | `Bearer ` The token you get from our [authentication process](/docs/developer-tools/authentication).
```Content-Length```| Mandatory | This should correctly indicate the length of the request body in bytes.
```x-api-key``` | Optional | DEPRECATED.
The JWT token you get from our [authentication process](/docs/developer-tools/authentication).

:::note
Header requirements vary based on the conversation "content-type". Therefore, refer to the respective [API Reference](/docs/async-api/introduction) for the exact Header values to be passed based on your requirements.
:::

### Response

```js
{
  "topics":[
    {
      "id":"5907389282779136",
      "text":"interns",
      "type":"topic",
      "score":0.7178597920690242,
      "messageIds":[
        "4600982711304192",
        "5487363432120320",
        "6109794119188480"
      ],
      "sentiment":{
        "polarity":{
          "score":0.389
        },
        "suggested":"positive"
      },
      "parentRefs":[

      ]
    },
    {
      "id":"5776859730018304",
      "text":"company-wise hiring",
      "type":"topic",
      "score":0.788856914361565,
      "messageIds":[
        "6298570346987520",
        "6330577953226752"
      ],
      "sentiment":{
        "polarity":{
          "score":0.012
        },
        "suggested":"neutral"
      },
      "parentRefs":[

      ]
    },
    {
      "id":"6697188878974976",
      "text":"new regulations",
      "type":"topic",
      "score":0.6968750176932417,
      "messageIds":[
        "5356560840654848",
        "5663440783802368",
        "5263998490509312",
        "6082396449406976",
        "4925138187321344"
      ],
      "sentiment":{
        "polarity":{
          "score":-0.809
        },
        "suggested":"negative"
      },
      "parentRefs":[

      ]
    }
  ]
}
```

The body parameters are defined in the operation's parameters section and include objects that describe the body data type and structure. Learn more about Response Body Parameters [here](/docs/conversation-api/messages/#response-object).

### Polarity and Score
Sentiment Analysis is the interpretation of the general thought, feeling, or sense of an object or a situation. In the Response, you'll notice the `sentiment` field, which shows the sentiment type (positive, negative, or neutral). It is scored using "polarity" values, which show the intensity of the sentiment. It ranges from -1.0 to 1.0, where -1.0 is the most negative sentiment and 1.0 is the most positive sentiment.

### Sample Code Snippet:

```js
{
  "sentiment": {
    "polarity": {
      "score": 0.6
    }
  }
}
```

| polarity | Suggested Sentiment |
|------------------|---------------------|
| -1.0 <= x < -0.3 | negative |
| -0.3 <= x <= 0.3 | neutral |
| 0.3 < x <= 1.0 | positive |

:::info
We have chosen the above polarity ranges for each sentiment type; they cover a wide range of conversations. Polarity sentiment may vary for your use case. We recommend that you define a threshold that works for you, and then adjust the threshold after testing and verifying the results.
:::
diff --git a/streamingapi/code-snippets/receive-trackers-in-spanish.md b/streamingapi/code-snippets/receive-trackers-in-spanish.md
new file mode 100644
index 00000000..fca9318e
--- /dev/null
+++ b/streamingapi/code-snippets/receive-trackers-in-spanish.md
@@ -0,0 +1,326 @@
---
id: receive-trackers-in-spanish
title: Receive Trackers in Spanish
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

---

This tutorial goes over how you can use the Streaming API to receive Trackers of a conversation in the Spanish language. This example uses native JavaScript.

:::note
Currently, we only support the English and Spanish languages in Trackers for the Streaming & Async APIs. Please feel free to reach out to us at support@symbl.ai for any queries.
:::

## Prerequisite
- **API Credentials**: You must have your Symbl API App ID and Secret.
+
+:::info
+We have chosen the above polarity ranges with respect to sentiment type, and they cover a wide range of conversations. Polarity Sentiment may vary for your use case. We recommend that you define a threshold that works for you, and then adjust the threshold after testing and verifying the results.
+:::
\ No newline at end of file
diff --git a/streamingapi/code-snippets/receive-trackers-in-spanish.md b/streamingapi/code-snippets/receive-trackers-in-spanish.md
new file mode 100644
index 00000000..fca9318e
--- /dev/null
+++ b/streamingapi/code-snippets/receive-trackers-in-spanish.md
@@ -0,0 +1,326 @@
+---
+id: receive-trackers-in-spanish
+title: Receive Trackers in Spanish
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+---
+
+This tutorial goes over how you can use the Streaming API to receive Trackers of a conversation in the Spanish language. This example uses native JavaScript.
+
+:::note
+Currently, we support only the English and Spanish languages in Trackers for the Streaming & Async APIs. Please feel free to reach out to us at support@symbl.ai for any queries.
+:::
+
+## Prerequisite
+- **API Credentials**: You must have your Symbl API App ID and Secret. To grab your API Credentials, follow the steps given in the [Authentication](/docs/developer-tools/authentication) section.
+
+## Step 1: Define Spanish language in the Start Request Config
+---
+
+When you're connecting to the [WebSocket](/docs/concepts/websockets) with the Streaming API, you can define which language you wish to use in the `start_request` configuration. For the Spanish language, we will set the `languageCode` key to Spanish (`es-ES`) in the `config` object as shown below:
+
+### Config
+
+```js
+config: {
+    meetingTitle: "Spanish Trackers",
+    confidenceThreshold: 0.8,
+    timezoneOffset: 480, // Offset in minutes from UTC
+    languageCode: "es-ES", // Spanish language code
+    sampleRateHertz,
+    trackers: {
+      interimResults: true
+    }
+  },
+```
+
+You can view the complete list of all the supported parameters passed in the configuration object [here](/docs/streaming-api/code-snippets/consume-trackers-with-streaming-api#parameter-description).
+
+## Step 2: Pass the Tracker object in Spanish
+---
+
+Now, define the keywords and phrases you wish to track in the `trackers` object as shown in the code snippet below.
+
+You must then pass these Spanish Trackers in the Streaming API request to receive tracked messages in Spanish.
+
+```js
+// Spanish Trackers
+  trackers: [
+    {
+      name: 'deseos',
+      vocabulary: [
+        "Hasta luego",
+        "Muchas gracias y que eres increíble",
+        "Gracias",
+        "Bueno",
+        "Realmente lindo",
+        "Ese fue uno de los más",
+        "De acuerdo, gracias",
+        "Así que, a la inversa, gracias por estar presente",
+      ],
+    }, {
+      name: 'dificil',
+      vocabulary: [
+        'Sí, no me amo por completo en este momento y creo que hay mucho trabajo por hacer y estoy luchando',
+        'Sé que estás pasando por tu propia pequeña lucha',
+        'He tenido otras luchas más grandes en la vida porque el módulo de ahí arriba, mantén la vida en la parte inferior del cuerpo',
+        'Los problemas son luchas internas',
+      ]
+    }, {
+      name: 'dinero',
+      vocabulary: [
+        "Si alguien te quita todo lo que quieres hoy, todo tu dinero, tu casa, y te pone en camino",
+        "Ahora, sea lo que sea, poseo más de la mitad del dinero",
+        "Tengo dinero",
+      ]
+    },
+  ],
+```
+
+### Full Code Snippet
+View the full code snippet for getting Trackers in Spanish with the Streaming API below:
+
+:::note
+In the sample below, we have generated the Spanish Trackers using our JavaScript SDK.
+:::
+
+```js
+const { sdk } = require('@symblai/symbl-js')
+const uuid = require('uuid').v4
+// In this example, we are using mic to get audio from the microphone and passing it on to the WebSocket connection.
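+// Note: the mic package shells out to a system recorder (SoX, or arecord on
+// Linux), so one of those must be installed for this sample to capture audio.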
+const mic = require('mic')
+const sampleRateHertz = 16000
+const micInstance = mic({
+  rate: sampleRateHertz,
+  channels: '1',
+  debug: false,
+  exitOnSilence: 6,
+});
+// Replace these placeholders with your Symbl API Credentials.
+const appId = '<your-app-id>';
+const appSecret = '<your-app-secret>';
+(async () => {
+  try {
+    // Initialize the SDK
+    await sdk.init({
+      appId: appId,
+      appSecret: appSecret,
+      basePath: 'https://api-labs.symbl.ai',
+    })
+    // Add your unique ID here
+    const id = uuid()
+    const connection = await sdk.startRealtimeRequest({
+      id,
+      insightTypes: ['action_item', 'question'],
+      customVocabulary: ['John', 'Symbl'], // Custom Vocabulary
+      noConnectionTimeout: 100, // No Connection Timeout
+      // This shows the Spanish Trackers
+      trackers: [
+        {
+          name: 'deseos',
+          vocabulary: [
+            "Hasta luego",
+            "Muchas gracias y que eres increíble",
+            "Gracias",
+            "Bueno",
+            "Realmente lindo",
+            "Ese fue uno de los más",
+            "De acuerdo, gracias",
+            "Así que, a la inversa, gracias por estar presente",
+          ],
+        }, {
+          name: 'dificil',
+          vocabulary: [
+            'Sí, no me amo por completo en este momento y creo que hay mucho trabajo por hacer y estoy luchando',
+            'Sé que estás pasando por tu propia pequeña lucha',
+            'He tenido otras luchas más grandes en la vida porque el módulo de ahí arriba, mantén la vida en la parte inferior del cuerpo',
+            'Los problemas son luchas internas',
+          ]
+        }, {
+          name: 'dinero',
+          vocabulary: [
+            "Si alguien te quita todo lo que quieres hoy, todo tu dinero, tu casa, y te pone en camino",
+            "Ahora, sea lo que sea, poseo más de la mitad del dinero",
+            "Tengo dinero",
+          ]
+        },
+        {
+          name: "covid",
+          vocabulary: [
+            "wear mask",
+            "coughing",
+            "fever",
+            "cold",
+            "trouble breathing"
+          ]
+        }
+      ],
+      config: {
+        meetingTitle: "Spanish Trackers",
+        confidenceThreshold: 0.8,
+        timezoneOffset: 480, // Offset in minutes from UTC
+        languageCode: "es-ES", // Spanish language code
+        sampleRateHertz,
+        trackers: {
+          interimResults: true
+        }
+      },
+      speaker: {
+        // Optional. If not specified, no email is sent at the end of the conversation.
+        userId: "john@example.com", // Update with a valid email
+        name: "John",
+      },
+      handlers: {
+        onSpeechDetected: (data) => {
+          if (data) {
+            const { punctuated } = data
+            console.log('Live: ', punctuated && punctuated.transcript)
+            console.log('');
+          }
+          console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
+        },
+        onMessageResponse: (data) => {
+          console.log('onMessageResponse', JSON.stringify(data, null, 2))
+        },
+        onInsightResponse: (data) => {
+          console.log('onInsightResponse', JSON.stringify(data, null, 2))
+        },
+        onTopicResponse: (data) => {
+          console.log('onTopicResponse', JSON.stringify(data, null, 2))
+        },
+        // Note: define onTrackerResponse only once; a duplicate key in this
+        // object would silently override the earlier handler.
+        onTrackerResponse: (data) => {
+          // When a tracker is detected in real-time
+          console.log('onTrackerResponse', JSON.stringify(data, null, 2));
+          if (!!data) {
+            data.forEach((tracker) => {
+              console.log(`Detected Tracker Name: ${tracker.name}`);
+              console.log(`Detected Matches`);
+              tracker.matches.forEach((match) => {
+                console.log(`Tracker Value: ${match.value}`);
+                console.log(`Messages detected against this Tracker`);
+                match.messageRefs.forEach((messageRef) => {
+                  console.log(`Message ID: ${messageRef.id}`);
+                  console.log(`Message text for which the match was detected: ${messageRef.text}`);
+                  console.log(`\n`);
+                });
+                console.log(`\n\n`);
+                console.log(`Insights detected against this Tracker`);
+                match.insightRefs.forEach((insightRef) => {
+                  console.log(`Insight ID: ${insightRef.id}`);
+                  console.log(`Insight text for which the match was detected: ${insightRef.text}`);
+                  console.log(`Insight Type: ${insightRef.type}`);
+                  console.log(`\n`);
+                });
+                console.log(`\n\n`);
+              });
+            });
+          }
+        },
+      },
+    });
+    console.log('Successfully connected. Conversation ID: ', connection.conversationId);
+    const micInputStream = micInstance.getAudioStream()
+    /** Raw audio stream */
+    micInputStream.on('data', (data) => {
+      // Push audio from Microphone to websocket connection
+      connection.sendAudio(data)
+    })
+    micInputStream.on('error', function (err) {
+      console.log('Error in Input Stream: ' + err)
+    })
+    micInputStream.on('startComplete', function () {
+      console.log('Started listening to Microphone.')
+    })
+    micInputStream.on('silence', function () {
+      console.log('Got SIGNAL silence')
+    })
+    micInstance.start()
+    setTimeout(async () => {
+      // Stop listening to microphone
+      micInstance.stop()
+      console.log('Stopped listening to Microphone.')
+      try {
+        // Stop connection
+        await connection.stop()
+        console.log('Connection Stopped.')
+      } catch (e) {
+        console.error('Error while stopping the connection.', e)
+      }
+    }, 60 * 1000) // Stop the connection after 1 minute, i.e. 60 seconds
+  } catch (e) {
+    console.error('Error: ', e)
+  }
+})();
+```
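+
+If you connect to the Streaming API WebSocket directly instead of through the JavaScript SDK, the same Trackers and `config` are sent in the `start_request` message. The sketch below is illustrative only: the endpoint path, `connectionId`, and `accessToken` are assumptions based on the Streaming API reference, so verify them against the API Reference before relying on them.
+
+```js
+// Illustrative direct-WebSocket sketch: send the Spanish config and one
+// Tracker in the start_request message instead of using the JavaScript SDK.
+// NOTE: the endpoint path below is an assumption; check the Streaming API reference.
+const ws = new WebSocket(`wss://api.symbl.ai/v1/realtime/insights/${connectionId}?access_token=${accessToken}`);
+
+ws.onopen = () => {
+  ws.send(JSON.stringify({
+    type: 'start_request',
+    insightTypes: ['action_item', 'question'],
+    trackers: [{ name: 'deseos', vocabulary: ['Hasta luego', 'Gracias'] }],
+    config: {
+      languageCode: 'es-ES', // Spanish language code
+      sampleRateHertz: 16000,
+      trackers: { interimResults: true }
+    }
+  }));
+};
+```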
+### Connect Mic
+After you connect, you want to connect to your device's microphone. This code is inserted after the `connection` is established and before the closing of the `try...catch` block.
+
+```js
+const micInputStream = micInstance.getAudioStream()
+/** Raw audio stream */
+micInputStream.on('data', (data) => {
+  // Push audio from Microphone to websocket connection
+  connection.sendAudio(data)
+})
+
+micInputStream.on('error', function (err) {
+  console.log('Error in Input Stream: ' + err)
+})
+
+micInputStream.on('startComplete', function () {
+  console.log('Started listening to Microphone.')
+})
+
+micInputStream.on('silence', function () {
+  console.log('Got SIGNAL silence')
+})
+
+micInstance.start()
+
+setTimeout(async () => {
+  // Stop listening to microphone
+  micInstance.stop()
+  console.log('Stopped listening to Microphone.')
+  try {
+    // Stop connection
+    await connection.stop()
+    console.log('Connection Stopped.')
+  } catch (e) {
+    console.error('Error while stopping the connection.', e)
+  }
+}, 60 * 1000) // Stop the connection after 1 minute, i.e. 60 seconds
+```
+
+### Testing
+
+To verify that the microphone is connected, follow these steps:
+
+1. Create a JavaScript file named `app.js`.
+2. Copy the above [code](#connect-mic) into the file.
+3. Replace the placeholder values with your own values.
+4. Use `npm` to install the required libraries:
+
+```bash
+$ npm install @symblai/symbl-js uuid mic
+```
+5. Now, run the following in the terminal:
+
+```bash
+$ node app.js
+```
+
+If successful, you should see the responses logged in the console.
+
+### Handlers Reference (Symbl SDK)
+
+Read more about the supported Event Handlers in the following sections:
+
+👉 [onSpeechDetected](/docs/javascript-sdk/reference/#onspeechdetected)
+👉 [onMessageResponse](/docs/javascript-sdk/reference/#onmessageresponse)
+👉 [onInsightResponse](/docs/javascript-sdk/reference/#oninsightresponse)
+👉 [onTopicResponse](/docs/javascript-sdk/reference/#ontopicresponse)
\ No newline at end of file
From 1606d0474f549fa7c42aafeaa7608388747d5 Mon Sep 17 00:00:00 2001
From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com>
Date: Thu, 9 Dec 2021 11:16:01 +0530
Subject: [PATCH 48/64] Update sentiment-analysis-on-topics.md

---
 async-api/code-snippets/sentiment-analysis-on-topics.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/async-api/code-snippets/sentiment-analysis-on-topics.md b/async-api/code-snippets/sentiment-analysis-on-topics.md
index 7e226de4..8c96f1fa 100644
--- a/async-api/code-snippets/sentiment-analysis-on-topics.md
+++ b/async-api/code-snippets/sentiment-analysis-on-topics.md
@@ -180,7 +180,7 @@ Headers requirements vary based on the conversation “content-type”. Therefor
 The body parameter is defined in the operation's parameters section and includes the following: objects that describe the body data type and structure. Learn more about Response Body Parameters [here](/docs/conversation-api/messages/#response-object).
 
 ### Polarity and Score
-Sentiment Analysis is the interpretation of the general thought, feeling, or sense of an object or a situation. In the Response, you’ll notice the `sentiment` field which shows the sentiment type (positive, negative, and neutral). It is scored using “polarity” values which shows the intensity of the sentiment. It ranges from -1.0 to 1.0, where -1.0 is the most negative sentiment and 1.0 is the most positive sentiment. 
+Sentiment Analysis is the interpretation of the general thought, feeling, or sense of an object or a situation. In the Response, you’ll notice the `sentiment` field which shows the sentiment type (positive, negative, and neutral). It is scored using “polarity” values which shows the intensity of the sentiment. It ranges from -1.0 to 1.0, where -1.0 is the most negative sentiment and 1.0 is the most positive sentiment.
 
 ### Sample Code Snippet:
 
From b701975de1e269042077d793f3d2d5d9f11b19fb Mon Sep 17 00:00:00 2001
From: Marcelo Jabali
Date: Thu, 9 Dec 2021 12:00:34 -0800
Subject: [PATCH 49/64] Update connection id definition

---
 streamingapi/reference/reference.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/streamingapi/reference/reference.md b/streamingapi/reference/reference.md
index 69d2d1bb..cc6b1b03 100644
--- a/streamingapi/reference/reference.md
+++ b/streamingapi/reference/reference.md
@@ -394,7 +394,7 @@ const connection = await sdk.startRealtimeRequest({
 
 Let’s go over all the parameters passed in the configuration object in the above function:
 
-1. `id`: A unique UUID that represents this WebSocket API's session.
+1. `id`: A unique UUID that represents the user's session.
 
 2. `insightType`: The types of **Insights** to be detected for this conversation. 
From 03e19543e50e9ecd6f0c0412282deaa5ee8785e4 Mon Sep 17 00:00:00 2001 From: Monish Basaniwal <46259712+monizb@users.noreply.github.com> Date: Fri, 10 Dec 2021 22:24:26 +0530 Subject: [PATCH 50/64] [TYPO] URL REQUIREMENT SECTION Fixed a small typo I found while reading the page :) --- async-api/overview/video/post-video-url.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/async-api/overview/video/post-video-url.md b/async-api/overview/video/post-video-url.md index 6ccfdb38..d89a1f0d 100644 --- a/async-api/overview/video/post-video-url.md +++ b/async-api/overview/video/post-video-url.md @@ -13,7 +13,7 @@ The Async Video URL API allows you to process video files (.mp4 format) and retu It can be utilized for any use case where you have access to recorded video stored publicly as a URL and want to extract insights and other conversational attributes supported by [Symbl's Conversation API](/docs/conversation-api/introduction). :::tip URL Requirement -The URL provided must be a publicly available URL. Currently we do not any support any redirected links, shortened links (e.g. bit.ly), YouTube, Vimeo, or links from any audio/video platforms. +The URL provided must be a publicly available URL. Currently we do not support any redirected links, shortened links (e.g. bit.ly), YouTube, Vimeo, or links from any audio/video platforms. ::: :::info @@ -403,4 +403,4 @@ Here value of `X` can be found in [FAQ](/docs/faq). :::caution You must wait for the job to complete processing before you proceed with getting the Conversation Intelligence. If you immediately make a GET request to Conversation API, it is possible that you'll receive incomplete insights. Therefore, ensure that you wait for the job to complete. -::: \ No newline at end of file +::: From bd0b76817f0a4df67a6c0d2674835c3f53c97341 Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Mon, 13 Dec 2021 14:56:51 +0530 Subject: [PATCH 51/64] update on sentiment analysis --- conversation-api/concepts/sentiment.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/conversation-api/concepts/sentiment.md b/conversation-api/concepts/sentiment.md index 0d967a37..9c523775 100644 --- a/conversation-api/concepts/sentiment.md +++ b/conversation-api/concepts/sentiment.md @@ -130,3 +130,8 @@ Polarity Sentiment may vary for your use case. We recommend that you define a th | -1.0 => x > -0.3 | negative | | -0.3 => x <= 0.3 | neutral | | 0.3 > x <= 1.0 | positive | + +### Tutorials + +- View tutorial on Sentiment Analysis on Messages [here](/docs/async-api/code-snippets/sentiment-analysis-on-messages) +- View tutorial on Sentiment Analysis on Topics [here](/docs/async-api/code-snippets/sentiment-analysis-on-topics) \ No newline at end of file From ab32e7be0adddc2f9c4c4dd7e49717f5edb2e6bf Mon Sep 17 00:00:00 2001 From: pema-s <81958801+pema-s@users.noreply.github.com> Date: Wed, 15 Dec 2021 10:22:44 +0530 Subject: [PATCH 52/64] Adding trackers changes --- changelog.md | 6 +++++ conversation-api/api-reference/trackers.md | 17 ++++++++++---- faq.md | 8 ++++++- management-api/trackers/overview.md | 27 +++++++++++++++------- 4 files changed, 44 insertions(+), 14 deletions(-) diff --git a/changelog.md b/changelog.md index f0ee3ae9..1fdca6a6 100644 --- a/changelog.md +++ b/changelog.md @@ -11,6 +11,12 @@ import TabItem from '@theme/TabItem'; We continuously add new features and enhancements, fix critical bugs, and regularly deploy changes to improve performance. 
 Keep a tab on our latest updates on this page.
 
+### 26 Nov 2021
+![api update](/img/api-update.png)
+- **Added support for generating Trackers in the Spanish language** (LABS)
+[Read more here](/docs/streamingapi/code-snippets/receive-trackers-in-spanish). + + ### 22 Oct 2021 ![api update](/img/api-update.png) - **Added support for generating Summary for only the new Transcripts of a Conversation** (LABS)

diff --git a/conversation-api/api-reference/trackers.md b/conversation-api/api-reference/trackers.md
index 8401f6f1..b480db0d 100644
--- a/conversation-api/api-reference/trackers.md
+++ b/conversation-api/api-reference/trackers.md
@@ -7,6 +7,8 @@ slug: /conversation-api/trackers
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
+---
+
 :::note In Beta Phase
 This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
 :::
@@ -17,13 +19,18 @@ This API allows you to get all the [Trackers](/docs/concepts/trackers) from your
 
 `GET https://api.symbl.ai/v1/conversations/{conversationId}/trackers-detected`
 
+:::info Important
+If you have not processed your conversation with the parameter `enableAllTrackers=true` in the Async API, Trackers will not be detected. To learn why and understand how to use this parameter while processing your conversation, see the [Consuming Trackers with Management API](/docs/management-api/trackers/overview#step-2-submit-files-using-async-api-with-enablealltrackers-flag) section.
+:::
+
 ### Request Headers
 
-1. `Authorization` (Recommended) - This header should contain a valid Bearer token generated using the `token:generate` API Endpoint by passing in the credentials. You can read more about authentication [here](/docs/developer-tools/authentication).
-
-2. `X-API-KEY` (Legacy) - Use the `Authorization` header. This has been deprecated. This header should contain a valid authentication token generated using the `token:generate` API Endpoint by passing in the credentials. These can be obtained by signing up on the [Platform](https://platform.symbl.ai/).
-
-3. `Content-Type` (Optional) - This header must contain the MIME Type `application/json`.
+Header Name | Required | Description
+---------- | ------- | ------- |
+```Authorization``` | Mandatory | `Bearer ` The token you get from our [authentication process](/docs/developer-tools/authentication).
+```Content-Type``` | Mandatory | `application/json`
+```x-api-key``` | Optional | DEPRECATED. The JWT token you get from our [authentication process](/docs/developer-tools/authentication).
+
 
 ### Example API Call
 
diff --git a/faq.md b/faq.md
index 89c93437..bbf37ed3 100644
--- a/faq.md
+++ b/faq.md
@@ -43,4 +43,10 @@ Our Async APIs support multiple popular languages. To see the complete list, go
 
 ### What happens when the Speaker Diarization and Speaker recognition per Channel are both set to "True"?
 
-If the Diarization feature is set to true, it will take priority over Speaker recognition per Channel.
+If the Diarization feature is set to `true`, it will take priority over Speaker recognition per Channel.
+
+### Can I run a Tracker one at a time?
+
+No, Trackers cannot be run one at a time.
+
+If you have not processed your conversation with the parameter `enableAllTrackers=true` in the Async API, Trackers will not be detected. To learn why and understand how to use this parameter while processing your conversation, see the [Consuming Trackers with Management API](/docs/management-api/trackers/overview#step-2-submit-files-using-async-api-with-enablealltrackers-flag) section. 
\ No newline at end of file
diff --git a/management-api/trackers/overview.md b/management-api/trackers/overview.md
index 55ab96e6..e5e0155a 100644
--- a/management-api/trackers/overview.md
+++ b/management-api/trackers/overview.md
@@ -35,6 +35,7 @@ To read about the capabilities of the Management API, see the [Management API](/
 
 ### Step 1: Create Trackers
+---
 
 Create Trackers by sending a `POST` request to the Trackers Management API endpoint given below:
@@ -72,6 +73,7 @@ This creates a Tracker and returns the following response. Note that every Track
 }
 ```
 ### Step 2: Submit files using Async API with `enableAllTrackers` flag
+---
 
 When you send a recorded audio, video or text using the [Async API](/docs/async-api/introduction), set **enableAllTrackers=true** and **POST** the file to Symbl.
 
 Given below is an example of a POST request to the Async Audio API for processing an audio recording with `enableAllTrackers` set to `true`. By default, this is set to `false`.
 
@@ -81,32 +83,35 @@ POST "https://api.symbl.ai/v1/process/audio?enableAllTrackers=true"
 ```
 
 :::note Specifying the "enableAllTrackers" parameter in the request
-The `enableAllTrackers` parameter will enable detection of all the Trackers maintained for a Symbl’s account by the [Management API](#tracker-consumption-with-management-api).
+The `enableAllTrackers` parameter must be sent in the Async API request to detect Trackers. The purpose of this flag is to enable detection of all the Trackers created with the [Management API](#tracker-consumption-with-management-api), which maintains your entities with Symbl at the account level.
 
-`enableAllTrackers` accepts a boolean value which must be passed in the Async APIs either as a query param or in the request body depending on which Async API you are using. See the complete list below:
+`enableAllTrackers` accepts a boolean value which must be passed in the Async APIs either as a query param or in the request body, depending on which Async API you are using. See the complete list of Async APIs and how each accepts this parameter:
 
 | How it is passed | Async APIs |
 | ---------- | ------- |
 | As a query-param | Async Audio File API, Async Video File API |
 | In Request Body | Async Audio URL API, Async Video URL API, Async Text API |
-
 :::
 
-You will get the `conversationId` and the `jobId` as shown below:
-
-### Response
+On successful processing by the above-mentioned API, you will get the `conversationId` and the `jobId` as shown below:
+
+#### Response
 ```json
 {
   "conversationId": "6186250391257088",
   "jobId": "78422976-e461-41cf-ba35-20397d16619e"
 }
 ```
-👉   [Next, you can check the job status using the `GET` Job Status request.](/docs/async-api/overview/jobs-api/#get-job-status)
+You can use the `jobId` to get the job status using the [Job Status API](/docs/async-api/overview/jobs-api/#get-job-status).
+
+:::note
+Ensure that you wait for the job to complete before proceeding to Step 3.
+:::
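+
+For the Async APIs that take `enableAllTrackers` in the request body (see the table above), the flag is sent as a top-level field of the JSON payload. The sketch below is illustrative for the Async Text API; the message content and `authToken` are placeholders.
+
+```js
+// Illustrative sketch: passing enableAllTrackers in the request body (Async Text API).
+(async () => {
+  const response = await fetch('https://api.symbl.ai/v1/process/text', {
+    method: 'POST',
+    headers: {
+      Authorization: `Bearer ${authToken}`, // assumed valid Bearer token
+      'Content-Type': 'application/json'
+    },
+    body: JSON.stringify({
+      enableAllTrackers: true, // top-level field in the request body
+      messages: [
+        { payload: { content: 'We are running a sale right now.' } } // placeholder text
+      ]
+    })
+  });
+  const { conversationId, jobId } = await response.json();
+  console.log(conversationId, jobId);
+})();
+```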
 
 ### Step 3: Get detected messages containing Trackers
+---
 
-Using the `conversation_id` from Step 2, you can `GET` the Trackers for the conversation.
+Using the `conversationId` from Step 2, you can `GET` the Trackers for the conversation.
 
 ```shell
 GET "https://api.symbl.ai/v1/conversations/{{conversation_id}}/trackers"
 ```
@@ -136,6 +141,9 @@ GET "https://api.symbl.ai/v1/conversations/{{conversation_id}}/trackers"
 ...
 ]
 ```
+:::caution Important
+If the `conversationId` used in this Step is not processed with `enableAllTrackers=true` in the Async API, Trackers will not be detected. Using this flag as illustrated in Step 2 is mandatory.
+:::
 
 ### Supported API Operations with Management API
 
@@ -150,7 +158,9 @@ Delete Tracker| [`DELETE`v1/manage/tracker/{trackerId}](/management-api/trackers
 
 ## Consuming Trackers with Async APIs
 
+
 ### Step 1: Create a Tracker
+---
 
 The first step is to create a Tracker with a set of phrases and keywords using Async APIs.
@@ -212,6 +222,7 @@ After creating the Tracker, you can:
 👉   [If any Trackers need to be updated, send a `PUT` request.](/docs/management-api/trackers/update-tracker)
 
 ### Step 2: Get the detected messages containing Trackers
+---
 
 Using the `conversation_id` you get from Step 1, you can `GET` the Trackers for the conversation.
 
From 2d988931efa6f460e2c217f3827fdaa1a044debe Mon Sep 17 00:00:00 2001
From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com>
Date: Wed, 15 Dec 2021 13:27:35 +0530
Subject: [PATCH 53/64] 15-12-21

---
 .../api-reference/put-all-conversation.md     | 58 ++++---------------
 conversation-api/api-reference/trackers.md    | 44 +++++++-------
 conversation-api/concepts/trackers.md         |  2 +-
 how-tos/get-sentiment-analysis.md             |  2 +
 .../conversation-groups-intro.md              | 40 ++-----------
 management-api/trackers/overview.md           | 27 +++++++---
 pre-built-ui/text-summary-ui.md               |  4 +-
 pre-built-ui/video-summary-ui.md              |  4 +-
 .../receive-trackers-in-spanish.md            | 13 ++++-
 9 files changed, 84 insertions(+), 128 deletions(-)

diff --git a/conversation-api/api-reference/put-all-conversation.md b/conversation-api/api-reference/put-all-conversation.md
index 2baa1375..f772ede6 100644
--- a/conversation-api/api-reference/put-all-conversation.md
+++ b/conversation-api/api-reference/put-all-conversation.md
@@ -100,6 +100,12 @@ exit()
 
 
 :::note
+The following field can be updated/deleted:
+
+- `metadata`
+
+  To update the `metadata`, read about the requirements [here](/docs/management-api/conversation-groups/conversation-groups-intro/#step-2-add-metadata-to-conversation).
+
 The following fields cannot be updated/deleted:
 
 - `id`
@@ -113,20 +119,6 @@ Given below is an example of the request along with optional query parameters. F
 
 ```json
 {
-  "type": "meeting",
-  "name": "My Test Meeting",
-  "startTime": "2021-02-24T15:53:05.594Z",
-  "endTime": "2021-02-24T16:18:05.048Z",
-  "members": [
-    {
-      "name": "John",
-      "email": "john@example.com"
-    },
-    {
-      "name": "Mary",
-      "email": "mary@example.com"
-    }
-  ],
   "metadata": {
     "key": "value",
     "agentId": "johndoe"
   }
 }
 ```
 
 The updated Conversation object is returned in the response body.
 
 ```javascript
 {
-  "id": "4931769134481408",
-  "type": "meeting",
-  "name": "My Test Meeting",
-  "startTime": "2021-02-24T15:53:05.594Z",
-  "endTime": "2021-02-24T16:18:05.048Z",
-  "members": [
-    {
-      "name": "John",
-      "email": "john@example.com"
-    },
-    {
-      "name": "Mary",
-      "email": "mary@example.com"
-    }
-  ],
-  "metadata": {
-    "key": "value",
-    "agentId": "johndoe"
-  }
+  "id":"4931769134481408",
+  "metadata":{
+    "key":"value",
+    "agentId":"johndoe"
+  }
 }
 ```
-
-### Response Object
-
-Field  | Description
----------- | ------- |
-```id``` | The unique conversation identifier. This field cannot be updated.
-```type``` | The conversation type. Default value is `meeting`. This field cannot be updated.
-```name``` | The name of the conversation.
-```startTime``` | DateTime value of when the conversation started.
-```endTime``` | DateTime value of when the conversation ended.
-```members``` | A list of member objects containing ID, name and email (if detected). 
-
-
+Metadata contains user-defined metadata key values which are used for labelling conversations. `agentId` is an example of a `key/value` pair. You can define the key/value pairs based on what you want to store in the metadata field.
\ No newline at end of file
diff --git a/conversation-api/api-reference/trackers.md b/conversation-api/api-reference/trackers.md
index cb88028f..bc81247c 100644
--- a/conversation-api/api-reference/trackers.md
+++ b/conversation-api/api-reference/trackers.md
@@ -111,26 +111,30 @@ exit()
 ### Response
 
 ```json
-[
-  {
-    "id": "4527907378937856",
-    "name": "My Awesome Tracker",
-    "matches": [
-      {
-        "messageRefs": [
-          {
-            "id": "4670860273123328",
-            "text": "Wearing mask is a good safety measure.",
-            "offset": -1
-          }
-        ],
-        "type": "vocabulary",
-        "value": "wear mask",
-        "insightRefs": []
-      }
-    ]
-  }
-]
+{
+    "type": "vocabulary",
+    "value": "Can you reiterate that one more time",
+    "messageRefs": [
+        {
+            "id": "6428676305453056",
+            "text": "So I am not showing that here but you can have that, you know, for particular sentence and, you know, then aggregate based on the whole conversation.",
+            "offset": -1
+        },
+        {
+            "id": "6035928066818048",
+            "text": "Give that intent and name and that's it.",
+            "offset": -1
+        }
+    ],
+    "insightRefs": [
+        {
+            "text": "Yeah, and you So from sentiment analysis perspective, right?",
+            "offset": -1,
+            "type": "question",
+            "id": "5794360651153408"
+        }
+    ]
+}
 ```
 
 Let’s go over the members of the response body which contains the detected tracker objects:
diff --git a/conversation-api/concepts/trackers.md b/conversation-api/concepts/trackers.md
index 933582ee..4e9f55e8 100644
--- a/conversation-api/concepts/trackers.md
+++ b/conversation-api/concepts/trackers.md
@@ -97,4 +97,4 @@ Currently, Trackers is supported with Symbl’s Async APIs and Streaming APIs.
 - [Viewing detected Trackers with Async API](/docs/async-api/code-snippets/track-phrases-in-a-conversation/#view-detected-trackers)
 - [Detect Key Phrases with Streaming API](/docs/streamingapi/code-snippets/detect-key-phrases/#ontrackerresponse-json-response-example)
 - [How to create Trackers in Bulk](/docs/management-api/trackers/create-tracker#create-trackers-in-bulk)
-
+- [How to receive Trackers in Spanish with Streaming API](/docs/streamingapi/code-snippets/receive-trackers-in-spanish)
\ No newline at end of file
diff --git a/how-tos/get-sentiment-analysis.md b/how-tos/get-sentiment-analysis.md
index 8801118c..7a197316 100644
--- a/how-tos/get-sentiment-analysis.md
+++ b/how-tos/get-sentiment-analysis.md
@@ -31,6 +31,8 @@ For getting Sentiment Analysis on your conversations in real-time, you can use o
 
 For getting Sentiment Analysis on recorded conversations, use our Async APIs. You can refer to the tutorials below for more information. 
- [Get Sentiment Analysis with Async API](/docs/async-api/code-snippets/how-to-use-sentiment-analysis/) +- [Sentiment Analysis on Messages](/docs/async-api/code-snippets/sentiment-analysis-on-messages) +- [Sentiment Analysis on Topics](/docs/async-api/code-snippets/sentiment-analysis-on-topics) diff --git a/management-api/conversation-groups/conversation-groups-intro.md b/management-api/conversation-groups/conversation-groups-intro.md index 04e7a9e4..cb7c76bc 100644 --- a/management-api/conversation-groups/conversation-groups-intro.md +++ b/management-api/conversation-groups/conversation-groups-intro.md @@ -81,23 +81,7 @@ To add metadata, modify an already processed conversation using Conversation API #### Request Body ```json -# This is an optional payload. You can pass only the metadata to receive similar results. { - "type": "meeting", - "name": "My Business Meeting", - "startTime": "2021-02-24T15:53:05.594Z", - "endTime": "2021-02-24T16:18:05.048Z", - "members": [ - { - "name": "John", - "email": "john@example.com" - }, - { - "name": "Mary", - "email": "mary@example.com" - } - ], -# This adds the metadata for Conversation Groups "metadata": { "key": "value", "agentId": "johndoe" @@ -109,25 +93,11 @@ To add metadata, modify an already processed conversation using Conversation API ```json { - "id": "4931769134481408", - "type": "meeting", - "name": "My Business Meeting", - "startTime": "2021-02-24T15:53:05.594Z", - "endTime": "2021-02-24T16:18:05.048Z", - "members": [ - { - "name": "John", - "email": "john@example.com" - }, - { - "name": "Mary", - "email": "mary@example.com" - } - ], - "metadata": { - "key": "value", - "agentId": "johndoe" - } + "id":"4931769134481408", + "metadata":{ + "key":"value", + "agentId":"johndoe" + } } ``` diff --git a/management-api/trackers/overview.md b/management-api/trackers/overview.md index e044961f..3c2601cc 100644 --- a/management-api/trackers/overview.md +++ b/management-api/trackers/overview.md @@ -114,27 +114,30 @@ GET "https://api.symbl.ai/v1/conversations/{{conversation_id}}/trackers-detected #### Response ```json -[ - { - "id": "4527907378937856", - "name": "Promotion Mention", - "matches": [ - { - "messageRefs": [ - { - "id": "4670860273123328", - "text": "We're running a sale right now", - "offset": -1 - } - ], - "type": "vocabulary", - "value": "run sale", - "insightRefs": [] - } - ] - }, - ... 
-] +{ + "type": "vocabulary", + "value": "Can you reiterate that one more time", + "messageRefs": [ + { + "id": "6428676305453056", + "text": "So I am not showing that here but you can have that, you know, for particular sentence and, you know, then aggregate based on the whole conversation.", + "offset": -1 + }, + { + "id": "6035928066818048", + "text": "Give that intent and name and that's it.", + "offset": -1 + } + ], + "insightRefs": [ + { + "text": "Yeah, and you So from sentiment analysis perspective, right?", + "offset": -1, + "type": "question", + "id": "5794360651153408" + } + ] +} ``` ### Supported API Operations with Management API diff --git a/pre-built-ui/text-summary-ui.md b/pre-built-ui/text-summary-ui.md index c80caa04..c2813a52 100644 --- a/pre-built-ui/text-summary-ui.md +++ b/pre-built-ui/text-summary-ui.md @@ -35,6 +35,6 @@ The Text Summary UI displays the following details: ### Tutorials - [Creating Text Summary UI](/docs/tutorials/pre-built-summary-ui/creating-text-summary-ui) -- [Turning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page) +- [Tuning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page) - [White label your Summary Page](/docs/tutorials/pre-built-summary-ui/whitelabeling-summary-ui) -- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain) +- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain) \ No newline at end of file diff --git a/pre-built-ui/video-summary-ui.md b/pre-built-ui/video-summary-ui.md index dd223b66..ba97d01b 100644 --- a/pre-built-ui/video-summary-ui.md +++ b/pre-built-ui/video-summary-ui.md @@ -73,6 +73,6 @@ The videoUrl only takes precedence when there is no Video present in the UI. ### Tutorials - [Creating Video Summary UI](/docs/tutorials/pre-built-summary-ui/creating-video-summary-ui) -- [Turning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page) +- [Tuning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page) - [White label your Summary Page](/docs/tutorials/pre-built-summary-ui/whitelabeling-summary-ui) -- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain) +- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain) \ No newline at end of file diff --git a/streamingapi/code-snippets/receive-trackers-in-spanish.md b/streamingapi/code-snippets/receive-trackers-in-spanish.md index 285a2bb3..66d65129 100644 --- a/streamingapi/code-snippets/receive-trackers-in-spanish.md +++ b/streamingapi/code-snippets/receive-trackers-in-spanish.md @@ -1,6 +1,6 @@ --- id: receive-trackers-in-spanish -title: Receive Trackers in Spanish +title: Receive Trackers in Spanish (Labs) --- import Tabs from '@theme/Tabs'; @@ -8,6 +8,17 @@ import TabItem from '@theme/TabItem'; --- +:::info Symbl Labs +This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. + + +You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials. + +**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary. 
+ +For any queries or feedback, please contact us at labs@symbl.ai. +::: + This tutorial goes over how you can use the Streaming API to receive Trackers of a conversation in the Spanish language. This example uses native JavaScript. :::note From 7ff7af2b4feea054e9bae3dd7d2e5354a473b020 Mon Sep 17 00:00:00 2001 From: pema-s <81958801+pema-s@users.noreply.github.com> Date: Wed, 15 Dec 2021 14:25:35 +0530 Subject: [PATCH 54/64] Web SDK documentation --- .../api-reference/all-conversations.md | 10 +- streamingapi/reference/reference.md | 2 +- web-sdk/overview.md | 29 +- web-sdk/subscribe-real-time.md | 7 - ...nscribing-live-audio-through-microphone.md | 271 ++++++++++-------- web-sdk/web-sdk-html.md | 9 + web-sdk/web-sdk-reference.md | 11 + 7 files changed, 198 insertions(+), 141 deletions(-) create mode 100644 web-sdk/web-sdk-html.md diff --git a/conversation-api/api-reference/all-conversations.md b/conversation-api/api-reference/all-conversations.md index 1e23659c..f1d8fc13 100644 --- a/conversation-api/api-reference/all-conversations.md +++ b/conversation-api/api-reference/all-conversations.md @@ -102,12 +102,18 @@ exit()
 
-### Request Body
+### Request
 
 Given below is an example of the request along with optional query parameters. For a complete list of query parameters and their description, see the [table](#query-parameters) below.
 
 ```java
-GET https://api.symbl.ai/v1/conversations?limit=2&order=desc&sort=conversation.name&offset=2&startTime=2021-08-09T18:30:00.000Z&endTime=2021-08-13T18:30:00.000Z
+GET https://api.symbl.ai/v1/conversations
+?limit=2
+&order=desc
+&sort=conversation.name
+&offset=2
+&startTime=2021-08-09T18:30:00.000Z
+&endTime=2021-08-13T18:30:00.000Z
 ```
 
 ### Query Parameters
diff --git a/streamingapi/reference/reference.md b/streamingapi/reference/reference.md
index 69d2d1bb..38427ecb 100644
--- a/streamingapi/reference/reference.md
+++ b/streamingapi/reference/reference.md
@@ -13,7 +13,7 @@ import TabItem from '@theme/TabItem';
 Symbl's Streaming API is based on WebSocket protocol and can be used for real-time use-cases where both the audio and its results from Symbl's back-end need to be available in real-time.
 
 :::info
-Currently, Streaming API is supported only in English language.
+Currently, the Streaming API is supported in English. Support for Spanish is also available in the Labs environment.
 :::
 
 ## Request Parameters
diff --git a/web-sdk/overview.md b/web-sdk/overview.md
index 8ccb9f48..ce5c0030 100644
--- a/web-sdk/overview.md
+++ b/web-sdk/overview.md
@@ -11,6 +11,14 @@ import TabItem from '@theme/TabItem';
 
 The Symbl Web SDK provides access to the Symbl APIs for applications in the browser directly.
 
+We have added the following capabilities in the Web SDK:
+
+- Connecting
+- Stopping
+- Muting
+- Unmuting
+- Subscribing
+
 > **Source Code**
Find the source code here: [https://github.com/symblai/symbl-web-sdk](https://github.com/symblai/symbl-web-sdk).
@@ -30,16 +38,20 @@ Find the source code here: [https://github.com/symblai/symbl-web-sdk](https://gi --- **To use the Symbl Web SDK,** -Include it via script tags in your HTML file: +1. Include the following script tags in your HTML file: ```html ``` -In case of a front-end web application using a framework such as React, import it in the ES2015 style: +2. In case of a front-end web application using a framework such as React, import it in the ES2015 style, as given below: ```bash import symbl from "@symblai/symbl-web-sdk"; ``` +To view a full example that shows the above, check out the links below: + +- HTML sample +- React app sample ## Initialization --- @@ -87,18 +99,7 @@ The Web SDK is also available as a part of [Symbl Labs](/docs/labs) with select ### Features in Labs -The following features are available in Labs for Web SDK. For more details, go to the [GitHub Readme]([https://github.com/symblai/symbl-web-sdk/tree/labs](https://github.com/symblai/symbl-web-sdk/tree/labs)): - -| Parameter | Required | Description | -| -------| ---------- | --------- | -|`disconnectonOnStopRequest` | Optional, default: true | If set to `false` the WebSocket will be set to a non-processing state if the `stop_request` event is set. In this state, the connection can be re-opened if the `start_request` event is sent. If `true` the WebSocket connection will close as normal. -|`disconnectOnStopRequestTimeout` | Optional | Accepts a value of 0 to 1800 seconds. Indicates how long this connection will remain in a non-processing state before timing out. | -|`noConnectionTimeout` | Optional | Accepts a value of 0 to 1800 seconds. Indicates how long a connection will remain active even when no one is connected. By using the same connectionId anyone can reconnect to this WebSocket before it times out completely.| -|`sourceNode` | Optional, default: null | For passing in an external `MediaStreamAudioSourceNode` object. By default the Web SDK will handle audio context and source nodes on it's own, though if you wish to handle that externally we've provided that option.| -|`config.encoding` | Optional, default: 'linear16' | Accepts either 'opus' or 'linear16'. For linear16, you must set the sampleRateHertz option. For opus the sampleRateHertz should always be 48000. | -| `handlers.ondevicechange` | Optional | By default Symbl Web SDK will provide the ondevicehandler logic, which just takes the new device and sends the sample rate over to our servers. If you wish to override this logic you can do so by passing an ondevicechange function into the handlers section of the config. You can assign a function to symbl.deviceChanged as a callback to when the event is fired. -| `reconnectOnError` | Optional, default: true | If true the Web SDK will attempt to reconnect to the WebSocket in case of error. - +The following features are available in Labs for Web SDK. For more details, go to the [GitHub Readme]([https://github.com/symblai/symbl-web-sdk/tree/labs](https://github.com/symblai/symbl-web-sdk/tree/labs)) ::: ## Tutorials diff --git a/web-sdk/subscribe-real-time.md b/web-sdk/subscribe-real-time.md index 5e6e1b61..75c05887 100644 --- a/web-sdk/subscribe-real-time.md +++ b/web-sdk/subscribe-real-time.md @@ -12,13 +12,6 @@ import TabItem from '@theme/TabItem'; The Symbl Web SDK lets you subscribe to real-time events when you connect to one of the Endpoints specified in the above sections. You must open this example in a different browser while the realtime transcription example is running. 
-These include: - -* Real-Time Transcription -* Real-Time Insights -* Real-Time Messages -* Real-Time Intents - The below example shows how to achieve this: ```js diff --git a/web-sdk/transcribing-live-audio-through-microphone.md b/web-sdk/transcribing-live-audio-through-microphone.md index 62793405..00618aeb 100644 --- a/web-sdk/transcribing-live-audio-through-microphone.md +++ b/web-sdk/transcribing-live-audio-through-microphone.md @@ -10,9 +10,19 @@ import TabItem from '@theme/TabItem'; --- -As a simple test of the Streaming API you can simply setup a live microphone and push the audio stream using the browser APIs to access the microphone. +To get transcriptions live setup a live microphone and push the audio stream using the browser APIs to access the microphone. -Initialize the SDK and connect via the built-in websocket connector. This will output the live transcription to the console. +On this page, the following capabilities are explained: + +- Connecting, +- Stopping, +- Muting, +- Unmuting and +- Subscribing. + +:::note +To see the code for Reconnecting function, go to [Reconnect to existing realtime Connection](/docs/web-sdk/reconnecting-real-time) page. +::: ### Initialize the SDK @@ -34,132 +44,159 @@ See the steps to get your API Credentials in the [Authentication](/docs/develope The `symbl.startRealtimeRequest` function creates a new AudioContext, so the call must be made on user interaction, such as a button click. ::: -```js -const id = btoa("symbl-ai-is-the-best"); - -const connectionConfig = { - id, - insightTypes: ['action_item', 'question'], - config: { - meetingTitle: 'My Test Meeting ' + id, - confidenceThreshold: 0.7, - timezoneOffset: 480, // Offset in minutes from UTC - languageCode: 'en-US', - // sampleRateHertz: 48000 - }, - speaker: { - // Optional, if not specified, will simply not send an email in the end. - userId: '', // Update with valid email - name: '' - }, - handlers: { - /** - * This will return live speech-to-text transcription of the call. - */ - onSpeechDetected: (data) => { - if (data) { - const {punctuated} = data - console.log('Live: ', punctuated && punctuated.transcript) - console.log(''); - } - // console.log('onSpeechDetected ', JSON.stringify(data, null, 2)); - }, - /** - * When processed messages are available, this callback will be called. - */ - onMessageResponse: (data) => { - // console.log('onMessageResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects an insight, this callback will be called. - */ - onInsightResponse: (data) => { - // console.log('onInsightResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects a topic, this callback will be called. - */ - onTopicResponse: (data) => { - // console.log('onTopicResponse', JSON.stringify(data, null, 2)) +#### HTML Sample + +```html + + + + + +​ + + + + +​ + +​ + +
+

Live transcription:

+

+
+​ + + + ``` -### Full Code Sample +### Reference -```js -symbl.init({ - appId: '', - appSecret: '', - // accessToken: '', // can be used instead of appId and appSecret - // basePath: '', -}); - -const id = btoa("symbl-ai-is-the-best"); - -const connectionConfig = { - id, - insightTypes: ['action_item', 'question'], - config: { - meetingTitle: 'My Test Meeting ' + id, - confidenceThreshold: 0.7, - timezoneOffset: 480, // Offset in minutes from UTC - languageCode: 'en-US', - // sampleRateHertz: 48000 - }, - speaker: { - // Optional, if not specified, will simply not send an email in the end. - userId: '', // Update with valid email - name: '' - }, - handlers: { - /** - * This will return live speech-to-text transcription of the call. - */ - onSpeechDetected: (data) => { - if (data) { - const {punctuated} = data - console.log('Live: ', punctuated && punctuated.transcript) - console.log(''); - } - // console.log('onSpeechDetected ', JSON.stringify(data, null, 2)); - }, - /** - * When processed messages are available, this callback will be called. - */ - onMessageResponse: (data) => { - // console.log('onMessageResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects an insight, this callback will be called. - */ - onInsightResponse: (data) => { - // console.log('onInsightResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects a topic, this callback will be called. - */ - onTopicResponse: (data) => { - // console.log('onTopicResponse', JSON.stringify(data, null, 2)) - } - } -}; +Read about the Streaming API parameters: -(async () => { - const connection = await symbl.startRealtimeRequest(connectionConfig, true); -})(); -``` +    👉   [insightTypes](/docs/streaming-api/api-reference#main-message-body)
+    👉   [config](/docs/streaming-api/api-reference#config)
+    👉   [meetingTitle](/docs/streaming-api/api-reference#config)
+    👉   [confidenceThreshold](/docs/streaming-api/api-reference#config)
+    👉   [timezoneOffset](/docs/streaming-api/api-reference#config)
+    👉   [languageCode](/docs/streaming-api/api-reference#config)
+    👉   [sampleRateHertz](/docs/streaming-api/api-reference#speech-recognition)
+    👉   [speaker](/docs/streaming-api/api-reference#speaker)
+    👉   [userId](/docs/streaming-api/api-reference#speaker)
+    👉   [name](/docs/streaming-api/api-reference#speaker)
+    👉   [startRealtimeRequest](/docs/web-sdk/web-sdk-reference#startrealtimerequest)
+    👉   [subscribeToStream](/docs/web-sdk/web-sdk-reference#subscribetostream)
-Read more about the supported Event Handlers: +Read more about the supported [Event Handlers](/docs/web-sdk/web-sdk-reference#event-handlers):     👉   [onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
    👉   [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
    👉   [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
    👉   [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse) + \ No newline at end of file diff --git a/web-sdk/web-sdk-html.md b/web-sdk/web-sdk-html.md new file mode 100644 index 00000000..81a3cb56 --- /dev/null +++ b/web-sdk/web-sdk-html.md @@ -0,0 +1,9 @@ +--- +id: web-sdk-html +title: Web SDK HTML sample +slug: /web-sdk/web-sdk-html +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- diff --git a/web-sdk/web-sdk-reference.md b/web-sdk/web-sdk-reference.md index 7a715356..bb409079 100644 --- a/web-sdk/web-sdk-reference.md +++ b/web-sdk/web-sdk-reference.md @@ -273,3 +273,14 @@ This callback provides you with any of the detected topics in real-time as they "type": "topic" }] ``` +### subscribeToStream + +This callback allows you to subcribe + +The `subscribeToStream` function allows you to subscribe to existing streaming connection in read-only. It takes the following parameters: + +| Parameters | Type | Example | +| ---------- | ------- | ------- | +| `id` | String | Connection ID created on connection `init`| + +This is a function of our [Subscribe API](/docs/subscribe-api). \ No newline at end of file From b70a5597441ab90f52d742b25cd2abc38f78e3ff Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Wed, 15 Dec 2021 15:45:28 +0530 Subject: [PATCH 55/64] 15-12 (Updated) --- pre-built-ui/text-summary-ui.md | 2 +- pre-built-ui/video-summary-ui.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pre-built-ui/text-summary-ui.md b/pre-built-ui/text-summary-ui.md index c2813a52..511ef829 100644 --- a/pre-built-ui/text-summary-ui.md +++ b/pre-built-ui/text-summary-ui.md @@ -35,6 +35,6 @@ The Text Summary UI displays the following details: ### Tutorials - [Creating Text Summary UI](/docs/tutorials/pre-built-summary-ui/creating-text-summary-ui) -- [Tuning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page) +- [Tuning your Summary Page](/docs/tutorials/pre-built-ui/tuning-summary-page) - [White label your Summary Page](/docs/tutorials/pre-built-summary-ui/whitelabeling-summary-ui) - [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain) \ No newline at end of file diff --git a/pre-built-ui/video-summary-ui.md b/pre-built-ui/video-summary-ui.md index ba97d01b..a0a1733f 100644 --- a/pre-built-ui/video-summary-ui.md +++ b/pre-built-ui/video-summary-ui.md @@ -73,6 +73,6 @@ The videoUrl only takes precedence when there is no Video present in the UI. 
### Tutorials - [Creating Video Summary UI](/docs/tutorials/pre-built-summary-ui/creating-video-summary-ui) -- [Tuning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page) +- [Tuning your Summary Page](/docs/tutorials/pre-built-ui/tuning-summary-page) - [White label your Summary Page](/docs/tutorials/pre-built-summary-ui/whitelabeling-summary-ui) - [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain) \ No newline at end of file From d07843a714731e74e0c90ba2b2d05ca889c42b06 Mon Sep 17 00:00:00 2001 From: pema-s <81958801+pema-s@users.noreply.github.com> Date: Thu, 16 Dec 2021 09:20:23 +0530 Subject: [PATCH 56/64] Removes Web SDK --- changelog.md | 7 - web-sdk/labs-features.md | 158 ---------- .../muting-and-unmuting-connected-device.md | 13 - web-sdk/overview.md | 129 -------- ...necting-to-existing-realtime-connection.md | 79 ----- web-sdk/stopping-realtime-connection.md | 18 -- web-sdk/subscribe-real-time.md | 36 --- ...nscribing-live-audio-through-microphone.md | 202 ------------- web-sdk/web-sdk-html.md | 9 - web-sdk/web-sdk-reference.md | 286 ------------------ 10 files changed, 937 deletions(-) delete mode 100644 web-sdk/labs-features.md delete mode 100644 web-sdk/muting-and-unmuting-connected-device.md delete mode 100644 web-sdk/overview.md delete mode 100644 web-sdk/reconnecting-to-existing-realtime-connection.md delete mode 100644 web-sdk/stopping-realtime-connection.md delete mode 100644 web-sdk/subscribe-real-time.md delete mode 100644 web-sdk/transcribing-live-audio-through-microphone.md delete mode 100644 web-sdk/web-sdk-html.md delete mode 100644 web-sdk/web-sdk-reference.md diff --git a/changelog.md b/changelog.md index e0b44bff..4a32f5e2 100644 --- a/changelog.md +++ b/changelog.md @@ -17,13 +17,6 @@ We continuously add new features and enhancements, fix critical bugs, and regula - **Added support for generating Trackers in Spanish Language** (LABS)
[Read more here](/docs/streamingapi/code-snippets/receive-trackers-in-spanish). - -![sdk](/img/sdk-icon.png) -- **Web SDK Availability** (BETA + LABS)
-Availability of web based JavaScript SDK for Streaming API. Some of the features include transcribing real-time audio, connecting, muting/unmutung, reconnecting, subscribing to events and stopping connections. -[Read more here](/docs/web-sdk/overview). - - ### 22 Oct 2021 ![api update](/img/api-update.png) - **Added support for generating Summary for only the new Transcripts of a Conversation** (LABS)
diff --git a/web-sdk/labs-features.md b/web-sdk/labs-features.md deleted file mode 100644 index 936dd867..00000000 --- a/web-sdk/labs-features.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -id: web-sdk-labs -title: Web SDK (Labs) -sidebar_label: Labs Features - ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- - - -You can use labs features by setting your basePath in the init call to `https://api-labs.symbl.ai`. - -## New Configurations - -| Parameter | Required | Description | -| -------| ---------- | --------- | -|`disconnectonOnStopRequest` | Optional, default: true | If set to `false` the WebSocket will be set to a non-processing state if the `stop_request` event is set. In this state, the connection can be re-opened if the `start_request` event is sent. If `true` the WebSocket connection will close as normal. -|`disconnectOnStopRequestTimeout` | Optional | Accepts a value of 0 to 1800 seconds. Indicates how long this connection will remain in a non-processing state before timing out. | -|`noConnectionTimeout` | Optional | Accepts a value of 0 to 1800 seconds. Indicates how long a connection will remain active even when no one is connected. By using the same connectionId anyone can reconnect to this WebSocket before it times out completely.| -|`sourceNode` | Optional, default: null | For passing in an external `MediaStreamAudioSourceNode` object. By default the Web SDK will handle audio context and source nodes on it's own, though if you wish to handle that externally we've provided that option.| -|`config.encoding` | Optional, default: 'linear16' | Accepts either 'opus' or 'linear16'. For linear16, you must set the sampleRateHertz option. For opus the sampleRateHertz should always be 48000. | -| `handlers.ondevicechange` | Optional | By default Symbl Web SDK will provide the ondevicehandler logic, which just takes the new device and sends the sample rate over to our servers. If you wish to override this logic you can do so by passing an ondevicechange function into the handlers section of the config. You can assign a function to symbl.deviceChanged as a callback to when the event is fired. -| `reconnectOnError` | Optional, default: true | If true the Web SDK will attempt to reconnect to the WebSocket in case of error. - -### Example - -```js -const id = btoa("symbl-ai-is-the-best"); - -const connectionConfig = { - id, - insightTypes: ['action_item', 'question'], - disonnectOnStopRequest: false, - disconnectOnStopRequestTimeout: 300, - noConnectionTimeout: 300, - sourceNode: sourceNode, - reconnectOnError: true, - config { - encoding: 'opus', - sampleRateHertz: 48000 - }, - handlers: { - ondevicechange: () => { - alert('device changed!'); - }, - ... - } - ... -} - -... - -// Creates the WebSocket in a non-processing state -const stream = await symbl.createStream(connectionConfig); - -// Send the start request -await symbl.unmute(stream); -``` - -### Using createStream to start a Realtime Request -Creating a stream using `symbl.startRealtimeRequest(config)` has been deprecated in favor of `symbl.createStream(config)`. For `createStream`, the WebSocket is started in a non processing state. You must send the start request before processing any audio. - -The `createStream` function returns a stream object. In order to start the connection you can call `symbl.unmute(stream)`. Unmute will send the start request and start the audio streaming. 
-
-### Using Mute/Unmute to Pause a Connection
-If you set the `disconnectOnStopRequest` flag to `false` you can use `symbl.mute(stream)` and `symbl.unmute(stream)` to suspend and resume the connection. Muting the connection ensures you are not billed during periods of silence.
-
-#### unmute(stream)
-Receives the stream returned from `createStream` as an argument. Unmutes the audio by setting the gain value to 1. If the `disconnectOnStopRequest` config is set to `false`, the start request is sent to the WebSocket and the audio context starts.
-
-#### mute(stream)
-Receives the stream returned from `createStream` as an argument. Mutes the audio by setting the gain value to 0. If the `disconnectOnStopRequest` config is set to `false`, the stop request is sent to the WebSocket and the audio context is suspended.
-
-### Use disconnectOnStopRequest to Pause and Resume a Stream
-```js
-symbl.init({
-  appId: '',
-  appSecret: '',
-  // accessToken: '', // can be used instead of appId and appSecret
-  basePath: 'https://api-labs.symbl.ai',
-});
-
-const id = btoa("symbl-ai-is-the-best");
-
-const connectionConfig = {
-  id,
-  insightTypes: ['action_item', 'question'],
-  disconnectOnStopRequest: false,
-  disconnectOnStopRequestTimeout: 300,
-  noConnectionTimeout: 300,
-  config: {
-    meetingTitle: 'My Test Meeting ' + id,
-    confidenceThreshold: 0.7,
-    timezoneOffset: 480, // Offset in minutes from UTC
-    languageCode: 'en-US',
-    encoding: 'opus',
-    sampleRateHertz: 48000
-  },
-  speaker: {
-    // Optional, if not specified, will simply not send an email in the end.
-    userId: '', // Update with valid email
-    name: ''
-  },
-  handlers: {
-    /**
-     * This will return live speech-to-text transcription of the call.
-     */
-    onSpeechDetected: (data) => {
-      if (data) {
-        const {punctuated} = data
-        console.log('Live: ', punctuated && punctuated.transcript)
-        console.log('');
-      }
-      // console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
-    },
-    /**
-     * When processed messages are available, this callback will be called.
-     */
-    onMessageResponse: (data) => {
-      // console.log('onMessageResponse', JSON.stringify(data, null, 2))
-    },
-    /**
-     * When Symbl detects an insight, this callback will be called.
-     */
-    onInsightResponse: (data) => {
-      // console.log('onInsightResponse', JSON.stringify(data, null, 2))
-    },
-    /**
-     * When Symbl detects a topic, this callback will be called.
-     */
-    onTopicResponse: (data) => {
-      // console.log('onTopicResponse', JSON.stringify(data, null, 2))
-    }
-  }
-};
-
-(async () => {
-  // Creates the WebSocket in a non-processing state
-  const stream = await symbl.createStream(connectionConfig);
-
-  // Send the start request
-  await symbl.unmute(stream);
-
-  // Suspend the stream after 10 seconds
-  window.setTimeout(async () => {
-    await symbl.mute(stream);
-  }, 10000)
-
-  // Re-send the start request to resume the stream after another 10 seconds
-  window.setTimeout(async () => {
-    await symbl.unmute(stream);
-  }, 20000)
-})();
-
-```
\ No newline at end of file
diff --git a/web-sdk/muting-and-unmuting-connected-device.md b/web-sdk/muting-and-unmuting-connected-device.md
deleted file mode 100644
index 6c299a41..00000000
--- a/web-sdk/muting-and-unmuting-connected-device.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-id: muting-and-unmuting-connected-device
-title: Muting and Unmuting Connected Device (Beta)
-sidebar_label: Muting and Unmuting Connected Device
-
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-You can mute and unmute the connected device by simply calling `symbl.mute()` or `symbl.unmute()`.
diff --git a/web-sdk/overview.md b/web-sdk/overview.md
deleted file mode 100644
index ce5c0030..00000000
--- a/web-sdk/overview.md
+++ /dev/null
@@ -1,129 +0,0 @@
----
-id: web-sdk
-title: Symbl Web SDK (Beta)
-sidebar_label: Introduction
-slug: /web-sdk/overview
----
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
----
-
-The Symbl Web SDK provides direct access to the Symbl APIs from applications in the browser.
-
-We have added the following capabilities in the Web SDK:
-
-- Connecting,
-- Stopping,
-- Muting,
-- Unmuting and
-- Subscribing.
-
-> **Source Code**
-Find the source code here: [https://github.com/symblai/symbl-web-sdk](https://github.com/symblai/symbl-web-sdk).
-
-
-## Supported Browsers
----
-
- |-- | Chrome | Edge | Firefox | Safari |
- | -------| ---------- | ------- | ----- | ------- |
- | macOS | ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) |
- | Windows | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png) | |
- | Linux | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | |
- | iOS | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) |
- | Android | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) |
-
-## Setup
----
-**To use the Symbl Web SDK,**
-
-1. Include the following script tags in your HTML file:
-
-```html
-
-```
-2. In case of a front-end web application using a framework such as React, import it in the ES2015 style, as given below:
-
-```js
-import symbl from "@symblai/symbl-web-sdk";
-```
-To view a full example that shows the above, check out the links below:
-
-- HTML sample
-- React app sample
-
-## Initialization
----
-The `init` call authenticates you to use the Symbl API using the provided authentication credentials. To get authentication credentials (App ID and Secret), follow the steps given in the [Authentication](/docs/developer-tools/authentication#step-1-get-your-api-credentials) page.
-
-You can authenticate:
-
-- [Using your API Credentials](#authenticate-using-api-credentials)
-
-                or
-
-- [Using your Auth Token](#authenticate-using-token)
-
-### Authenticate using API Credentials
-
-Use the code given below to authenticate using your App ID and App Secret.
-
-```js
-sdk.init({
-  // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai
-  appId: APP_ID,
-  appSecret: APP_SECRET,
-  basePath: 'https://api.symbl.ai'
-})
-.then(() => console.log('SDK Initialized.'))
-.catch(err => console.error('Error in initialization.', err));
-```
-
-### Authenticate using Token
-
-Use the code given below to authenticate using the Auth Token. To generate the Auth Token follow the steps given in the [Authentication](/docs/developer-tools/authentication#step-2-generate-the-access-token) page.
-
-```js
-sdk.init({
-  accessToken: ACCESS_TOKEN_HERE,
-  basePath: 'https://api.symbl.ai'
-})
-.then(() => console.log('SDK Initialized.'))
-.catch(err => console.error('Error in initialization.', err));
-```
-
-
-:::note Web SDK in Labs
-The Web SDK is also available as a part of [Symbl Labs](/docs/labs) with select features. You can find the Web SDK Labs documentation and source code here: [https://github.com/symblai/symbl-web-sdk/tree/labs](https://github.com/symblai/symbl-web-sdk/tree/labs).
-
-### Features in Labs
-
-The following features are available in Labs for the Web SDK. For more details, go to the [GitHub Readme](https://github.com/symblai/symbl-web-sdk/tree/labs).
-:::
-
-## Tutorials
----
-We have prepared a list of tutorials to help you understand how to use the Web SDK.
- -* [Transcribing Live Audio Input through Microphone](/docs/web-sdk/transcribing-live-audio-through-microphone) - - -### Code Snippets ---- - -* [Subscribe to real-time Events](/docs/web-sdk/subscribe-real-time) -* [Reconnecting to an Existing Real-time Connection](/docs/web-sdk/reconnecting-real-time) -* [Muting and Unmuting the Connected Device](/docs/web-sdk/muting-and-unmuting-connected-device) -* [Stopping Real-time Connection](/docs/web-sdk/stopping-real-time) - - -### Web SDK Reference ---- -The supported events for the Web SDK are listed below: - -* [Event Handlers](/docs/javascript-sdk/reference#event-handlers-1) - * [onSpeechDetected](/docs/javascript-sdk/reference#onspeechdetected) - * [onMessageResponse](/docs/javascript-sdk/reference#onmessageresponse) - * [onInsightResponse](/docs/javascript-sdk/reference#oninsightresponse) - * [onTopicResponse](/docs/javascript-sdk/reference#ontopicresponse) diff --git a/web-sdk/reconnecting-to-existing-realtime-connection.md b/web-sdk/reconnecting-to-existing-realtime-connection.md deleted file mode 100644 index 020d5d35..00000000 --- a/web-sdk/reconnecting-to-existing-realtime-connection.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -id: reconnecting-real-time -title: Reconnecting to an Existing Real-time Connection (Beta) -sidebar_label: Reconnecting to an Existing Real-time Connection -slug: /web-sdk/reconnecting-real-time ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- - -In the case that a user closes their browser or has an interruption in their WebSocket connection you can use the `store` object to grab the Connection ID you last used. - -```js -const id = symbl.store.get('connectionID'); - -const connectionConfig = { - id, - insightTypes: ['action_item', 'question'], - config: { - meetingTitle: 'My Test Meeting ' + id, - confidenceThreshold: 0.7, - timezoneOffset: 480, // Offset in minutes from UTC - languageCode: 'en-US', - sampleRateHertz: 44100 - }, - speaker: { - // Optional, if not specified, will simply not send an email in the end. - userId: '', // Update with valid email - name: '' - }, - handlers: { - /** - * This will return live speech-to-text transcription of the call. - */ - onSpeechDetected: (data) => { - if (data) { - const {punctuated} = data - console.log('Live: ', punctuated && punctuated.transcript) - console.log(''); - } - // console.log('onSpeechDetected ', JSON.stringify(data, null, 2)); - }, - /** - * When processed messages are available, this callback will be called. - */ - onMessageResponse: (data) => { - // console.log('onMessageResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects an insight, this callback will be called. - */ - onInsightResponse: (data) => { - // console.log('onInsightResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects a topic, this callback will be called. - */ - onTopicResponse: (data) => { - // console.log('onTopicResponse', JSON.stringify(data, null, 2)) - } - } -}; - -(async () => { - const connection = await symbl.startRealtimeRequest(connectionConfig, true); -})(); -``` - -The `startRealtimeRequest` connects to a Streaming API Web Socket endpoint using the provided configuration options. Read more about `startRealtimeRequest` [here](/docs/web-sdk/web-sdk-reference#startrealtimerequest). - -Read about the Streaming API parameters for `connectionConfig` [here](/docs/streaming-api/api-reference/#request-parameters). 
- -Read more about the supported Event Handlers: - -    👉   [onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
-    👉   [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
-    👉   [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
-    👉   [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse) \ No newline at end of file diff --git a/web-sdk/stopping-realtime-connection.md b/web-sdk/stopping-realtime-connection.md deleted file mode 100644 index a9418d98..00000000 --- a/web-sdk/stopping-realtime-connection.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -id: stopping-real-time -title: Stopping Real-time Connection (Beta) -sidebar_label: Stopping Real-time Connection -slug: /web-sdk/stopping-real-time ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- - -In order to end the connection to the realtime WebSocket you'll need to use the following command with your connection object: - -```js -symbl.stopRequest(connection); -``` - -It is recommended to always end the connection programmatically if you do not sever the connection as you could end up using more minutes of time than intended. \ No newline at end of file diff --git a/web-sdk/subscribe-real-time.md b/web-sdk/subscribe-real-time.md deleted file mode 100644 index 75c05887..00000000 --- a/web-sdk/subscribe-real-time.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -id: web-subscribe-real-time -title: Subscribing to an Existing Real-time Connection (Beta) -sidebar_label: Subscribe to an Existing Real-time Connection -slug: /web-sdk/subscribe-real-time ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- - -The Symbl Web SDK lets you subscribe to real-time events when you connect to one of the Endpoints specified in the above sections. -You must open this example in a different browser while the realtime transcription example is running. - -The below example shows how to achieve this: - -```js -symbl.init({ - appId: '', - appSecret: '', - // accessToken: '', // can be used instead of appId and appSecret - // basePath: '', -}); - -const id = btoa("symbl-ai-is-the-best"); - -symbl.subscribeToStream(id, (data) => { - console.log('data:', data); -}) -``` - -The `subscribeToStream` function allows you to subscribe to existing streaming connection in read-only. It takes the following parameters: - -| Parameters | Type | Example | -| ---------- | ------- | ------- | -| `id` | String | Connection ID created on connection `init`| diff --git a/web-sdk/transcribing-live-audio-through-microphone.md b/web-sdk/transcribing-live-audio-through-microphone.md deleted file mode 100644 index 00618aeb..00000000 --- a/web-sdk/transcribing-live-audio-through-microphone.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -id: transcribing-live-audio-through-microphone -title: Transcribing Live Audio through Microphone (Beta) -sidebar_label: Transcribing Live Audio through Microphone - ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- - -To get transcriptions live setup a live microphone and push the audio stream using the browser APIs to access the microphone. - -On this page, the following capabilities are explained: - -- Connecting, -- Stopping, -- Muting, -- Unmuting and -- Subscribing. - -:::note -To see the code for Reconnecting function, go to [Reconnect to existing realtime Connection](/docs/web-sdk/reconnecting-real-time) page. -::: - -### Initialize the SDK - -```js -symbl.init({ - appId: '', - appSecret: '', - // accessToken: '', // can be used instead of appId and appSecret - // basePath: '', -}); -``` - -You can get the `appId` and `appSecret` from the [Symbl Platform](https://platform.symbl.ai). 
-See the steps to get your API Credentials in the [Authentication](/docs/developer-tools/authentication) section. - -### Start the Connection and pass Configuration Options - -:::note -The `symbl.startRealtimeRequest` function creates a new AudioContext, so the call must be made on user interaction, such as a button click. -::: - -#### HTML Sample - -```html - - - - - -​ - - - - -​ - -​ - -
-

Live transcription:

-

-
-​ - - - -``` - -### Reference - -Read about the Streaming API parameters: - -    👉   [insightTypes](/docs/streaming-api/api-reference#main-message-body)
-    👉   [config](/docs/streaming-api/api-reference#config)
-    👉   [meetingTitle](/docs/streaming-api/api-reference#config)
-    👉   [confidenceThreshold](/docs/streaming-api/api-reference#config)
-    👉   [timezoneOffset](/docs/streaming-api/api-reference#config)
-    👉   [languageCode](/docs/streaming-api/api-reference#config)
-    👉   [sampleRateHertz](/docs/streaming-api/api-reference#speech-recognition)
-    👉   [speaker](/docs/streaming-api/api-reference#speaker)
-    👉   [userId](/docs/streaming-api/api-reference#speaker)
-    👉   [name](/docs/streaming-api/api-reference#speaker)
-    👉   [startRealtimeRequest](/docs/web-sdk/web-sdk-reference#startrealtimerequest)
-    👉   [subscribeToStream](/docs/web-sdk/web-sdk-reference#subscribetostream)
- - -Read more about the supported [Event Handlers](/docs/web-sdk/web-sdk-reference#event-handlers): - -    👉   [onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
-    👉   [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
-    👉   [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
-    👉   [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse) - - \ No newline at end of file diff --git a/web-sdk/web-sdk-html.md b/web-sdk/web-sdk-html.md deleted file mode 100644 index 81a3cb56..00000000 --- a/web-sdk/web-sdk-html.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -id: web-sdk-html -title: Web SDK HTML sample -slug: /web-sdk/web-sdk-html ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- diff --git a/web-sdk/web-sdk-reference.md b/web-sdk/web-sdk-reference.md deleted file mode 100644 index bb409079..00000000 --- a/web-sdk/web-sdk-reference.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -id: web-sdk-reference -title: Web SDK Reference -slug: /web-sdk/web-sdk-reference ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - ---- - - -## Public Methods - -### init - -```init (String appId, String appSecret)``` - -Authenticates with the Symbl API using the provided authentication credentials. - -#### Parameters - -Name | Description ------|------------ -`appId` | The Symbl Application ID you get from the [Symbl Platform](https://platform.symbl.ai) -`appSecret` | The Symbl Application Secret Token you get from the [Symbl Platform](https://platform.symbl.ai) -`basePath` | The base path of the endpoint. By default it is `https://api.symbl.ai`. -`accessToken` | The Symbl authentication Token you get from your `appId` and `appSecret`. This is an optional parameter you can use to authenticate using auth Token rather than the App ID and App Secret. See sample code [here](/docs/javascript-sdk/introduction#authenticate-using-token). - -#### Returns - -A Promise which is resolved once the API is connected and authenticated with Symbl. - -#### Code Example - -```js -sdk.init({ - // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai - appId: APP_ID, - appSecret: APP_SECRET, - basePath: 'https://api.symbl.ai' -}) -.then(() => console.log('SDK Initialized.')) -.catch(err => console.error('Error in initialization.', err)); -``` -### startRealtimeRequest - -```startRealtimeRequest ( options)``` - -Connects to a [Streaming API](/docs/streamingapi/overview/introduction) Web Socket endpoint using the provided configuration options. - -#### Parameters - -Name | Description ------|------------ -`options` | Options specified for the [Streaming API Configuration Object](https://docs.symbl.ai/docs/streaming-api/api-reference#request-parameters). - -#### Returns - -A Promise which is resolved once real-time request has been established. - -## Event Handlers - -When connecting using [`startRealtimeRequest`](#startRealtimeRequest), you can pass various handlers in the configuration options which be called if the specific event attached to the handler is fired. - -#### Code Example - -```js -handlers: { - /** - * This will return live speech-to-text transcription of the call. - */ - onSpeechDetected: (data) => { - console.log(JSON.stringify(data)) - if (data) { - const {punctuated} = data - console.log('Live: ', punctuated && punctuated.transcript) - } - }, - /** - * When processed messages are available, this callback will be called. - */ - onMessageResponse: (data) => { - console.log('onMessageResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects an insight, this callback will be called. - */ - onInsightResponse: (data) => { - console.log('onInsightResponse', JSON.stringify(data, null, 2)) - }, - /** - * When Symbl detects a topic, this callback will be called. 
- */ - onTopicResponse: (data) => { - console.log('onTopicResponse', JSON.stringify(data, null, 2)) - } -} -``` - -### onSpeechDetected - -To retrieve the real-time transcription results as soon as they are detected. You can use this callback to render live transcription which is specific to the speaker of this audio stream. - -#### onSpeechDetected JSON Response Example - -```js -{ - "type": "recognition_result", - "isFinal": true, - "payload": { - "raw": { - "alternatives": [{ - "words": [{ - "word": "Hello", - "startTime": { - "seconds": "3", - "nanos": "800000000" - }, - "endTime": { - "seconds": "4", - "nanos": "200000000" - } - }, { - "word": "world.", - "startTime": { - "seconds": "4", - "nanos": "200000000" - }, - "endTime": { - "seconds": "4", - "nanos": "800000000" - } - }], - "transcript": "Hello world.", - "confidence": 0.9128385782241821 - }] - } - }, - "punctuated": { - "transcript": "Hello world." - }, - "user": { - "userId": "emailAddress", - "name": "John Doe", - "id": "23681108-355b-4fc3-9d94-ed47dd39fa56" - } -} -``` - -### onMessageResponse - -This callback function contains the "finalized" transcription data for this speaker and if used with multiple streams with other speakers this callback would also provide their messages. - -The "finalized" messages mean that the automatic speech recognition has finalized the state of this part of transcription and has declared it "final". Therefore, this transcription will be more accurate than [`onSpeechDetected`](#onspeechdetected). - -#### onMessageResponse JSON Response Example - -```js -[{ - "from": { - "id": "0a7a36b1-047d-4d8c-8958-910317ed9edc", - "name": "John Doe", - "userId": "emailAddress" - }, - "payload": { - "content": "Hello world.", - "contentType": "text/plain" - }, - "id": "59c224c2-54c5-4762-9582-961bf250b478", - "channel": { - "id": "realtime-api" - }, - "metadata": { - "disablePunctuation": true, - "timezoneOffset": 480, - "originalContent": "Hello world.", - "words": "[{\"word\":\"Hello\",\"startTime\":\"2021-02-04T20:34:59.029Z\",\"endTime\":\"2021-02-04T20:34:59.429Z\"},{\"word\":\"world.\",\"startTime\":\"2021-02-04T20:34:59.429Z\",\"endTime\":\"2021-02-04T20:35:00.029Z\"}]", - "originalMessageId": "59c224c2-54c5-4762-9582-961bf250b478" - }, - "dismissed": false, - "duration": { - "startTime": "2021-02-04T20:34:59.029Z", - "endTime": "2021-02-04T20:35:00.029Z" - } -}] -``` - -### onInsightResponse - -This callback provides you with any of the detected insights in real-time as they are detected. As with the [`onMessageCallback`](#onmessagecallback) this would also return every speaker's insights in case of multiple streams. 
- -#### onInsightResponse JSON Response Example - -```json -[{ - "id": "94020eb9-b688-4d56-945c-a7e5282258cc", - "confidence": 0.9909798145016999, - "messageReference": { - "id": "94020eb9-b688-4d56-945c-a7e5282258cc" - }, - "hints": [{ - "key": "informationScore", - "value": "0.9782608695652174" - }, { - "key": "confidenceScore", - "value": "0.9999962500210938" - }, { - "key": "comprehensionScore", - "value": "0.9983848333358765" - }], - "type": "action_item", - "assignee": { - "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0", - "name": "John Doe", - "userId": "emailAddress" - }, - "dueBy": { - "value": "2021-02-05T00:00:00-07:00" - }, - "tags": [{ - "type": "date", - "text": "today", - "beginOffset": 39, - "value": { - "value": { - "datetime": "2021-02-05" - } - } - }, { - "type": "person", - "text": "John Doe", - "beginOffset": 8, - "value": { - "value": { - "name": "John Doe", - "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0", - "assignee": true, - "userId": "emailAddress" - } - } - }], - "dismissed": false, - "payload": { - "content": "Perhaps John Doe can submit the report today.", - "contentType": "text/plain" - }, - "from": { - "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0", - "name": "John Doe", - "userId": "emailAddress" - } -}] -``` - -### onTopicResponse - -This callback provides you with any of the detected topics in real-time as they are detected. As with the [`onMessageCallback`](#onmessagecallback) this would also return every topic in case of multiple streams. - -#### onTopicResponse JSON Response Example - -```json -[{ - "id": "e69a5556-6729-11eb-ab14-2aee2deabb1b", - "messageReferences": [{ - "id": "0df44422-0248-47e9-8814-e87f63404f2c", - "relation": "text instance" - }], - "phrases": "auto insurance", - "rootWords": [{ - "text": "auto" - }], - "score": 0.9, - "type": "topic" -}] -``` -### subscribeToStream - -This callback allows you to subcribe - -The `subscribeToStream` function allows you to subscribe to existing streaming connection in read-only. It takes the following parameters: - -| Parameters | Type | Example | -| ---------- | ------- | ------- | -| `id` | String | Connection ID created on connection `init`| - -This is a function of our [Subscribe API](/docs/subscribe-api). \ No newline at end of file From c689b149eee819c2d2b65c396e0c321ee8be1d54 Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Thu, 16 Dec 2021 18:00:43 +0530 Subject: [PATCH 57/64] DC-269 --- api-reference/experience-api/post-video-summary-ui.md | 2 +- .../conversation-groups/update-conversation-groups.md | 1 + pre-built-ui/text-summary-ui.md | 4 ++-- pre-built-ui/video-summary-ui.md | 4 ++-- tutorials.md | 6 +++--- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/api-reference/experience-api/post-video-summary-ui.md b/api-reference/experience-api/post-video-summary-ui.md index 2da510c6..c058fbcb 100644 --- a/api-reference/experience-api/post-video-summary-ui.md +++ b/api-reference/experience-api/post-video-summary-ui.md @@ -118,7 +118,7 @@ request.post({ Field | Required | Type | Description ---------- | ------- | ------- | ------- ```name``` | Mandatory | String | For Summary UI use `verbose-text-summary` and for Video Summary UI use `video-summary`. -```videoUrl```| Optional | String | URL of the video file for which you want to generate the `video-summary`. +```videoUrl```| Mandatory | String | URL of the video file for which you want to generate the `video-summary`. 
```logo```| Optional | String | This field accepts public URL for setting custom logo in Video Summary UI(`video-summary`).
-```favicon```| Optional | String | This field accepts public URL for setting custom favicon in Video Summary UI (`video-summary`).
-```color```| Optional | Object | This option can be used to customise the colors of UI background, topics filter and insights filter elements in UI.
diff --git a/management-api/conversation-groups/update-conversation-groups.md b/management-api/conversation-groups/update-conversation-groups.md
index 12077b34..0144b159 100644
--- a/management-api/conversation-groups/update-conversation-groups.md
+++ b/management-api/conversation-groups/update-conversation-groups.md
@@ -63,6 +63,7 @@ Given below is another sample request containing more than one `criteria`. You c
 
 | Parameter | Data Type | Description | Required | Values Accepted |
 |--------|----------|---- | --- | ------|
+`id` | String | `id` is the unique identifier of the Conversation Group you are trying to update. | Mandatory |
 `name` | String | Name of the group. | Mandatory | String with no special characters allowed, except `-`, `_`, and `”`. The maximum length of string allowed 128 characters.
 `description` | String | Description to capture any additional details of the group and its purpose. | Optional | The maximum length of string allowed 512 characters.
 `criteria` | String / RSQL format | Criteria in RSQL format that should be applied to group conversations under this group. | Mandatory | Valid RSQL string. For more information on how to write RSQL queries, click [here](https://github.com/jirutka/rsql-parser).
diff --git a/pre-built-ui/text-summary-ui.md b/pre-built-ui/text-summary-ui.md
index c80caa04..bd4ec38e 100644
--- a/pre-built-ui/text-summary-ui.md
+++ b/pre-built-ui/text-summary-ui.md
@@ -35,6 +35,6 @@ The Text Summary UI displays the following details:
 
 ### Tutorials
 - [Creating Text Summary UI](/docs/tutorials/pre-built-summary-ui/creating-text-summary-ui)
-- [Turning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page)
+- [Tuning your Summary Page](/docs/pre-built-ui/tuning-summary-page)
 - [White label your Summary Page](/docs/tutorials/pre-built-summary-ui/whitelabeling-summary-ui)
-- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain)
+- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain)
\ No newline at end of file
diff --git a/pre-built-ui/video-summary-ui.md b/pre-built-ui/video-summary-ui.md
index dd223b66..c836b272 100644
--- a/pre-built-ui/video-summary-ui.md
+++ b/pre-built-ui/video-summary-ui.md
@@ -73,6 +73,6 @@ The videoUrl only takes precedence when there is no Video present in the UI.
 ### Tutorials
 - [Creating Video Summary UI](/docs/tutorials/pre-built-summary-ui/creating-video-summary-ui)
-- [Turning your Summary Page](/docs/tutorials/pre-built-summary-ui/tuning-summary-page)
+- [Tuning your Summary Page](/docs/pre-built-ui/tuning-summary-page)
 - [White label your Summary Page](/docs/tutorials/pre-built-summary-ui/whitelabeling-summary-ui)
-- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain)
+- [Add custom domain to your Summary Page](/docs/tutorials/pre-built-summary-ui/custom-domain)
\ No newline at end of file
diff --git a/tutorials.md b/tutorials.md
index 052234bd..a7b56761 100644
--- a/tutorials.md
+++ b/tutorials.md
@@ -18,7 +18,7 @@ Find detailed tutorials on how to implement Conversation Intelligence on your co
 
 * [Start and Stop Streaming API Connection](/docs/streamingapi/code-snippets/start-and-stop-connection)
 * [Receive Live Captioning](/docs/streamingapi/code-snippets/receive-live-captioning)
-
+

Async APIs

@@ -31,7 +31,7 @@ Find detailed tutorials on how to implement Conversation Intelligence on your co * [Generate a Pre-built UI From Video Recordings](/docs/async-api/code-snippets/generate-pre-built-ui-from-video-recordings) - +
@@ -45,7 +45,7 @@ Find detailed tutorials on how to implement Conversation Intelligence on your co * [Receive Speech to Text for a different language in a conversation](/docs/telephony/code-snippets/receive-speech-to-text-for-a-different-language) - +
From 7d6bce896e93ddd6dd9d95a4c421dbbf1aca343f Mon Sep 17 00:00:00 2001 From: Pema <81958801+pema-s@users.noreply.github.com> Date: Fri, 17 Dec 2021 13:38:17 +0530 Subject: [PATCH 58/64] Revert "Addition of live examples" --- what-is-symbl.md | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/what-is-symbl.md b/what-is-symbl.md index a2ddcecb..c6b94414 100644 --- a/what-is-symbl.md +++ b/what-is-symbl.md @@ -40,8 +40,6 @@ The transcript is one of the easiest ways to navigate through the entire convers Transcripts can be generated in real-time for voice and video conversations or using a recorded file. They can also be accessed through the post-conversation summary UI. The post-conversation summary page enables editing, copying and sharing of transcripts from the conversation. -For example, -
@@ -52,8 +50,6 @@ They are not detected based on the frequency of their occurrences in the convers Each Topic has a score that indicates the importance of that topic in the context of the entire meeting. It is not that rare that even less frequently mentioned things are of higher importance in the conversation, and this will be reflected in a higher score for those topics, even if other Topics have a high number of mentions in the overall conversation. -For example, -
@@ -63,8 +59,6 @@ Sentiment Analysis is the interpretation of the general thought, feeling, or sen Symbl's Sentiment API works over Speech-to-Text sentences and Topics (or aspect). With Symbl's Sentiment Analysis feature, you can get the intensity of the sentiment and suggest sentiment type as negative, neutral or positive. -For example, -
@@ -83,8 +77,6 @@ The Comprehensive Action Items API is similar to the Action Items API except tha While both are equally powerful in providing Action Items that relate to a discussion, the Comprehensive Action Items API is designed to provide more details such as references to speaker names, context in which the action item was mentioned and an overall comprehensive description of the action items. -For example, -
@@ -112,9 +104,7 @@ Examples: ### ❇️ Trackers (Beta) When it comes to detecting specific or “contextually similar” occurrences of a particular context in any conversation, the most commonly faced challenge is when the speakers talk about the context in general but do not speak the exact phrases. The Trackers API will however detect both exact and similar phrases. -For example, - -“I don’t have any money” is contextually similar to “I ran out of budget” as both represent similar inherent meaning. +For example “I don’t have any money” is contextually similar to “I ran out of budget” as both represent similar inherent meaning. However, after listening/hearing the conversation, it is understood that it indeed has the context that was meant to be detected. @@ -150,16 +140,12 @@ Symbl's Conversational Intelligence distills important conversation messages and Summaries help you save time required to grasp the contents of a conversation. Using Summary API, you can create Summaries in real-time or after the conversation has ended. You can also create Summaries for chat or email messages. -For example, -
### ❇️ Topic Hierarchy (Beta) In any conversation, there can be multiple related topics that get discussed and it is possible to organize them in a hierarchy for better insights and consumption. Symbl's Topic Hierarchy algorithm finds a pattern in the conversation and creates parent (global) topics with each parent topic having multiple child topics nested within it. -For example, -
@@ -167,8 +153,6 @@ For example, A word or phrase that provides information necessary to fulfill a particular intent. Each entity belongs to a category specified by the entity's associated type. The platform generates entities related to the insight types for datetime and person. -For example, -
From 3553048009a058464481831f6294cbbaf507564a Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Fri, 17 Dec 2021 14:40:43 +0530 Subject: [PATCH 59/64] DC-215 --- python-sdk/telephony-api.md | 3 +-- python-sdk/telephony-sip.md | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/python-sdk/telephony-api.md b/python-sdk/telephony-api.md index e95272b0..2024a923 100644 --- a/python-sdk/telephony-api.md +++ b/python-sdk/telephony-api.md @@ -7,8 +7,7 @@ slug: /python-sdk/python-sdk-telephony-api import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -This tutorial provides code snippets and instructions on how to utilize Python SDK to call Symbl's Telephony API using PSTN protocol.
-To view the source code, go to the [open-source repository](https://github.com/symblai/symbl-python) in GitHub. +This tutorial provides code snippets and instructions on how to utilize Python SDK to call Symbl's Telephony API using PSTN protocol. To view the source code, go to the [open-source repository](https://github.com/symblai/symbl-python) in GitHub. The Python SDK provides the following capabilities: diff --git a/python-sdk/telephony-sip.md b/python-sdk/telephony-sip.md index 8745583e..d63c6658 100644 --- a/python-sdk/telephony-sip.md +++ b/python-sdk/telephony-sip.md @@ -11,6 +11,8 @@ This tutorial provides code snippets and instructions on how to utilize Python S The Python SDK provides the following capabilities: +- [Credential Handling](#credential-handling)
+ - [Start SIP Connection](#start-sip-connection)
- [Stop connection](#stop-connection)
@@ -19,6 +21,23 @@ The Python SDK provides the following capabilities: - [Subscribing to Events (transcript, questions, action-items, etc.)](#subscribe-to-events)
+## Credential Handling
+
+Symbl.ai's Python SDK simplifies the credential handling by allowing you either to add your credentials directly to the connection method's calls or else through a separate file saved to your execution directory.
+
+To add your credentials directly to the connection method's calls, add the following line:
+
+```python
+ credentials={app_id: <app_id>, app_secret: <app_secret>},
+```
+
+To handle credentials through a separate file saved to your execution directory, add a file to your project called `symbl.conf` with the following configuration:
+
+```python
+[credentials]
+app_id=<app_id>
+app_secret=<app_secret>
+```
 
 ## Start SIP Connection
From 9f6fa0283b0401414f9d23cfc8e7f27b55cf11d0 Mon Sep 17 00:00:00 2001
From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com>
Date: Fri, 17 Dec 2021 15:25:57 +0530
Subject: [PATCH 60/64] DC-215 (Updated)
---
 python-sdk/telephony-api.md | 2 +-
 python-sdk/telephony-sip.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python-sdk/telephony-api.md b/python-sdk/telephony-api.md
index 2024a923..207db73b 100644
--- a/python-sdk/telephony-api.md
+++ b/python-sdk/telephony-api.md
@@ -24,7 +24,7 @@
 ## Credential Handling
 
-Symbl.ai's Python SDK simplifies the credential handling by allowing you either to add your credentials directly to the connection method's calls or else through a separate file saved to your execution directory.
+The Python SDK simplifies the credential handling by allowing you to either add your credentials directly to the connection method's calls or supply them through a separate file saved to your execution directory.
diff --git a/python-sdk/telephony-sip.md b/python-sdk/telephony-sip.md
index d63c6658..75d96992 100644
--- a/python-sdk/telephony-sip.md
+++ b/python-sdk/telephony-sip.md
@@ -23,7 +23,7 @@
-Symbl.ai's Python SDK simplifies the credential handling by allowing you either to add your credentials directly to the connection method's calls or else through a separate file saved to your execution directory.
+The Python SDK simplifies the credential handling by allowing you to either add your credentials directly to the connection method's calls or supply them through a separate file saved to your execution directory.
From bd6117420d5e6d7bbb65de47dcb57653cf71b0e4 Mon Sep 17 00:00:00 2001
From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com>
Date: Mon, 20 Dec 2021 13:10:02 +0530
Subject: [PATCH 61/64] Update reference.md
---
 javascript-sdk/reference/reference.md | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/javascript-sdk/reference/reference.md b/javascript-sdk/reference/reference.md
index 120507a4..4a862ebf 100644
--- a/javascript-sdk/reference/reference.md
+++ b/javascript-sdk/reference/reference.md
@@ -202,6 +202,19 @@ sdk.subscribeToConnection(connectionId, (data) => {
   }
 });
 ```
+
+---
+
+### subscribeToStream
+
+The `subscribeToStream` function allows you to subscribe to an existing streaming connection in read-only mode. It takes the following parameters:
+
+| Parameters | Type | Example |
+| ---------- | ------- | ------- |
+| `id` | String | Connection ID created on connection `init`|
+
+This is a function of our [Subscribe API](/docs/subscribe-api).
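+
+#### Code Example
+
+A minimal usage sketch (modeled on the Web SDK's `subscribeToStream` example; the exact callback signature here is an assumption rather than confirmed JavaScript SDK API):
+
+```js
+// `connectionId` is the Connection ID created when the connection was started.
+sdk.subscribeToStream(connectionId, (data) => {
+  // Receives read-only events from the existing stream
+  console.log('data:', data);
+});
+```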
+ --- ### pushEventOnConnection From 9ea70f4f08dd244d9cc0a460e63a6b539b24b279 Mon Sep 17 00:00:00 2001 From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com> Date: Wed, 22 Dec 2021 12:52:36 +0530 Subject: [PATCH 62/64] DC-270 + Links Fix + Tags Fix --- .../how-to-use-sentiment-analysis.md | 10 +++++-- .../receive-conversation-analytics.md | 3 +- async-api/code-snippets/receive-entities.md | 3 +- .../receive-speech-to-text-and-ai-insights.md | 6 ++-- .../sentiment-analysis-on-messages.md | 6 +++- .../sentiment-analysis-on-topics.md | 6 +++- .../track-phrases-in-a-conversation.md | 2 ++ ...rate-pre-built-ui-from-video-recordings.md | 2 ++ .../get-speaker-separation-audio-video.md | 2 ++ conversation-api/api-reference/summary.md | 27 +++++------------- .../concepts/comprehensive-action-items.md | 2 +- conversation-api/concepts/summarization.md | 28 ++++++------------- conversation-api/concepts/topic-hierarchy.md | 2 +- how-tos/get-sentiment-analysis.md | 2 +- .../summarization/adding-speaker-info.md | 13 ++------- tutorials/summarization/getting-summary.md | 13 ++------- tutorials/summarization/refreshing-summary.md | 13 ++------- 17 files changed, 60 insertions(+), 80 deletions(-) diff --git a/async-api/code-snippets/how-to-use-sentiment-analysis.md b/async-api/code-snippets/how-to-use-sentiment-analysis.md index 26b39526..fcb12b92 100644 --- a/async-api/code-snippets/how-to-use-sentiment-analysis.md +++ b/async-api/code-snippets/how-to-use-sentiment-analysis.md @@ -1,14 +1,20 @@ --- id: how-to-use-sentiment-analysis -title: How To Use Sentiment Analysis +title: How To Use Sentiment Analysis (Beta) --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + +:::note In Beta Phase +This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai. +::: + ## Process file -The first thing you need to do before getting your Sentiment Analysis is to process your audio, video or text file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages. +The first thing you need to do before getting your Sentiment Analysis is to process your audio, video or text file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video-url) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio-url) or [POST Text File](/docs/async-api/overview/text/post-text) pages. :::caution You must wait for the job to complete processing before you proceed with getting the Conversation Intelligence. If you immediately make a GET request to Conversation API, it is possible that you'll receive incomplete insights. Therefore, ensure that you wait for the job to complete. 
diff --git a/async-api/code-snippets/receive-conversation-analytics.md b/async-api/code-snippets/receive-conversation-analytics.md index 63949d95..9df6006d 100644 --- a/async-api/code-snippets/receive-conversation-analytics.md +++ b/async-api/code-snippets/receive-conversation-analytics.md @@ -6,9 +6,10 @@ title: Receive Conversation Analytics import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- ## Process video file -The first thing you need to do before getting your conversational analytics is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages. +The first thing you need to do before getting your conversational analytics is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video-url) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio-url) or [POST Text File](/docs/async-api/overview/text/post-text) pages. While we provide you with a default video URL for the API to process, which can be downloaded [here](https://symbltestdata.s3.us-east-2.amazonaws.com/sample_video_file.mp4), you can replace that with any other video URL. diff --git a/async-api/code-snippets/receive-entities.md b/async-api/code-snippets/receive-entities.md index 9f7c0cc5..aee00d43 100644 --- a/async-api/code-snippets/receive-entities.md +++ b/async-api/code-snippets/receive-entities.md @@ -6,9 +6,10 @@ title: Receive Entities From Conversation import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- ## Process video file -The first thing you need to do before getting your entities is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages. +The first thing you need to do before getting your entities is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video-url) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio-url) or [POST Text File](/docs/async-api/overview/text/post-text) pages. While we provide you with a default video URL for the API to process, which can be downloaded [here](https://symbltestdata.s3.us-east-2.amazonaws.com/sample_video_file.mp4), you can replace that with any other video URL. diff --git a/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md b/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md index 2b2a9394..3e48d467 100644 --- a/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md +++ b/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md @@ -6,11 +6,13 @@ title: Receive Speech-to-Text and AI Insights import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + ## Process video file -The first thing you need to do before getting your speech-to-text and AI insights you need to process your video file. 
This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page.
+The first thing you need to do before getting your speech-to-text and AI insights is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video-url) page.
 
-If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages.
+If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio-url) or [POST Text File](/docs/async-api/overview/text/post-text) pages.
 
 While we provide you with a default video URL for the API to process, which can be downloaded [here](https://symbltestdata.s3.us-east-2.amazonaws.com/sample_video_file.mp4), you can replace that with any other video URL.
diff --git a/async-api/code-snippets/sentiment-analysis-on-messages.md b/async-api/code-snippets/sentiment-analysis-on-messages.md
index ed82ec62..f8005cb9 100644
--- a/async-api/code-snippets/sentiment-analysis-on-messages.md
+++ b/async-api/code-snippets/sentiment-analysis-on-messages.md
@@ -1,6 +1,6 @@
 ---
 id: sentiment-analysis-on-messages
-title: Sentiment Analysis on Messages
+title: Sentiment Analysis on Messages (Beta)
 ---
 
 import Tabs from '@theme/Tabs';
@@ -8,6 +8,10 @@ import TabItem from '@theme/TabItem';
 
 ---
 
+:::note In Beta Phase
+This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+:::
+
 Sentiment Analysis is the process of determining whether each message/line in a conversation is positive, negative, or neutral. Sentiment analysis on messages combines natural language processing and machine learning procedures to allot sentiment scores to the message.
 
 :::note
diff --git a/async-api/code-snippets/sentiment-analysis-on-topics.md b/async-api/code-snippets/sentiment-analysis-on-topics.md
index 8c96f1fa..e73ea234 100644
--- a/async-api/code-snippets/sentiment-analysis-on-topics.md
+++ b/async-api/code-snippets/sentiment-analysis-on-topics.md
@@ -1,6 +1,6 @@
 ---
 id: sentiment-analysis-on-topics
-title: Sentiment Analysis on Topics
+title: Sentiment Analysis on Topics (Beta)
 ---
 
 import Tabs from '@theme/Tabs';
@@ -8,6 +8,10 @@ import TabItem from '@theme/TabItem';
 
 ---
 
+:::note In Beta Phase
+This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
+:::
+
 Topics are key drivers of the conversation. They're the most important keywords or phrases used. The topics algorithm provides a framework for the user to calibrate and precisely model the relationship among the concepts and understand how the semantics of the meetings is talked upon. Sentiment Analysis on topics determines whether the Topics resulting from the conversation are positive, negative, or neutral.
 
 :::note
diff --git a/async-api/code-snippets/track-phrases-in-a-conversation.md b/async-api/code-snippets/track-phrases-in-a-conversation.md
index fe734f7d..b7ac7d4e 100644
--- a/async-api/code-snippets/track-phrases-in-a-conversation.md
+++ b/async-api/code-snippets/track-phrases-in-a-conversation.md
@@ -6,6 +6,8 @@ title: Track Phrases In A Conversation
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
+---
+
 :::note In Beta Phase
 This feature is in the Beta phase.
If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai. ::: diff --git a/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md b/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md index e6bb3fb6..1bf2c7e1 100644 --- a/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md +++ b/async-api/tutorials/generate-pre-built-ui-from-video-recordings.md @@ -7,6 +7,8 @@ slug: /async-api/code-snippets/generate-pre-built-ui-from-video-recordings import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + [Symbl's Async API](/docs/async-api/introduction) allows you to process audio, video or text data and transform them into AI insights such as Topics, Action Items, Questions, and more. In this guide, we will walk you through how to process a video recording and receive our [Pre-built UI](/docs/pre-built-ui/summary-ui). The Pre-built UI generates a UI which contains all the information and analysis from your conversation which can be shared through a shareable link. ![Video Summary UI](/img/summary-ui-intro.png) diff --git a/async-api/tutorials/get-speaker-separation-audio-video.md b/async-api/tutorials/get-speaker-separation-audio-video.md index 46e432ea..c797f759 100644 --- a/async-api/tutorials/get-speaker-separation-audio-video.md +++ b/async-api/tutorials/get-speaker-separation-audio-video.md @@ -7,6 +7,8 @@ sidebar_label: Speaker separation with Async API import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + [Symbl's Async API](/docs/async-api/introduction) allows you to process stored recordings of audio or video from files or URLs or even textual content from a conversation. In this guide, we will walk you through how to implement [Speaker Separation](/docs/async-api/reference/reference/#speaker-separation) with audio or video files. Speaker Separation, in short, is the ability to detect and separate unique speakers in a single stream of audio & video without the need for separate speaker events. diff --git a/conversation-api/api-reference/summary.md b/conversation-api/api-reference/summary.md index 6db848c2..235e5cd0 100644 --- a/conversation-api/api-reference/summary.md +++ b/conversation-api/api-reference/summary.md @@ -1,7 +1,7 @@ --- id: summary -title: GET Summary -sidebar_label: GET Summary +title: GET Summary (Beta) +sidebar_label: GET Summary (Beta) slug: /conversation-api/summary --- import Tabs from '@theme/Tabs'; @@ -9,18 +9,10 @@ import TabItem from '@theme/TabItem'; --- -:::info Symbl Labs -This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. - - -You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials. - -**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary. - -For any queries or feedback, please contact us at labs@symbl.ai. +:::note In Beta Phase +This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai. ::: - This API allows you to get a [Summary](/docs/concepts/summarization) of important contextual messages in a conversation. 
Currently, Summaries cannot be generated in real-time. Support for creating Summary in real-time will be added soon. @@ -33,7 +25,7 @@ If the number of words in a conversation is below 50 or the number of sentences ### API Endpoint -**GET `https://api-labs.symbl.ai/v1/conversations/{conversationId}/summary`** +**GET `https://api.symbl.ai/v1/conversations/{conversationId}/summary`** ### Request Headers @@ -58,7 +50,7 @@ If you are using `x-api-key` we recommend that you use `Authorization` header in ```shell -curl --location --request GET 'https://api-labs.symbl.ai/v1/conversations/{conversationId}/summary' \ +curl --location --request GET 'https://api.symbl.ai/v1/conversations/{conversationId}/summary' \ --header "Authorization: Bearer $AUTH_TOKEN" \ # Set your access token here. See https://docs.symbl.ai/docs/developer-tools/authentication --header 'Content-Type: application/json' \ @@ -73,7 +65,7 @@ const request = require('request'); const authToken = AUTH_TOKEN;; request.get({ - url: `https://api-labs.symbl.ai/v1/conversations/{conversationId}/summary`, + url: `https://api.symbl.ai/v1/conversations/{conversationId}/summary`, headers: { 'Authorization': `Bearer ${authToken}` }, json: true }, (err, response, body) => { @@ -83,11 +75,6 @@ request.get({ -:::important - -The Conversation ID you receive after processing conversations with production Endpoint can be used to process Summary using the Labs Endpoint. -::: - ### Using Refresh Parameter You can use the `refresh=true` as query parameter in the Summary API for any of the following use-cases: diff --git a/conversation-api/concepts/comprehensive-action-items.md b/conversation-api/concepts/comprehensive-action-items.md index 1b21c21c..2ea4a46b 100644 --- a/conversation-api/concepts/comprehensive-action-items.md +++ b/conversation-api/concepts/comprehensive-action-items.md @@ -1,6 +1,6 @@ --- id: comprehensive-action-items -title: Comprehensive Action Items +title: Comprehensive Action Items (Labs) sidebar_label: Introduction slug: /concepts/comprehensive-action-items --- diff --git a/conversation-api/concepts/summarization.md b/conversation-api/concepts/summarization.md index b923b8bf..59b741ba 100644 --- a/conversation-api/concepts/summarization.md +++ b/conversation-api/concepts/summarization.md @@ -1,6 +1,6 @@ --- id: summarization -title: Summarization API- Capturing Key Points +title: Summarization API- Capturing Key Points (Beta) description: Use Symbl.ai’s summarization API to capture key points in a conversation and create succinct summaries. Learn more. sidebar_label: Introduction slug: /concepts/summarization @@ -11,15 +11,8 @@ import TabItem from '@theme/TabItem'; --- -:::info Symbl Labs -This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits. - - -You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials. - -**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary. - -For any queries or feedback, please contact us at labs@symbl.ai. +:::note In Beta Phase +This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai. 
::: Symbl distills important messages and creates succinct Summaries for long conversations. You can get these Summaries using the [Summary API](/docs/conversation-api/summary). @@ -46,15 +39,13 @@ If the number of words in a conversation is below 50 or the number of sentences You can enable the Summary API for Async APIs using the following endpoints:
-Note that the base URL for Symbl Labs is always `https://api-labs.symbl.ai` - API | Summary Endpoint ---------- | ------- -[Async Text API (POST/PUT)](/docs/async-api/overview/text/post-text)| ```https://api-labs.symbl.ai/v1/process/text?enableSummary=true ``` -[Async Audio API (POST/PUT)](/docs/async-api/overview/audio/post-audio)| ```https://api-labs.symbl.ai/v1/process/audio?enableSummary=true``` -[Async Audio URL API (POST/PUT)](/docs/async-api/overview/audio/post-audio-url)| ```https://api-labs.symbl.ai/v1/process/audio/url?enableSummary=true``` -[Async Video API (POST/PUT)](/docs/async-api/overview/video/post-video)| ```https://api-labs.symbl.ai/v1/process/video?enableSummary=true``` -[Async Video URL API (POST/PUT)](/docs/async-api/overview/video/post-video-url)| ```https://api-labs.symbl.ai/v1/process/video/url?enableSummary=true``` +[Async Text API (POST/PUT)](/docs/async-api/overview/text/post-text)| ```https://api.symbl.ai/v1/process/text?enableSummary=true ``` +[Async Audio API (POST/PUT)](/docs/async-api/overview/audio/post-audio)| ```https://api.symbl.ai/v1/process/audio?enableSummary=true``` +[Async Audio URL API (POST/PUT)](/docs/async-api/overview/audio/post-audio-url)| ```https://api.symbl.ai/v1/process/audio/url?enableSummary=true``` +[Async Video API (POST/PUT)](/docs/async-api/overview/video/post-video)| ```https://api.symbl.ai/v1/process/video?enableSummary=true``` +[Async Video URL API (POST/PUT)](/docs/async-api/overview/video/post-video-url)| ```https://api.symbl.ai/v1/process/video/url?enableSummary=true``` Once the above API job is complete, the corresponding Summary can be obtained by sending a GET request to the Summary API. See the [**Summary API Documentation**](/docs/conversation-api/summary) for details. @@ -63,5 +54,4 @@ Once the above API job is complete, the corresponding Summary can be obtained by ## Tutorials - [How to get a Summary using Async APIs](/docs/tutorials/summarization/getting-summary) - [How to Refresh a Summary](/docs/tutorials/summarization/refreshing-summary) -- [Providing Speaker Information to generate Summary](/docs/tutorials/summarization/adding-speaker-info) - +- [Providing Speaker Information to generate Summary](/docs/tutorials/summarization/adding-speaker-info) \ No newline at end of file diff --git a/conversation-api/concepts/topic-hierarchy.md b/conversation-api/concepts/topic-hierarchy.md index 045e5ad5..fdbde1ea 100644 --- a/conversation-api/concepts/topic-hierarchy.md +++ b/conversation-api/concepts/topic-hierarchy.md @@ -1,6 +1,6 @@ --- id: topic-hierarchy -title: Topic Hierarchy +title: Topic Hierarchy (Beta) sidebar_label: Introduction slug: /concepts/topic-hierarchy --- diff --git a/how-tos/get-sentiment-analysis.md b/how-tos/get-sentiment-analysis.md index 8801118c..c1661450 100644 --- a/how-tos/get-sentiment-analysis.md +++ b/how-tos/get-sentiment-analysis.md @@ -1,6 +1,6 @@ --- id: how-to-get-sentiment-analysis -title: Getting Sentiment Analysis +title: Getting Sentiment Analysis (Beta) sidebar_label: Real-time Sentiment Analysis slug: /how-tos/get-sentiment-analysis --- diff --git a/tutorials/summarization/adding-speaker-info.md b/tutorials/summarization/adding-speaker-info.md index b7d631fb..9f3b5597 100644 --- a/tutorials/summarization/adding-speaker-info.md +++ b/tutorials/summarization/adding-speaker-info.md @@ -1,6 +1,6 @@ --- id: adding-speaker-info -title: Providing Speaker Information to generate Summary +title: Providing Speaker Information to generate Summary (Beta) sidebar_label: Providing Speaker 
Information to generate Summary
slug: /tutorials/summarization/adding-speaker-info
---
@@ -10,15 +10,8 @@ import TabItem from '@theme/TabItem';

---

-:::info Symbl Labs
-This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
-
-
-You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
-
-**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
-
-For any queries or feedback, please contact us at labs@symbl.ai.
+:::note In Beta Phase
+This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
:::

Summaries are generated best when used with the Speaker information captured in the conversation. It is highly recommended that you send us the speaker information to use this feature effectively.
diff --git a/tutorials/summarization/getting-summary.md b/tutorials/summarization/getting-summary.md
index 38a1b41a..49bca403 100644
--- a/tutorials/summarization/getting-summary.md
+++ b/tutorials/summarization/getting-summary.md
@@ -1,6 +1,6 @@
---
id: getting-summary
-title: How to get a Summary using Async API
+title: How to get a Summary using Async API (Beta)
sidebar_label: Get Summary using Async API
slug: /tutorials/summarization/getting-summary
---
@@ -9,15 +9,8 @@ import TabItem from '@theme/TabItem';

---

-:::info Symbl Labs
-This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
-
-
-You can access the Labs features using your Symbl App Id and Secret. If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
-
-**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
-
-For any queries or feedback, please contact us at labs@symbl.ai.
+:::note In Beta Phase
+This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
:::

This tutorial provides step-by-step instructions on how to get a Summary using Async APIs.
diff --git a/tutorials/summarization/refreshing-summary.md b/tutorials/summarization/refreshing-summary.md
index 376a97ad..c6779510 100644
--- a/tutorials/summarization/refreshing-summary.md
+++ b/tutorials/summarization/refreshing-summary.md
@@ -1,6 +1,6 @@
---
id: refreshing-summary
-title: How to Refresh a Summary
+title: How to Refresh a Summary (Beta)
sidebar_label: Refreshing a Summary
slug: /tutorials/summarization/refreshing-summary
---
@@ -8,15 +8,8 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
---
-:::info Symbl Labs
-This feature is a part of the Symbl Labs. Symbl Labs is our experimental wing designed to share our bleeding edge AI research on human conversations with anyone who wants to explore its limits.
-
-
-You can access the Labs features using your Symbl App Id and Secret.
If you don't already have it, sign up on [platform](https://platform.symbl.ai/#/login) to get your credentials.
-
-**Note**: The usage of data for Labs projects is stored for enhancing our research. We may continue to build, iterate, mutate or discontinue any of the below given features on the sole discretion of our team as deemed necessary.
-
-For any queries or feedback, please contact us at labs@symbl.ai.
+:::note In Beta Phase
+This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
:::

When you wish to regenerate a Summary that you generated earlier, you can do so in two ways:

From 7c86b02651a8efaa2973f90be874def5ce9ac800 Mon Sep 17 00:00:00 2001
From: amritesh-singh <88492460+amritesh-singh@users.noreply.github.com>
Date: Fri, 31 Dec 2021 16:22:16 +0530
Subject: [PATCH 63/64] DC-274+DC-224

---
 async-api/code-snippets/how-to-use-sentiment-analysis.md | 2 ++
 async-api/code-snippets/receive-conversation-analytics.md | 2 ++
 async-api/code-snippets/receive-entities.md | 2 ++
 .../code-snippets/receive-speech-to-text-and-ai-insights.md | 2 ++
 async-api/code-snippets/track-phrases-in-a-conversation.md | 2 ++
 async-api/introduction.md | 2 ++
 async-api/overview/async-api-supported-languages.md | 2 ++
 async-api/overview/async-diarization.md | 2 ++
 async-api/overview/introduction.md | 2 ++
 conversation-api/api-reference/analytics.md | 1 +
 conversation-api/api-reference/conversation.md | 2 ++
 conversation-api/api-reference/delete.md | 2 ++
 conversation-api/api-reference/entities.md | 1 +
 conversation-api/api-reference/insights.md | 2 ++
 conversation-api/api-reference/members.md | 2 ++
 conversation-api/api-reference/messages.md | 2 ++
 conversation-api/api-reference/questions.md | 2 ++
 conversation-api/api-reference/speakers.md | 2 ++
 conversation-api/api-reference/topics.md | 2 ++
 conversation-api/api-reference/transcript.md | 2 ++
 conversation-api/api-reference/update-members.md | 1 +
 conversation-api/introduction.md | 2 ++
 developer-tools/audio-conversion.md | 2 ++
 getting-started-with-async-api.md | 2 ++
 getting-started-with-streaming-api.md | 2 ++
 getting-started-with-telephony-api.md | 2 ++
 how-tos/build-with-amazon-chime.md | 2 ++
 javascript-sdk/overview/active-speaker-events.md | 2 ++
 javascript-sdk/overview/connect-to-endpoints.md | 2 ++
 javascript-sdk/overview/streaming-audio-real-time.md | 2 ++
 javascript-sdk/overview/subscribe-real-time.md | 2 ++
 javascript-sdk/reference/reference.md | 1 +
 .../streaming/code-snippets/set-language-using-sdk.md | 2 ++
 .../code-snippets/set-language-and-timezone-telephony.md | 2 ++
 javascript-sdk/tutorials/get-realtime-transcription-js-sdk.md | 1 +
 javascript-sdk/tutorials/pass-audio-codecs.md | 2 ++
 javascript-sdk/tutorials/push-audio-get-realtime-data.md | 1 +
 .../tutorials/push-speakerevents-get-summary-url.md | 2 ++
 pre-built-ui/summary-ui.md | 2 ++
 python-sdk/async-audio.md | 1 +
 python-sdk/async-text.md | 1 +
 python-sdk/async-video.md | 2 ++
 python-sdk/conversation-api.md | 2 ++
 python-sdk/python-sdk-reference.md | 2 ++
 python-sdk/streaming-api.md | 2 ++
 python-sdk/telephony-api.md | 2 ++
 python-sdk/telephony-sip.md | 2 ++
 .../code-snippets/consume-trackers-with-streaming-api.md | 2 ++
 streamingapi/code-snippets/detect-key-phrases.md | 2 ++
 streamingapi/code-snippets/receive-live-captioning.md | 2 ++
 streamingapi/code-snippets/receive-live-insights.md | 2 ++
 streamingapi/code-snippets/receive-live-topics.md | 2 ++
 .../receive-speech-to-text-for-different-languages.md | 2 ++
.../code-snippets/start-and-stop-streaming-api-connection.md | 2 ++ streamingapi/concepts.md | 2 ++ streamingapi/introduction.md | 1 + streamingapi/tutorials/get-real-time-sentiment-analysis.md | 2 ++ streamingapi/tutorials/get-realtime-transcription.md | 2 ++ telephony/code-snippets/connect-to-pstn.md | 2 ++ telephony/code-snippets/connect-to-sip.md | 2 ++ .../receive-prebuilt-ui-email-after-conversation.md | 2 ++ .../receive-speech-to-text-for-a-different-language.md | 2 ++ telephony/concepts/concepts.md | 2 ++ telephony/tutorials/connect-to-zoom-with-telephony-api.md | 2 ++ telephony/tutorials/get-live-transcription-telephony-api.md | 2 ++ tutorials/summarization/adding-speaker-info.md | 4 ---- 66 files changed, 121 insertions(+), 4 deletions(-) diff --git a/async-api/code-snippets/how-to-use-sentiment-analysis.md b/async-api/code-snippets/how-to-use-sentiment-analysis.md index 26b39526..956c1e36 100644 --- a/async-api/code-snippets/how-to-use-sentiment-analysis.md +++ b/async-api/code-snippets/how-to-use-sentiment-analysis.md @@ -6,6 +6,8 @@ title: How To Use Sentiment Analysis import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + ## Process file The first thing you need to do before getting your Sentiment Analysis is to process your audio, video or text file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages. diff --git a/async-api/code-snippets/receive-conversation-analytics.md b/async-api/code-snippets/receive-conversation-analytics.md index 63949d95..4ca89122 100644 --- a/async-api/code-snippets/receive-conversation-analytics.md +++ b/async-api/code-snippets/receive-conversation-analytics.md @@ -6,6 +6,8 @@ title: Receive Conversation Analytics import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + ## Process video file The first thing you need to do before getting your conversational analytics is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages. diff --git a/async-api/code-snippets/receive-entities.md b/async-api/code-snippets/receive-entities.md index 9f7c0cc5..0de84fb4 100644 --- a/async-api/code-snippets/receive-entities.md +++ b/async-api/code-snippets/receive-entities.md @@ -6,6 +6,8 @@ title: Receive Entities From Conversation import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + ## Process video file The first thing you need to do before getting your entities is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page. If you want to use an audio or text file you can use the code from the [POST Audio URL](/docs/async-api/overview/audio/post-audio) or [POST Text File](/docs/async-api/overview/text/post-text) pages. 
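For orientation, a minimal sketch of what such a processing request can look like is given below. This is not the exact snippet from the pages linked above: the access token and media URL are placeholders, and the `detectEntities` flag is an assumption based on the Async API options, so check those pages for the authoritative parameters.

```js
// Hypothetical sketch: submit a publicly accessible video URL for processing,
// with entity detection enabled. <AUTH_TOKEN> and the URL are placeholders.
(async () => {
  const response = await fetch("https://api.symbl.ai/v1/process/video/url", {
    method: "POST",
    headers: {
      Authorization: "Bearer <AUTH_TOKEN>",
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      url: "https://example.com/meeting-recording.mp4", // placeholder media URL
      name: "Entities sample",
      detectEntities: true // assumed flag for enabling entity detection
    })
  });

  // A successful request returns a conversationId (for the Conversation API)
  // and a jobId (to poll the Job Status API until processing completes).
  const { conversationId, jobId } = await response.json();
  console.log(conversationId, jobId);
})();
```

Once the job reported by `jobId` completes, the `conversationId` is what you pass to the Conversation API endpoints described in the snippets that follow.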
diff --git a/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md b/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md
index 2b2a9394..580b9b25 100644
--- a/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md
+++ b/async-api/code-snippets/receive-speech-to-text-and-ai-insights.md
@@ -6,6 +6,8 @@ title: Receive Speech-to-Text and AI Insights
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
## Process video file

The first thing you need to do before getting your speech-to-text and AI insights is to process your video file. This code is taken directly from our [POST Video URL](/docs/async-api/overview/video/post-video) page.
diff --git a/async-api/code-snippets/track-phrases-in-a-conversation.md b/async-api/code-snippets/track-phrases-in-a-conversation.md
index fe734f7d..b7ac7d4e 100644
--- a/async-api/code-snippets/track-phrases-in-a-conversation.md
+++ b/async-api/code-snippets/track-phrases-in-a-conversation.md
@@ -6,6 +6,8 @@ title: Track Phrases In A Conversation
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
:::note In Beta Phase
This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
:::
diff --git a/async-api/introduction.md b/async-api/introduction.md
index f8a4b95c..9336d26d 100644
--- a/async-api/introduction.md
+++ b/async-api/introduction.md
@@ -6,6 +6,8 @@ sidebar_label: Introduction
slug: /async-api/introduction
---
+---
+
The Async API provides a REST interface that helps you to submit any recorded or saved conversations to Symbl. When you submit a conversation, you'll receive a Conversation ID (`conversationId`), which is unique to your conversation.

![Async API Diagram](/img/asyncDiagram.png)
diff --git a/async-api/overview/async-api-supported-languages.md b/async-api/overview/async-api-supported-languages.md
index 5811f00c..6bb701af 100644
--- a/async-api/overview/async-api-supported-languages.md
+++ b/async-api/overview/async-api-supported-languages.md
@@ -4,6 +4,8 @@ title: What languages does Async API Support?
sidebar_label: Languages Supported
---
+---
+
The Async Audio and Async Video APIs can work with languages other than English.

The following list of languages (with their [BCP-47](https://en.wikipedia.org/wiki/IETF_language_tag) language codes) is currently supported:
diff --git a/async-api/overview/async-diarization.md b/async-api/overview/async-diarization.md
index 021995ca..586da9ac 100644
--- a/async-api/overview/async-diarization.md
+++ b/async-api/overview/async-diarization.md
@@ -4,6 +4,8 @@ title: Speaker Separation
sidebar_label: Speaker Separation
---
+---
+
The Async Audio & Async Video APIs can detect and separate unique speakers in a single stream of audio & video without the need for separate speaker events.

To enable this capability with either of the APIs, the `enableSpeakerDiarization` and `diarizationSpeakerCount` query parameters need to be passed with the request.
diff --git a/async-api/overview/introduction.md b/async-api/overview/introduction.md
index 903b8b7d..0a91718a 100644
--- a/async-api/overview/introduction.md
+++ b/async-api/overview/introduction.md
@@ -7,6 +7,8 @@ sidebar_label: Introduction
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Async API provides a REST interface that helps you to submit any recorded or saved conversations to Symbl.
When you submit a conversation, you'll receive a Conversation ID (`conversationId`), which is unique to your conversation.
diff --git a/conversation-api/api-reference/analytics.md b/conversation-api/api-reference/analytics.md
index 62555116..84d44545 100644
--- a/conversation-api/api-reference/analytics.md
+++ b/conversation-api/api-reference/analytics.md
@@ -7,6 +7,7 @@ slug: /conversation-api/analytics
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---

The Analytics API provides you with functionality like finding speaker ratio, talk time, silence, pace, and overlap in a conversation.
diff --git a/conversation-api/api-reference/conversation.md b/conversation-api/api-reference/conversation.md
index 4d6f98a0..0bfc3fc2 100644
--- a/conversation-api/api-reference/conversation.md
+++ b/conversation-api/api-reference/conversation.md
@@ -7,6 +7,8 @@ slug: /conversation-api/conversation-data
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This API returns the conversation metadata, like meeting name, member name and email, start and end time of the meeting, meeting type, and meeting ID. It returns data for a specific conversation (using `conversationId`). If you wish to get all the conversations, see the [GET All Conversations](/docs/conversation-api/all-conversations) page.
diff --git a/conversation-api/api-reference/delete.md b/conversation-api/api-reference/delete.md
index 82f19bd4..4c928ca7 100644
--- a/conversation-api/api-reference/delete.md
+++ b/conversation-api/api-reference/delete.md
@@ -6,6 +6,8 @@ slug: /conversation-api/delete-conversation
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This API permanently deletes the conversation and all related entities, such as messages, insights, and topics, associated with the Conversation ID.

:::caution
diff --git a/conversation-api/api-reference/entities.md b/conversation-api/api-reference/entities.md
index 6f8d57bf..30bd1c5c 100644
--- a/conversation-api/api-reference/entities.md
+++ b/conversation-api/api-reference/entities.md
@@ -8,6 +8,7 @@ slug: /conversation-api/entities
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---

:::info
This is a Beta API, undergoing further development.
diff --git a/conversation-api/api-reference/insights.md b/conversation-api/api-reference/insights.md
index f5291ed3..7afdd20a 100644
--- a/conversation-api/api-reference/insights.md
+++ b/conversation-api/api-reference/insights.md
@@ -8,6 +8,8 @@ slug: /conversation-api/insights
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
Returns all the insights in a conversation, including Topics, Questions, and Action Items.

### HTTP Request
diff --git a/conversation-api/api-reference/members.md b/conversation-api/api-reference/members.md
index 55e35c53..4e3b47c4 100644
--- a/conversation-api/api-reference/members.md
+++ b/conversation-api/api-reference/members.md
@@ -8,6 +8,8 @@ slug: /conversation-api/members
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This API returns a list of all the members in a conversation. A Member refers to a participant in the conversation who is uniquely identified as a speaker. Identifying different participants in a meeting can be done by implementing speaker separation. For more details, see how to identify members using [Speaker Events or Active Talker events](/docs/javascript-sdk/tutorials/push-speakerevents-get-summary-url) in real time with the Voice SDK.
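As a rough illustration of the request shape, here is a minimal, hypothetical sketch of fetching members with the endpoint named above; the conversation ID and token are placeholders:

```js
// Hypothetical sketch: list the members detected in a processed conversation.
// <CONVERSATION_ID> and <AUTH_TOKEN> are placeholders.
(async () => {
  const response = await fetch(
    "https://api.symbl.ai/v1/conversations/<CONVERSATION_ID>/members",
    { headers: { Authorization: "Bearer <AUTH_TOKEN>" } }
  );

  // Each member carries an id, a name, and an email when available.
  const { members } = await response.json();
  members.forEach((member) => console.log(member.id, member.name, member.email));
})();
```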
diff --git a/conversation-api/api-reference/messages.md b/conversation-api/api-reference/messages.md
index 277028e3..c11de52d 100644
--- a/conversation-api/api-reference/messages.md
+++ b/conversation-api/api-reference/messages.md
@@ -8,6 +8,8 @@ slug: /conversation-api/messages
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Messages API returns a list of all the messages in a conversation. You can use this for providing **Speech to Text data (also sometimes known as transcription)** for a video conference, meeting, or telephone call. Here, a message refers to a continuous sentence spoken by a speaker.
diff --git a/conversation-api/api-reference/questions.md b/conversation-api/api-reference/questions.md
index b87eea1e..64fef45b 100644
--- a/conversation-api/api-reference/questions.md
+++ b/conversation-api/api-reference/questions.md
@@ -8,6 +8,8 @@ slug: /conversation-api/questions
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This API helps you find explicit questions or requests for information that come up during the conversation. Whether answered or not, these are recognized as questions.

### HTTP Request
diff --git a/conversation-api/api-reference/speakers.md b/conversation-api/api-reference/speakers.md
index 645c69fe..8157e3b8 100644
--- a/conversation-api/api-reference/speakers.md
+++ b/conversation-api/api-reference/speakers.md
@@ -8,6 +8,8 @@ slug: /conversation-api/speaker-events
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Speaker Events API provides the functionality to update the Speakers who spoke in a conversation after it has been processed. This is achieved by sending the API a list of Speaker Events for that conversation, which the API then associates with the conversation's content.
diff --git a/conversation-api/api-reference/topics.md b/conversation-api/api-reference/topics.md
index 0436e9be..580b063a 100644
--- a/conversation-api/api-reference/topics.md
+++ b/conversation-api/api-reference/topics.md
@@ -8,6 +8,8 @@ slug: /conversation-api/get-topics
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This API returns the most relevant topics of discussion from the conversation, generated based on the overall scope of the discussion.
diff --git a/conversation-api/api-reference/transcript.md b/conversation-api/api-reference/transcript.md
index 2e236588..973576f8 100644
--- a/conversation-api/api-reference/transcript.md
+++ b/conversation-api/api-reference/transcript.md
@@ -8,6 +8,8 @@ slug: /conversation-api/transcript
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This API returns a formatted transcript in [Markdown](#create-transcript-in-markdown) and [SRT](#create-transcript-in-srt) format.

## Create Transcript in Markdown
diff --git a/conversation-api/api-reference/update-members.md b/conversation-api/api-reference/update-members.md
index 6a8fa86c..33bf024f 100644
--- a/conversation-api/api-reference/update-members.md
+++ b/conversation-api/api-reference/update-members.md
@@ -8,6 +8,7 @@ slug: /conversation-api/update-members
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---

Update an existing member in a conversation. This API can be used for updating the unique speakers detected as members from diarization as well.
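A hedged sketch of such an update is shown below; the IDs, name, and email are illustrative placeholders, not values from this patch:

```js
// Hypothetical sketch: rename a diarized member so the transcript shows a
// real name instead of a generic speaker label. IDs and token are placeholders.
(async () => {
  const conversationId = "<CONVERSATION_ID>";
  const memberId = "<MEMBER_ID>"; // taken from the GET members response

  const response = await fetch(
    `https://api.symbl.ai/v1/conversations/${conversationId}/members/${memberId}`,
    {
      method: "PUT",
      headers: {
        Authorization: "Bearer <AUTH_TOKEN>",
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        id: memberId,
        name: "John Doe",
        email: "john@example.com"
      })
    }
  );

  console.log(await response.json());
})();
```

Updating a member this way also updates that speaker's attribution in the conversation's existing messages and insights.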
diff --git a/conversation-api/introduction.md b/conversation-api/introduction.md
index 3cff2608..869977f9 100644
--- a/conversation-api/introduction.md
+++ b/conversation-api/introduction.md
@@ -8,6 +8,8 @@ sidebar_label: Introduction
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Conversation API provides a REST API interface for getting your processed Speech to Text data (also known as Transcripts) and Conversational Insights.

To view insights about a conversation, you must provide the API with a Conversation ID.
diff --git a/developer-tools/audio-conversion.md b/developer-tools/audio-conversion.md
index 7a75a5ed..04eafad7 100644
--- a/developer-tools/audio-conversion.md
+++ b/developer-tools/audio-conversion.md
@@ -7,6 +7,8 @@ sidebar_label: Audio Conversion
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
### Async Audio Conversion

The Async Audio API supports files of either `.wav` or `.mp3`, and the file must have `mono-channel audio` only. Any other file format can be converted using the code snippet from [FFmpeg](https://github.com/fluent-ffmpeg/node-fluent-ffmpeg).
diff --git a/getting-started-with-async-api.md b/getting-started-with-async-api.md
index 72735f80..50eb5013 100644
--- a/getting-started-with-async-api.md
+++ b/getting-started-with-async-api.md
@@ -8,6 +8,8 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import useBaseUrl from '@docusaurus/useBaseUrl';
+---
+
Use the Async API to get speech-to-text transcription from audio or video and extract follow-ups, topics, action-items, and questions from them.

## 1. Process an audio file using Async API
diff --git a/getting-started-with-streaming-api.md b/getting-started-with-streaming-api.md
index 30e862dd..04b8d94c 100644
--- a/getting-started-with-streaming-api.md
+++ b/getting-started-with-streaming-api.md
@@ -7,6 +7,8 @@ slug: /getting-started-with-streaming-api
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Streaming API provides live speech-to-text transcription and extracts follow-ups, topics, action-items, and questions from conversations as they unfold in real-time.
diff --git a/getting-started-with-telephony-api.md b/getting-started-with-telephony-api.md
index 77fc9217..ed91986d 100644
--- a/getting-started-with-telephony-api.md
+++ b/getting-started-with-telephony-api.md
@@ -7,6 +7,8 @@ slug: /getting-started-with-telephony-api
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Telephony API provides speech-to-text transcription and enables you to extract follow-ups, topics, action-items, and questions from conversations over a Zoom call.

## 1. Get a transcription of your Zoom call.
diff --git a/how-tos/build-with-amazon-chime.md b/how-tos/build-with-amazon-chime.md
index c54bb0bb..a43bc202 100644
--- a/how-tos/build-with-amazon-chime.md
+++ b/how-tos/build-with-amazon-chime.md
@@ -7,6 +7,8 @@ sidebar_label: Build with Amazon Chime
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The Symbl Conversation AI Adapter for Chime SDK is the simplest way to get started with Symbl in your Amazon Chime video platform.
Currently, the Symbl Conversational AI Adapter has the following features:
diff --git a/javascript-sdk/overview/active-speaker-events.md b/javascript-sdk/overview/active-speaker-events.md
index a1672710..0e723996 100644
--- a/javascript-sdk/overview/active-speaker-events.md
+++ b/javascript-sdk/overview/active-speaker-events.md
@@ -7,6 +7,8 @@ slug: /javascript-sdk/code-snippets/active-speaker-events
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
:::note
Pushing events is optional. If you don't have audio to process, then you can skip this step.
:::
diff --git a/javascript-sdk/overview/connect-to-endpoints.md b/javascript-sdk/overview/connect-to-endpoints.md
index 8fde19a4..2a856dba 100644
--- a/javascript-sdk/overview/connect-to-endpoints.md
+++ b/javascript-sdk/overview/connect-to-endpoints.md
@@ -7,6 +7,8 @@ slug: /javascript-sdk/code-snippets/connect-to-endpoints
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
The code snippet below dials in using PSTN and hangs up after 60 seconds. To view the source code, go to the [open-source repository](https://github.com/symblai/symbl-python) in GitHub.
diff --git a/python-sdk/telephony-sip.md b/python-sdk/telephony-sip.md
index 8745583e..f9c91587 100644
--- a/python-sdk/telephony-sip.md
+++ b/python-sdk/telephony-sip.md
@@ -7,6 +7,8 @@ slug: /python-sdk/python-sdk-telephony-sips
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This tutorial provides code snippets and instructions on how to utilize the Python SDK to call Symbl's Telephony API using SIP. To view the source code, browse the [open-source repository](https://github.com/symblai/symbl-python) in GitHub.

The Python SDK provides the following capabilities:
diff --git a/streamingapi/code-snippets/consume-trackers-with-streaming-api.md b/streamingapi/code-snippets/consume-trackers-with-streaming-api.md
index e9f96e41..7bed471c 100644
--- a/streamingapi/code-snippets/consume-trackers-with-streaming-api.md
+++ b/streamingapi/code-snippets/consume-trackers-with-streaming-api.md
@@ -7,6 +7,8 @@ slug: /streaming-api/code-snippets/consume-trackers-with-streaming-api
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
Below is an example that shows how to pass Trackers in the config object for the startRealtimeRequest call of Symbl’s JS SDK. This example also shows how to consume the results of the detected Trackers in real-time.

```js
diff --git a/streamingapi/code-snippets/detect-key-phrases.md b/streamingapi/code-snippets/detect-key-phrases.md
index 94dc8084..21727793 100644
--- a/streamingapi/code-snippets/detect-key-phrases.md
+++ b/streamingapi/code-snippets/detect-key-phrases.md
@@ -6,6 +6,8 @@ title: Detect Key Phrases
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
:::note In Beta Phase
This feature is in the Beta phase. If you have any questions, ideas or suggestions please reach out to us at devrelations@symbl.ai.
:::
diff --git a/streamingapi/code-snippets/receive-live-captioning.md b/streamingapi/code-snippets/receive-live-captioning.md
index 5ce00a15..3673b01a 100644
--- a/streamingapi/code-snippets/receive-live-captioning.md
+++ b/streamingapi/code-snippets/receive-live-captioning.md
@@ -6,6 +6,8 @@ title: Receive Live Captioning
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This example goes over how you can use the Symbl Streaming API to do live captioning.
This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction), which is meant to be run using Node.js, and native Javascript, which can be run in the browser:

## Connect
diff --git a/streamingapi/code-snippets/receive-live-insights.md b/streamingapi/code-snippets/receive-live-insights.md
index 6a8c9798..0b6ad8c3 100644
--- a/streamingapi/code-snippets/receive-live-insights.md
+++ b/streamingapi/code-snippets/receive-live-insights.md
@@ -6,6 +6,8 @@ title: Receive Live AI Insights
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This example goes over how you can use the Symbl Streaming API to receive [Insights](/docs/conversation-api/insights), which include the action items and questions the Symbl API detects in a conversation. This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction), which is meant to be run using Node.js, and native Javascript, which can be run in the browser:

## Connect
diff --git a/streamingapi/code-snippets/receive-live-topics.md b/streamingapi/code-snippets/receive-live-topics.md
index 7aceff7f..bc9d957c 100644
--- a/streamingapi/code-snippets/receive-live-topics.md
+++ b/streamingapi/code-snippets/receive-live-topics.md
@@ -6,6 +6,8 @@ title: Receive Live Topics
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This example goes over how you can use the Symbl Streaming API to receive [Topics](/docs/concepts/topics), which are what the Symbl API determines to be the most important keywords or phrases used in a conversation. This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction), which is meant to be run using Node.js, and native Javascript, which can be run in the browser:

## Connect
diff --git a/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md b/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md
index 58381296..45d44094 100644
--- a/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md
+++ b/streamingapi/code-snippets/receive-speech-to-text-for-different-languages.md
@@ -6,6 +6,8 @@ title: Receive Speech to Text for a different language in a conversation
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This example goes over how you can use the Symbl Streaming API to receive a speech-to-text transcription of a conversation in many different languages. This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction) and native Javascript.

:::note
diff --git a/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md b/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md
index 78127588..19475cae 100644
--- a/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md
+++ b/streamingapi/code-snippets/start-and-stop-streaming-api-connection.md
@@ -7,6 +7,8 @@ slug: /streamingapi/code-snippets/start-and-stop-connection
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
## Start Connection

Use this code to start a connection to the Streaming API using Javascript WebSockets.
This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction), which is meant to be run using Node.js, and native Javascript, which can be run in the browser:
diff --git a/streamingapi/concepts.md b/streamingapi/concepts.md
index 33fecb9d..5019f5c1 100644
--- a/streamingapi/concepts.md
+++ b/streamingapi/concepts.md
@@ -8,6 +8,8 @@ slug: /concepts/websockets
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
Symbl's Streaming API is based on the WebSocket protocol. WebSocket is a general-purpose protocol that suits any application designed for real-time, two-way communication within a browser, such as chat apps, collaboration software, and multiplayer games.

## What is a Websocket?
diff --git a/streamingapi/introduction.md b/streamingapi/introduction.md
index e8aee566..2ac108c0 100644
--- a/streamingapi/introduction.md
+++ b/streamingapi/introduction.md
@@ -9,6 +9,7 @@ slug: /streamingapi/introduction
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---

Symbl's Streaming API is based on the WebSocket protocol and can be used for real-time use cases where both the audio and its results from Symbl's back-end need to be available in real-time. It can be integrated directly via the browser or server.
diff --git a/streamingapi/tutorials/get-real-time-sentiment-analysis.md b/streamingapi/tutorials/get-real-time-sentiment-analysis.md
index 9fa6f50a..a3e5027d 100644
--- a/streamingapi/tutorials/get-real-time-sentiment-analysis.md
+++ b/streamingapi/tutorials/get-real-time-sentiment-analysis.md
@@ -7,6 +7,8 @@ slug: /streamingapi/tutorials/get-real-time-sentiment-analysis-from-your-web-bro
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
In this guide, you will learn how to get started with Symbl’s native Streaming API, which is our most accurate API for conversation analysis. Symbl's Streaming API enables real-time conversational analysis on voice, video, chat, or any live stream, directly through your web browser. If you have voice, video, or chat enabled, Symbl's API for streaming enables you to tap the raw conversational data of those streams. In addition to setting up Symbl.ai's Streaming API, you create a function that logs sentiment analysis in real-time. Sentiment analysis operates through a call to the Messages API with a query parameter.

You can view the complete code sample for this tutorial on [GitHub](https://github.com/symblai/logging-real-time-sentiments):
diff --git a/streamingapi/tutorials/get-realtime-transcription.md b/streamingapi/tutorials/get-realtime-transcription.md
index d6cf5dd4..f8206689 100644
--- a/streamingapi/tutorials/get-realtime-transcription.md
+++ b/streamingapi/tutorials/get-realtime-transcription.md
@@ -7,6 +7,8 @@ slug: /streamingapi/tutorials/receive-ai-insights-from-your-web-browser
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
In this guide, you will learn how to get started with Symbl’s native Streaming API, which is our most accurate API for conversation analysis. Symbl's Streaming API enables real-time conversational analysis on voice, video, chat, or any live stream, directly through your web browser. If you have voice, video, or chat enabled, Symbl's API for streaming enables you to tap the raw conversational data of those streams.
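As a rough, non-authoritative sketch of the core flow this tutorial builds up to (assuming the `symbl-node` package referenced elsewhere in these docs, with placeholder credentials and connection id):

```js
// Hedged sketch: open a Streaming API connection via the JS SDK and log
// live transcription. <APP_ID>, <APP_SECRET>, and the id are placeholders.
const { sdk } = require('symbl-node');

(async () => {
  await sdk.init({
    appId: '<APP_ID>',
    appSecret: '<APP_SECRET>',
    basePath: 'https://api.symbl.ai'
  });

  const connection = await sdk.startRealtimeRequest({
    id: 'my-connection-id',
    insightTypes: ['action_item', 'question'],
    config: { languageCode: 'en-US', sampleRateHertz: 44100 },
    handlers: {
      // Logs live (punctuated) speech-to-text as it is recognized
      onSpeechDetected: (data) => {
        if (data && data.punctuated) {
          console.log('Live:', data.punctuated.transcript);
        }
      }
    }
  });

  // Stop the connection when done so you do not consume extra minutes:
  // await sdk.stopRequest(connection.connectionId);
})();
```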
You can view the complete code sample for this tutorial on [GitHub](https://github.com/symblai/real-time-speech-recognition-with-websockets):
diff --git a/telephony/code-snippets/connect-to-pstn.md b/telephony/code-snippets/connect-to-pstn.md
index fd144547..2eabee46 100644
--- a/telephony/code-snippets/connect-to-pstn.md
+++ b/telephony/code-snippets/connect-to-pstn.md
@@ -5,6 +5,8 @@ title: Connect to a PSTN connection to get Speech to Text and AI Insights
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
## Connect to a call with PSTN

The following code example shows how you can connect the Telephony API to your cell phone or any other type of phone number using PSTN (**Currently, only US phone numbers work**). Making a phone call is also the quickest way to test Symbl’s Telephony API. It can make an outbound call to a phone number using a traditional public switched telephone network ([PSTN](https://en.wikipedia.org/wiki/Public_switched_telephone_network)).
diff --git a/telephony/code-snippets/connect-to-sip.md b/telephony/code-snippets/connect-to-sip.md
index 804a07ce..620fd6ca 100644
--- a/telephony/code-snippets/connect-to-sip.md
+++ b/telephony/code-snippets/connect-to-sip.md
@@ -5,6 +5,8 @@ title: Connect to a SIP connection to get Speech to Text and AI Insights
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
## Connect to a call

The following code sample shows how you can connect the Telephony API to your cell phone (or any other type of phone number). Making a phone call is also the quickest way to test Symbl’s Telephony API. It can make an outbound call to a phone number using SIP endpoints that can be accessed over the internet using a SIP URI.
diff --git a/telephony/code-snippets/receive-prebuilt-ui-email-after-conversation.md b/telephony/code-snippets/receive-prebuilt-ui-email-after-conversation.md
index 047ccf2c..4d7c70ff 100644
--- a/telephony/code-snippets/receive-prebuilt-ui-email-after-conversation.md
+++ b/telephony/code-snippets/receive-prebuilt-ui-email-after-conversation.md
@@ -5,6 +5,8 @@ title: Receive Prebuilt Summary UI email after each conversation
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This snippet goes into detail about how to receive Symbl's Prebuilt Summary UI email after a conversation is finished. The Prebuilt Summary UI email details the conversation using speech-to-text transcription and shows any AI insights gathered during the conversation. This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction) and native Javascript.

#### This is an example of the summary page you can expect to receive at the end of your call
diff --git a/telephony/code-snippets/receive-speech-to-text-for-a-different-language.md b/telephony/code-snippets/receive-speech-to-text-for-a-different-language.md
index e79b9bb0..e3ab4672 100644
--- a/telephony/code-snippets/receive-speech-to-text-for-a-different-language.md
+++ b/telephony/code-snippets/receive-speech-to-text-for-a-different-language.md
@@ -5,6 +5,8 @@ title: Receive Speech-to-Text for a different language in a Conversation
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+---
+
This example goes over how you can use the Symbl Telephony API to receive a speech-to-text transcription of a conversation in many different languages. This example uses both [Symbl's Javascript SDK](/docs/javascript-sdk/overview/introduction) and native Javascript.
:::note diff --git a/telephony/concepts/concepts.md b/telephony/concepts/concepts.md index 11648e01..af981354 100644 --- a/telephony/concepts/concepts.md +++ b/telephony/concepts/concepts.md @@ -6,6 +6,8 @@ slug: /concepts/pstn-and-sip import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + This API supports dialing through a simple phone number - `PSTN` or a Voice Over IP system - `SIP` endpoint. If you don't have your own Voice over IP (VoIP) system, use a phone number to make the connection. ### Session Initiation Protocol (SIP) diff --git a/telephony/tutorials/connect-to-zoom-with-telephony-api.md b/telephony/tutorials/connect-to-zoom-with-telephony-api.md index 6ba87ab2..926a2e23 100644 --- a/telephony/tutorials/connect-to-zoom-with-telephony-api.md +++ b/telephony/tutorials/connect-to-zoom-with-telephony-api.md @@ -7,6 +7,8 @@ slug: /telephony/tutorials/connect-to-zoom import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + :::info This guide uses a **PSTN** connection to connect to Zoom. **PSTN** audio quality maxes out to 8KHz. You can also use a **[SIP-based connection](/docs/concepts/pstn-and-sip#sip-session-initiation-protocol)**, which captures audio at 16KHz and above. ::: diff --git a/telephony/tutorials/get-live-transcription-telephony-api.md b/telephony/tutorials/get-live-transcription-telephony-api.md index ca94568a..5f9e42d2 100644 --- a/telephony/tutorials/get-live-transcription-telephony-api.md +++ b/telephony/tutorials/get-live-transcription-telephony-api.md @@ -7,6 +7,8 @@ slug: /telephony/tutorials/connect-to-phone-call import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +--- + Get a live transcription in your Node.js application by making a call to a valid phone number. In this guide, we will walk you through how to get a live transcription and real-time AI insights, such as [follow-ups](/docs/concepts/follow-ups), [action items](/docs/concepts/action-items), [topics](/docs/concepts/topics) and [questions](/docs/conversation-api/questions) of a phone call using a PSTN or SIP connection. This application uses the Symbl Javascript SDK which requires the `symbl-node` node package. diff --git a/tutorials/summarization/adding-speaker-info.md b/tutorials/summarization/adding-speaker-info.md index b7d631fb..31388858 100644 --- a/tutorials/summarization/adding-speaker-info.md +++ b/tutorials/summarization/adding-speaker-info.md @@ -31,10 +31,6 @@ To send the speaker information, you can use the [POST Async Text API](/docs/asy You can use an email conversation as the input of the transcript content in the Async Text API. You can submit the email content in the request for the Async text API. - -> **Beginner Tip**
-If you are using the Summary API for the first time, the Async Text API is the simplest way to get started and work your way up to creating Summarization for recorded Audio and Video files.
-
-
#### Async Audio/Async Video API

You can choose from any of the following ways to provide speaker separation based on your scenario:

From 3f9d84bbfbecf21ac793d586be56d5b6380d3e23 Mon Sep 17 00:00:00 2001
From: pema-s <81958801+pema-s@users.noreply.github.com>
Date: Tue, 4 Jan 2022 22:25:09 +0530
Subject: [PATCH 64/64] WebSDK

---
 python-sdk/async-audio.md | 2 +
 .../muting-and-unmuting-connected-device.md | 36 ++
 web-sdk/overview.md | 151 +++++++
 web-sdk/passing-a-custom-sourceNode.md | 103 +++++
 .../passing-custom-ondevicechange-handler.md | 55 +++
 web-sdk/reconnecting-real-time.md | 80 ++++
 web-sdk/stopping-realtime-connection.md | 19 +
 web-sdk/subscribe-to-realtime.md | 52 +++
 ...nscribing-live-audio-through-microphone.md | 111 ++++++
 web-sdk/web-sdk-reference.md | 367 ++++++++++++++++++
 10 files changed, 976 insertions(+)
 create mode 100644 web-sdk/muting-and-unmuting-connected-device.md
 create mode 100644 web-sdk/overview.md
 create mode 100644 web-sdk/passing-a-custom-sourceNode.md
 create mode 100644 web-sdk/passing-custom-ondevicechange-handler.md
 create mode 100644 web-sdk/reconnecting-real-time.md
 create mode 100644 web-sdk/stopping-realtime-connection.md
 create mode 100644 web-sdk/subscribe-to-realtime.md
 create mode 100644 web-sdk/transcribing-live-audio-through-microphone.md
 create mode 100644 web-sdk/web-sdk-reference.md

diff --git a/python-sdk/async-audio.md b/python-sdk/async-audio.md
index 33626203..ec6f301a 100644
--- a/python-sdk/async-audio.md
+++ b/python-sdk/async-audio.md
@@ -4,8 +4,10 @@ title: Async Audio API
sidebar_label: Audio API
slug: /python-sdk/async-audio
---
+
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+
---

The Python SDK allows you to asynchronously send audio conversation data and generate the following:

diff --git a/web-sdk/muting-and-unmuting-connected-device.md b/web-sdk/muting-and-unmuting-connected-device.md
new file mode 100644
index 00000000..1f864e54
--- /dev/null
+++ b/web-sdk/muting-and-unmuting-connected-device.md
@@ -0,0 +1,36 @@
---
id: muting-and-unmuting-connected-device
title: Muting and Unmuting Connected Device (Beta)
sidebar_label: Muting and Unmuting Connected Device (Beta)
slug: /web-sdk/muting-and-unmuting-connected-device
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

---

You can mute and unmute the connected device by simply calling `symbl.mute()` or `symbl.unmute()`.

### Muting
A quick snippet on how to use the mute method is given below:

```js
(async () => {
  const connection = await symbl.startRealtimeRequest(connectionConfig);
  await symbl.mute(connection);
})();
```
### Unmuting
A quick snippet on how to use the unmute method is given below:

```js
(async () => {
  const connection = await symbl.startRealtimeRequest(connectionConfig);
  await symbl.unmute(connection);
})();
```

diff --git a/web-sdk/overview.md b/web-sdk/overview.md
new file mode 100644
index 00000000..674b3b8d
--- /dev/null
+++ b/web-sdk/overview.md
@@ -0,0 +1,151 @@
---
id: web-sdk
title: Symbl Web SDK (Beta)
sidebar_label: Introduction
slug: /web-sdk/overview
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

---

The Symbl Web SDK provides access to the Symbl APIs for applications directly in the browser.

> **Source Code**<br/>
+Find the source code here: [https://github.com/symblai/symbl-web-sdk](https://github.com/symblai/symbl-web-sdk).
+ + +## Supported Browsers +--- + +|-- | Chrome | Edge Firefox | Firefox | Safari | +| -------| ---------- | ------- | ----- | ------- | +| macOS | ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) | +| Windows | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png)| ![icon](/img/tick-mark.png) | | +| Linux | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | +| iOS | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) | +| Android | ![icon](/img/tick-mark.png)| | ![icon](/img/tick-mark.png) | ![icon](/img/tick-mark.png) | + +## Setup +--- +**To use the Symbl Web SDK,** + +Include it via script tags in your HTML file: + +```html + +``` +or + +```html + +``` + +In case of a front-end web application using a framework such as React, import it in the ES2015 style: + +```bash +import symbl from "@symblai/symbl-web-sdk"; +``` + +## Initialization +--- +The `init` authenticates you to use the Symbl API using the provided authentication credentials. To get authentication credentials (App ID and Secret), follow the steps given in the [Authentication](/docs/developer-tools/authentication#step-1-get-your-api-credentials) page. + +You can authenticate: + +- [Using your API Credentials](#authenticate-using-api-credentials) + +                or + +- [Using your Auth Token](#authenticate-using-token) + +### Authenticate using API Credentials + +Use the code given below to authenticate using your App ID and App Secret. + +```js +sdk.init({ + // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai + appId: APP_ID, + appSecret: APP_SECRET, + basePath: 'https://api.symbl.ai' +}) +.then(() => console.log('SDK Initialized.')) +.catch(err => console.error('Error in initialization.', err)); +``` + +### Authenticate using Token + +Use the code given below to authenticate using the Auth Token. To generate the Auth Token follow the Steps given in the [Authentication](/docs/developer-tools/authentication#step-2-generate-the-access-token) Page. + +```js +sdk.init({ + accessToken: ACCESS_TOKEN_HERE, + basePath: 'https://api.symbl.ai' +}) +.then(() => console.log('SDK Initialized.')) +.catch(err => console.error('Error in initialization.', err)); +``` + + +:::note Web SDK in Labs +The Web SDK is available as a part of [Symbl Labs](/docs/labs) with select features. You can find the Web SDK Labs Readme here: [https://github.com/symblai/symbl-web-sdk/blob/labs/README.md](https://github.com/symblai/symbl-web-sdk/blob/labs/README.md) and the source code here: [https://github.com/symblai/symbl-web-sdk/tree/labs](https://github.com/symblai/symbl-web-sdk/tree/labs). +::: + +## Streaming API config options + +The full details of the Streaming API config options can be seen [here](https://docs.symbl.ai/docs/streaming-api/api-reference/#request-parameters). + +### Additional Web SDK configs +These are configs that have been added that are specific to the Web SDK. + +| Name | Default | Description | +| -------| ---------- | ------- | +| `sourceNode` | `null` | For passing in an external [MediaStreamAudioSourceNode](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamAudioSourceNode/MediaStreamAudioSourceNode) object. By default the Web SDK will handle audio context and source nodes on it's own, though if you wish to handle that externally we've provided that option. | +| `reconnectOnError` | `true` | If true the Web SDK will attempt to reconnect to the WebSocket in case of error. 
You can also make sure of our `onReconnectFail` callback which will fire in case the reconnection attempt fails. | + +### Usage Example + +```js +const id = btoa("my-first-symbl-ai-code"); + +const connectionConfig = { + id, + insightTypes: ['action_item', 'question'], + sourceNode: sourceNode, + reconnectOnError: true, + handlers: { // Read the handlers section for more + ondevicechange: () => { + alert('device changed!'); + }, + ... + } + ... +} + + +... + +// Creates the WebSocket in a non-processing state +const stream = await symbl.createStream(connectionConfig); + +// Send the start request +await symbl.unmute(stream); +``` + +## Tutorials +--- +We have prepared a list of tutorials to help you understand how to use the Web SDK. + +* [Transcribing Live Audio Input through Microphone](/docs/web-sdk/transcribing-live-audio-through-microphone) + +### Web SDK Reference +--- +The supported Handlers and Callbacks for the Web SDK are listed below: + +* [Event Handlers](/docs/javascript-sdk/reference#event-handlers-1) + * [onSpeechDetected](/docs/javascript-sdk/reference#onspeechdetected) + * [onMessageResponse](/docs/javascript-sdk/reference#onmessageresponse) + * [onInsightResponse](/docs/javascript-sdk/reference#oninsightresponse) + * [onTopicResponse](/docs/javascript-sdk/reference#ontopicresponse) \ No newline at end of file diff --git a/web-sdk/passing-a-custom-sourceNode.md b/web-sdk/passing-a-custom-sourceNode.md new file mode 100644 index 00000000..85f719fa --- /dev/null +++ b/web-sdk/passing-a-custom-sourceNode.md @@ -0,0 +1,103 @@ +--- +id: passing-custom-sourcenode +title: Passing a custom sourceNode (Beta) +sidebar_label: Passing a custom sourceNode (Beta) +slug: /web-sdk/passing-custom-sourcenode +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- + +If you wish you can pass in a custom `MediaStreamAudioSourceNode` object to the Web SDK. By default the Web SDK will create the AudioContext and the `MediaStreamAudioSourceNode` object automatically but using this will give you more control over those. + +Once you create the `MediaStreamAudioSourceNode` object you can pass it via the `connectionConfig` as sourceNode + +```js + +// create the MediaStreamAudioSourceNode +const AudioContext = window.AudioContext || window.webkitAudioContext; +stream = await navigator.mediaDevices.getUserMedia({ + audio: true, + video: false +}); +context = new AudioContext(); +const sourceNode = context.createMediaStreamSource(stream); + +symbl.init({ + appId: '', + appSecret: '', + // accessToken: '', // can be used instead of appId and appSecret + basePath: 'https://api-labs.symbl.ai', +}); + +const id = btoa("my-first-symbl-ai-code"); +// pass in the MediaStreamAudioSourceNode as sourceNode +const connectionConfig = { + id, + sourceNode, + insightTypes: ['action_item', 'question'], + config: { + meetingTitle: 'My Test Meeting ' + id, + confidenceThreshold: 0.7, + timezoneOffset: 480, // Offset in minutes from UTC + languageCode: 'en-US', + sampleRateHertz: 48000 + }, + speaker: { + // Optional, if not specified, will simply not send an email in the end. + userId: '', // Update with valid email + name: '' + }, + handlers: { + /** + * This will return live speech-to-text transcription of the call. 
+ */ + onSpeechDetected: (data) => { + if (data) { + const {punctuated} = data + console.log('Live: ', punctuated && punctuated.transcript) + console.log(''); + } + // console.log('onSpeechDetected ', JSON.stringify(data, null, 2)); + }, + /** + * When processed messages are available, this callback will be called. + */ + onMessageResponse: (data) => { + // console.log('onMessageResponse', JSON.stringify(data, null, 2)) + }, + /** + * When Symbl detects an insight, this callback will be called. + */ + onInsightResponse: (data) => { + // console.log('onInsightResponse', JSON.stringify(data, null, 2)) + }, + /** + * When Symbl detects a topic, this callback will be called. + */ + onTopicResponse: (data) => { + // console.log('onTopicResponse', JSON.stringify(data, null, 2)) + } + } +}; + +(async () => { + // Creates the WebSocket in a non-processing state + const stream = await symbl.createStream(connectionConfig); + + // Send the start request + await stream.start(stream); +})(); +``` + +### Updating your external source node + +If you wish to update your external source node you can do se by using the `symbl.updateSourceNode` function: + +```js +symbl.updateSourceNode(stream, sourceNode); +``` + + diff --git a/web-sdk/passing-custom-ondevicechange-handler.md b/web-sdk/passing-custom-ondevicechange-handler.md new file mode 100644 index 00000000..8a494666 --- /dev/null +++ b/web-sdk/passing-custom-ondevicechange-handler.md @@ -0,0 +1,55 @@ +--- +id: passing-custom-ondevicechange-handler +title: Passing a custom ondevicechange handler (Beta) +sidebar_label: Passing a custom ondevicechange handler (Beta) +slug: /web-sdk/passing-custom-ondevicechange-handler +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- + +By default, the Symbl Web SDK will handle the `ondevicechange` event and send a `modify_request` event to modify the sample rate with the new device's sample rate. If you wish to override this logic you can pass in your own `ondevicechange` handler in the handlers config. + +```js +symbl.init({ + appId: '', + appSecret: '', + // accessToken: '', // can be used instead of appId and appSecret + basePath: 'https://api-labs.symbl.ai', +}); +const id = btoa("my-first-symbl-ai-code"); +// pass in the MediaStreamAudioSourceNode as sourceNode +const connectionConfig = { + id, + insightTypes: ['action_item', 'question'], + config: { + languageCode: 'en-US', + sampleRateHertz: 48000 + }, + handlers: { + ondevicechange: () => { + // add your logic here. 
+ } + } +}; + +(async () => { + // Creates the WebSocket in a non-processing state + const stream = await symbl.createStream(connectionConfig); + + // Send the start request + await stream.start(stream); +})(); +``` +### Using the deviceChanged callback + +You can also make use of our callback using our `deviceChanged` callback: + +```js +symbl.deviceChanged = () => { + // Add your logic here +} +``` + diff --git a/web-sdk/reconnecting-real-time.md b/web-sdk/reconnecting-real-time.md new file mode 100644 index 00000000..0c19b3ef --- /dev/null +++ b/web-sdk/reconnecting-real-time.md @@ -0,0 +1,80 @@ +--- +id: reconnecting-real-time +title: Reconnecting to an Existing Real-time Connection (Beta) +sidebar_label: Reconnecting to an Existing Real-time Connection +slug: /web-sdk/reconnecting-real-time +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- + +In case a user closes their browser or has an interruption in their WebSocket connection, you can use the `store` object to grab the Connection ID you last used. + +```js +const id = symbl.store.get('connectionID'); + +const connectionConfig = { + id, + insightTypes: ['action_item', 'question'], + config: { + meetingTitle: 'My Test Meeting ' + id, + confidenceThreshold: 0.7, + timezoneOffset: 480, // Offset in minutes from UTC + languageCode: 'en-US', + sampleRateHertz: 44100 + }, + speaker: { + // Optional, if not specified, will simply not send an email in the end. + userId: '', // Update with valid email + name: '' + }, + handlers: { + /** + * This will return live speech-to-text transcription of the call. + */ + onSpeechDetected: (data) => { + if (data) { + const {punctuated} = data + console.log('Live: ', punctuated && punctuated.transcript) + console.log(''); + } + // console.log('onSpeechDetected ', JSON.stringify(data, null, 2)); + }, + /** + * When processed messages are available, this callback will be called. + */ + onMessageResponse: (data) => { + // console.log('onMessageResponse', JSON.stringify(data, null, 2)) + }, + /** + * When Symbl detects an insight, this callback will be called. + */ + onInsightResponse: (data) => { + // console.log('onInsightResponse', JSON.stringify(data, null, 2)) + }, + /** + * When Symbl detects a topic, this callback will be called. + */ + onTopicResponse: (data) => { + // console.log('onTopicResponse', JSON.stringify(data, null, 2)) + } + } +}; + +(async () => { + const connection = await symbl.startRealtimeRequest(connectionConfig); +})(); +``` + +The `startRealtimeRequest` connects to a Streaming API Web Socket endpoint using the provided configuration options. Read more about `startRealtimeRequest` [here](/docs/web-sdk/web-sdk-reference#startrealtimerequest). + +Read about the Streaming API parameters for `connectionConfig` [here](/docs/streaming-api/api-reference/#request-parameters). + +Read more about the supported Event Handlers: + +    👉   [onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
+    👉   [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
+    👉   [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
+    👉   [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse) diff --git a/web-sdk/stopping-realtime-connection.md b/web-sdk/stopping-realtime-connection.md new file mode 100644 index 00000000..443268cd --- /dev/null +++ b/web-sdk/stopping-realtime-connection.md @@ -0,0 +1,19 @@ +--- +id: stop-realtime-connection +title: Stopping Realtime Connection (Beta) +sidebar_label: Stopping Realtime Connection (Beta) +slug: /web-sdk/stop-realtime-connection +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- + +In order to end the connection to the realtime WebSocket you'll need to use the following command with your connection object: + +```js +symbl.stopRequest(connection); +``` + +If you do not sever the connection you could use more minutes of time than intended, so it is recommended to always end the connection programmatically. \ No newline at end of file diff --git a/web-sdk/subscribe-to-realtime.md b/web-sdk/subscribe-to-realtime.md new file mode 100644 index 00000000..904a97eb --- /dev/null +++ b/web-sdk/subscribe-to-realtime.md @@ -0,0 +1,52 @@ +--- +id: subscribe-to-realtime +title: Subscribing to an existing realtime connection with Subscribe API (Beta) +sidebar_label: Subscribing to an existing realtime connection with Subscribe API (Beta) +slug: /web-sdk/subscribe-to-realtime +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- + +With the Subscribe API you can connect to an existing connection via the connection ID. You'll want to open this example in a different browser while the realtime transcription example is running. + +## Current call signature + +```js +symbl.subscribeToStream(id, { + reconnectOnError: true, + handlers: { + onMessage: (message) => { ... }, + onSubscribe: () => { ... }, + onClose: () => { ... }, + onReconnectFail: (err) => { ... }, + } +}); +``` + +## Deprecated call signature + +This way of using the `subscribeToSream` function has been deprecated. It will still work but might not in future versions. Please convert to the current call signature above. The function passed is equivalent to the `onMessage` handler in the new call signature. + +```js +symbl.subscribeToStream(id, (data) => { + console.log('data:', data); +}) +``` + +| Name | Description | +| -------| ---------- | +| `reconnectOnError` | `true` | If true the Web SDK will attempt to reconnect to the WebSocket in case of error. You can also make sure of our `onReconnectFail` callback which will fire in case the reconnection attempt fails.) | + +## Subscribe API Handlers + +| Name | Default value | Description | +| -------| ---------- | ------- | +| `onMessage(message)` | Fired any time a message is received. | If true the Web SDK will attempt to reconnect to the WebSocket in case of error. You can also make sure of our `onReconnectFail` callback which will fire in case the reconnection attempt fails.) | +| `onSubscribe()` | Fired when the connection intially subscribes. +| `onClose()` | Fired when the connection is closed. +| `onReconnectFail(err)` | Fires when the reconnection attempt fails. Related to the `reconnectOnError` config. 
+
diff --git a/web-sdk/transcribing-live-audio-through-microphone.md b/web-sdk/transcribing-live-audio-through-microphone.md
new file mode 100644
index 00000000..bf791a24
--- /dev/null
+++ b/web-sdk/transcribing-live-audio-through-microphone.md
@@ -0,0 +1,111 @@
+---
+id: transcribing-live-audio-through-microphone
+title: Transcribing Live Audio through Microphone (Beta)
+sidebar_label: Transcribing Live Audio through Microphone
+slug: /web-sdk/transcribing-live-audio-through-microphone
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+---
+
+As a simple test of the Streaming API, you can set up a live microphone and push its audio stream to Symbl using the browser APIs for microphone access.
+
+Initialize the SDK and connect via the built-in WebSocket connector. This will output the live transcription to the console.
+
+### Initialize the SDK
+
+```js
+symbl.init({
+  appId: '',
+  appSecret: '',
+  // accessToken: '', // can be used instead of appId and appSecret
+  // basePath: '',
+});
+```
+
+You can get the `appId` and `appSecret` from the [Symbl Platform](https://platform.symbl.ai).
+See the steps to get your API Credentials in the [Authentication](/docs/developer-tools/authentication) section.
+
+### Start the Connection and pass Configuration Options
+
+:::note Using createStream to start a realtime request
+Creating a stream using `symbl.startRealtimeRequest(config)` has been deprecated in favor of `symbl.createStream(config)`. For `createStream`, the WebSocket is started in a non-processing state; you must send the start request before processing any audio.
+
+After the stream is created, you need to call `symbl.start(stream)` to start the stream.
+:::
+
+```js
+symbl.init({
+  appId: '',
+  appSecret: '',
+  // accessToken: '', // can be used instead of appId and appSecret
+  basePath: 'https://api-labs.symbl.ai',
+});
+
+const id = btoa("symbl-ai-is-the-best");
+
+const connectionConfig = {
+  id,
+  insightTypes: ['action_item', 'question'],
+  config: {
+    meetingTitle: 'My Test Meeting ' + id,
+    confidenceThreshold: 0.7,
+    timezoneOffset: 480, // Offset in minutes from UTC
+    languageCode: 'en-US',
+    sampleRateHertz: 48000
+  },
+  speaker: {
+    // Optional. If not specified, no email is sent at the end of the conversation.
+    userId: '', // Update with a valid email
+    name: ''
+  },
+  handlers: {
+    /**
+     * This will return live speech-to-text transcription of the call.
+     */
+    onSpeechDetected: (data) => {
+      if (data) {
+        const {punctuated} = data
+        console.log('Live: ', punctuated && punctuated.transcript)
+        console.log('');
+      }
+      // console.log('onSpeechDetected ', JSON.stringify(data, null, 2));
+    },
+    /**
+     * When processed messages are available, this callback will be called.
+     */
+    onMessageResponse: (data) => {
+      // console.log('onMessageResponse', JSON.stringify(data, null, 2))
+    },
+    /**
+     * When Symbl detects an insight, this callback will be called.
+     */
+    onInsightResponse: (data) => {
+      // console.log('onInsightResponse', JSON.stringify(data, null, 2))
+    },
+    /**
+     * When Symbl detects a topic, this callback will be called.
+     */
+    onTopicResponse: (data) => {
+      // console.log('onTopicResponse', JSON.stringify(data, null, 2))
+    }
+  }
+};
+
+(async () => {
+  // Creates the WebSocket in a non-processing state
+  const stream = await symbl.createStream(connectionConfig);
+
+  // Send the start request
+  await symbl.start(stream);
+})();
+```
+
+Read more about the supported Event Handlers:
+
+&nbsp;&nbsp;&nbsp;&nbsp;👉&nbsp;&nbsp;&nbsp;[onSpeechDetected](/docs/web-sdk/web-sdk-reference#onspeechdetected)
+    👉   [onMessageResponse](/docs/web-sdk/web-sdk-reference#onmessageresponse)
+    👉   [onInsightResponse](/docs/web-sdk/web-sdk-reference#oninsightresponse)
+    👉   [onTopicResponse](/docs/web-sdk/web-sdk-reference#ontopicresponse) \ No newline at end of file diff --git a/web-sdk/web-sdk-reference.md b/web-sdk/web-sdk-reference.md new file mode 100644 index 00000000..08a8ee51 --- /dev/null +++ b/web-sdk/web-sdk-reference.md @@ -0,0 +1,367 @@ +--- +id: web-sdk-reference +title: Web SDK Reference +slug: /web-sdk/web-sdk-reference +sidebar_label: Web SDK Reference +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +--- + +## Public Methods + +### init + +```init (String appId, String appSecret)``` + +Authenticates with the Symbl API using the provided authentication credentials. + +#### Parameters + +Name | Description +-----|------------ +`appId` | The Symbl Application ID you get from the [Symbl Platform](https://platform.symbl.ai) +`appSecret` | The Symbl Application Secret Token you get from the [Symbl Platform](https://platform.symbl.ai) +`basePath` | The base path of the endpoint. By default it is `https://api.symbl.ai`. +`accessToken` | The Symbl authentication Token you get from your `appId` and `appSecret`. This is an optional parameter you can use to authenticate using auth Token rather than the App ID and App Secret. See sample code [here](/docs/javascript-sdk/introduction#authenticate-using-token). + +#### Returns + +A Promise which is resolved once the API is connected and authenticated with Symbl. + +#### Code Example + +```js +sdk.init({ + // APP_ID and APP_SECRET come from the Symbl Platform: https://platform.symbl.ai + appId: APP_ID, + appSecret: APP_SECRET, + basePath: 'https://api.symbl.ai' +}) +.then(() => console.log('SDK Initialized.')) +.catch(err => console.error('Error in initialization.', err)); +``` +### startRealtimeRequest + +```startRealtimeRequest ( options)``` + +Connects to a [Streaming API](/docs/streamingapi/overview/introduction) Web Socket endpoint using the provided configuration options. + +#### Parameters + +Name | Description +-----|------------ +`options` | Options specified for the [Streaming API Configuration Object](https://docs.symbl.ai/docs/streaming-api/api-reference#request-parameters). + +#### Returns + +A Promise which is resolved once real-time request has been established. + +## Event Handlers + +When connecting using [`startRealtimeRequest`](#startRealtimeRequest), you can pass various handlers in the configuration options which be called if the specific event attached to the handler is fired. + +#### Code Example + +```js +handlers: { + /** + * This will return live speech-to-text transcription of the call. + */ + onSpeechDetected: (data) => { + console.log(JSON.stringify(data)) + if (data) { + const {punctuated} = data + console.log('Live: ', punctuated && punctuated.transcript) + } + }, + /** + * When processed messages are available, this callback will be called. + */ + onMessageResponse: (data) => { + console.log('onMessageResponse', JSON.stringify(data, null, 2)) + }, + /** + * When Symbl detects an insight, this callback will be called. + */ + onInsightResponse: (data) => { + console.log('onInsightResponse', JSON.stringify(data, null, 2)) + }, + /** + * When Symbl detects a topic, this callback will be called. + */ + onTopicResponse: (data) => { + console.log('onTopicResponse', JSON.stringify(data, null, 2)) + } +} +``` + +### onSpeechDetected + +To retrieve the real-time transcription results as soon as they are detected. You can use this callback to render live transcription which is specific to the speaker of this audio stream. 
+
+#### onSpeechDetected JSON Response Example
+
+```js
+{
+  "type": "recognition_result",
+  "isFinal": true,
+  "payload": {
+    "raw": {
+      "alternatives": [{
+        "words": [{
+          "word": "Hello",
+          "startTime": {
+            "seconds": "3",
+            "nanos": "800000000"
+          },
+          "endTime": {
+            "seconds": "4",
+            "nanos": "200000000"
+          }
+        }, {
+          "word": "world.",
+          "startTime": {
+            "seconds": "4",
+            "nanos": "200000000"
+          },
+          "endTime": {
+            "seconds": "4",
+            "nanos": "800000000"
+          }
+        }],
+        "transcript": "Hello world.",
+        "confidence": 0.9128385782241821
+      }]
+    }
+  },
+  "punctuated": {
+    "transcript": "Hello world."
+  },
+  "user": {
+    "userId": "emailAddress",
+    "name": "John Doe",
+    "id": "23681108-355b-4fc3-9d94-ed47dd39fa56"
+  }
+}
+```
+
+### onMessageResponse
+
+This callback function contains the "finalized" transcription data for this speaker. If used with multiple streams with other speakers, this callback also provides their messages.
+
+The "finalized" messages mean that the automatic speech recognition has finalized the state of this part of the transcription and has declared it "final". Therefore, this transcription will be more accurate than [`onSpeechDetected`](#onspeechdetected).
+
+#### onMessageResponse JSON Response Example
+
+```js
+[{
+  "from": {
+    "id": "0a7a36b1-047d-4d8c-8958-910317ed9edc",
+    "name": "John Doe",
+    "userId": "emailAddress"
+  },
+  "payload": {
+    "content": "Hello world.",
+    "contentType": "text/plain"
+  },
+  "id": "59c224c2-54c5-4762-9582-961bf250b478",
+  "channel": {
+    "id": "realtime-api"
+  },
+  "metadata": {
+    "disablePunctuation": true,
+    "timezoneOffset": 480,
+    "originalContent": "Hello world.",
+    "words": "[{\"word\":\"Hello\",\"startTime\":\"2021-02-04T20:34:59.029Z\",\"endTime\":\"2021-02-04T20:34:59.429Z\"},{\"word\":\"world.\",\"startTime\":\"2021-02-04T20:34:59.429Z\",\"endTime\":\"2021-02-04T20:35:00.029Z\"}]",
+    "originalMessageId": "59c224c2-54c5-4762-9582-961bf250b478"
+  },
+  "dismissed": false,
+  "duration": {
+    "startTime": "2021-02-04T20:34:59.029Z",
+    "endTime": "2021-02-04T20:35:00.029Z"
+  }
+}]
+```
+
+### onInsightResponse
+
+This callback provides you with any of the detected insights in real-time as they are detected. As with [`onMessageResponse`](#onmessageresponse), this also returns every speaker's insights when multiple streams are used.
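+
+As a minimal sketch of consuming this payload (field names taken from the example below), you might log each insight's type and text:
+
+```js
+// Sketch only: data is the array delivered to onInsightResponse.
+onInsightResponse: (data) => {
+  data.forEach((insight) => {
+    // e.g. "action_item: Perhaps John Doe can submit the report today."
+    console.log(`${insight.type}: ${insight.payload.content}`);
+  });
+}
+```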
+
+#### onInsightResponse JSON Response Example
+
+```json
+[{
+  "id": "94020eb9-b688-4d56-945c-a7e5282258cc",
+  "confidence": 0.9909798145016999,
+  "messageReference": {
+    "id": "94020eb9-b688-4d56-945c-a7e5282258cc"
+  },
+  "hints": [{
+    "key": "informationScore",
+    "value": "0.9782608695652174"
+  }, {
+    "key": "confidenceScore",
+    "value": "0.9999962500210938"
+  }, {
+    "key": "comprehensionScore",
+    "value": "0.9983848333358765"
+  }],
+  "type": "action_item",
+  "assignee": {
+    "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0",
+    "name": "John Doe",
+    "userId": "emailAddress"
+  },
+  "dueBy": {
+    "value": "2021-02-05T00:00:00-07:00"
+  },
+  "tags": [{
+    "type": "date",
+    "text": "today",
+    "beginOffset": 39,
+    "value": {
+      "value": {
+        "datetime": "2021-02-05"
+      }
+    }
+  }, {
+    "type": "person",
+    "text": "John Doe",
+    "beginOffset": 8,
+    "value": {
+      "value": {
+        "name": "John Doe",
+        "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0",
+        "assignee": true,
+        "userId": "emailAddress"
+      }
+    }
+  }],
+  "dismissed": false,
+  "payload": {
+    "content": "Perhaps John Doe can submit the report today.",
+    "contentType": "text/plain"
+  },
+  "from": {
+    "id": "e2c5acf8-b9ed-421a-b3b3-02a5ae9796a0",
+    "name": "John Doe",
+    "userId": "emailAddress"
+  }
+}]
```
+
+### onTopicResponse
+
+This callback provides you with any of the detected topics in real-time as they are detected. As with [`onMessageResponse`](#onmessageresponse), this also returns every topic when multiple streams are used.
+
+#### onTopicResponse JSON Response Example
+
+```json
+[{
+  "id": "e69a5556-6729-11eb-ab14-2aee2deabb1b",
+  "messageReferences": [{
+    "id": "0df44422-0248-47e9-8814-e87f63404f2c",
+    "relation": "text instance"
+  }],
+  "phrases": "auto insurance",
+  "rootWords": [{
+    "text": "auto"
+  }],
+  "score": 0.9,
+  "type": "topic"
+}]
+```
+
+### onTrackerResponse(trackers)
+
+This callback provides you with any of the detected trackers in real-time as they are detected. As with [`onMessageResponse`](#onmessageresponse), this also returns every tracker when multiple streams are used.
+
+#### onTrackerResponse JSON Response Example
+
+```json
+{
+  "type": "tracker_response",
+  "isFinal": true,
+  "trackers": [
+    {
+      "name": "Goodness",
+      "matches": [
+        {
+          "type": "vocabulary",
+          "value": "This is awesome",
+          "messageRefs": [
+            {
+              "id": "fa93aa64-0e8d-4697-bb52-e2916ca63192",
+              "text": "This is awesome.",
+              "offset": 0
+            }
+          ],
+          "insightRefs": []
+        },
+        {
+          "type": "vocabulary",
+          "value": "Hello world",
+          "messageRefs": [
+            {
+              "id": "8e720656-fed7-4b11-b359-3931c53bbcec",
+              "text": "Hello world.",
+              "offset": 0
+            }
+          ],
+          "insightRefs": []
+        }
+      ]
+    },
+    {
+      "name": "Goodness",
+      "matches": [
+        {
+          "type": "vocabulary",
+          "value": "I like it",
+          "messageRefs": [
+            {
+              "id": "193dc144-2b55-4214-b211-ab83bd3e4a2e",
+              "text": "I love it.",
+              "offset": -1
+            }
+          ],
+          "insightRefs": []
+        }
+      ]
+    }
+  ],
+  "sequenceNumber": 1
+}
+```
+
+### onRequestError(err)
+
+Fires when the WebSocket has an error.
+
+### onConversationCompleted(message)
+
+Fires when the `conversation_completed` event is received from the WebSocket.
+
+### onReconnectFail(err)
+
+Fires when the reconnection attempt fails. Related to the `reconnectOnError` config.
+
+### onStartedListening(message)
+
+Fires when the `started_listening` event is received from the WebSocket.
+
+### onRequestStart(message)
+
+Fires when the `recognition_started` event is received from the WebSocket.
+
+### onRequestStop(message)
+
+Fires when the `recognition_stopped` event is received from the WebSocket.
+
+### onClose(event)
+
+Fires when the WebSocket connection closes for any reason.
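+
+As a minimal sketch, these lifecycle handlers can be passed in the same `handlers` configuration object as the response handlers shown earlier (an assumption based on the Event Handlers section above; adapt to your setup):
+
+```js
+handlers: {
+  // Fires when the recognition_started event is received
+  onRequestStart: (message) => console.log('Recognition started.'),
+  // Fires when the recognition_stopped event is received
+  onRequestStop: (message) => console.log('Recognition stopped.'),
+  // Fires when the WebSocket has an error
+  onRequestError: (err) => console.error('WebSocket error:', err),
+  // Fires when the WebSocket connection closes for any reason
+  onClose: (event) => console.log('Connection closed.')
+}
+```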