diff --git a/_dashboards/dashboards-assistant/alert-insight.md b/_dashboards/dashboards-assistant/alert-insight.md
new file mode 100644
index 0000000000..9e41b977ca
--- /dev/null
+++ b/_dashboards/dashboards-assistant/alert-insight.md
@@ -0,0 +1,318 @@
+---
+layout: default
+title: Alert insights
+parent: OpenSearch Assistant for OpenSearch Dashboards
+nav_order: 1
+has_children: false
+---
+
+# Alert insights
+
+This is an experimental feature and is not recommended for use in a production environment. For updates on the progress of the feature or if you want to leave feedback, join the discussion on the [OpenSearch forum](https://forum.opensearch.org/).
+{: .warning}
+
+The OpenSearch Dashboards Assistant alert insights feature generates alert summaries and provides log patterns based on the logs that triggered the alert.
+
+## Configuring alert insights
+
+To configure alert insights, use the following steps.
+
+### Prerequisite
+
+Before using alert insights, you must have the `alerting` and `alerting-dashboards` plugins installed on your cluster. By default, these plugins are installed as part of standard OpenSearch distributions. For more information, see [Installing plugins]({{site.url}}{{site.baseurl}}/install-and-configure/plugins/).
+
+### Step 1: Enable alert insights
+
+To enable alert insights, configure the following `opensearch_dashboards.yml` setting:
+
+```yaml
+assistant.alertInsight.enabled: true
+```
+{% include copy.html %}
+
+### Step 2: Create the agents
+
+To orchestrate alert insights, you'll need to create the necessary [agents]({{site.url}}{{site.baseurl}}/ml-commons-plugin/agents-tools/index/#agents). Create a workflow template for creating all necessary agents by sending the following request:
+
+<details markdown="block">
+ + Request + + {: .text-delta} + +```json +POST /_plugins/_flow_framework/workflow?provision=true +{ + "name": "Alert Summary Agent", + "description": "Create Alert Summary Agent using Claude on BedRock", + "use_case": "REGISTER_AGENT", + "version": { + "template": "1.0.0", + "compatibility": ["2.17.0", "3.0.0"] + }, + "workflows": { + "provision": { + "user_params": {}, + "nodes": [ + { + "id": "create_claude_connector", + "type": "create_connector", + "previous_node_inputs": {}, + "user_inputs": { + "version": "1", + "name": "Claude instant runtime Connector", + "protocol": "aws_sigv4", + "description": "The connector to BedRock service for Claude model", + "actions": [ + { + "headers": { + "x-amz-content-sha256": "required", + "content-type": "application/json" + }, + "method": "POST", + "request_body": "{\"prompt\":\"\\n\\nHuman: ${parameters.prompt}\\n\\nAssistant:\", \"max_tokens_to_sample\":${parameters.max_tokens_to_sample}, \"temperature\":${parameters.temperature}, \"anthropic_version\":\"${parameters.anthropic_version}\" }", + "action_type": "predict", + "url": "https://bedrock-runtime.us-west-2.amazonaws.com/model/anthropic.claude-instant-v1/invoke" + } + ], + "credential": { + "access_key": "", + "secret_key": "", + "session_token": "" + }, + "parameters": { + "region": "us-west-2", + "endpoint": "bedrock-runtime.us-west-2.amazonaws.com", + "content_type": "application/json", + "auth": "Sig_V4", + "max_tokens_to_sample": "8000", + "service_name": "bedrock", + "temperature": "0.0001", + "response_filter": "$.completion", + "anthropic_version": "bedrock-2023-05-31" + } + } + }, + { + "id": "register_claude_model", + "type": "register_remote_model", + "previous_node_inputs": { + "create_claude_connector": "connector_id" + }, + "user_inputs": { + "description": "Claude model", + "deploy": true, + "name": "claude-instant" + } + }, + { + "id": "create_alert_summary_ml_model_tool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "parameters": { + "prompt": "You are an OpenSearch Alert Assistant to help summarize the alerts.\n Here is the detail of alert: ${parameters.context};\n The question is: ${parameters.question}." + }, + "name": "MLModelTool", + "type": "MLModelTool" + } + }, + { + "id": "create_alert_summary_agent", + "type": "register_agent", + "previous_node_inputs": { + "create_alert_summary_ml_model_tool": "tools" + }, + "user_inputs": { + "parameters": {}, + "type": "flow", + "name": "Alert Summary Agent", + "description": "this is an alert summary agent" + } + } + ] + } + } +} +``` +{% include copy-curl.html %} + +
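+
+The provision request returns a `workflow_id`. One way to confirm that provisioning completed, and to look up the IDs of the agents the workflow created, is the Flow Framework workflow status API, sketched in the following request (`<workflow_id>` is a placeholder for the ID returned by the preceding call; created agents should appear under `resources_created` in the response):
+
+```json
+GET /_plugins/_flow_framework/workflow/<workflow_id>/_status
+```
+{% include copy-curl.html %}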
+
+For sample agent templates, see [Flow Framework sample templates](https://github.com/opensearch-project/flow-framework/tree/2.x/sample-templates). Note the agent ID; you'll use it in the following step.
+
+For this example, use the templates to create the following agents:
+- An alert insights agent (see the [flow template](https://github.com/opensearch-project/flow-framework/blob/2.x/sample-templates/create-knowledge-base-alert-agent.json))
+- Two summary agents:
+  - A basic alert summary agent (see the [flow template](https://github.com/opensearch-project/flow-framework/blob/2.x/sample-templates/alert-summary-agent-claude-tested.json))
+  - An agent for an alert summary that includes log patterns (see the [flow template](https://github.com/opensearch-project/flow-framework/blob/2.x/sample-templates/alert-summary-log-pattern-agent.json))
+
+  These agents require different prompts. The prompt for the log patterns summary must include the `${parameters.topNLogPatternData}` placeholder and additional instructions to guide the LLM on using this information effectively. Note that log patterns are available only for query monitors created using OpenSearch Dashboards.
+
+### Step 3: Create the root agents
+
+Next, create [root agents]({{site.url}}{{site.baseurl}}/automating-configurations/workflow-tutorial/#root_agent) for the agents created in the previous step.
+
+Create a root agent for the alert summary agent:
+
+```json
+POST /.plugins-ml-config/_doc/os_summary
+{
+  "type": "os_root_agent",
+  "configuration": {
+    "agent_id": ""
+  }
+}
+```
+{% include copy-curl.html %}
+
+Create a root agent for the alert summary with log patterns agent:
+
+```json
+POST /.plugins-ml-config/_doc/os_summary_with_log_pattern
+{
+  "type": "os_root_agent",
+  "configuration": {
+    "agent_id": ""
+  }
+}
+```
+{% include copy-curl.html %}
+
+Create a root agent for the alert insights agent:
+
+```json
+POST /.plugins-ml-config/_doc/os_insight
+{
+  "type": "os_root_agent",
+  "configuration": {
+    "agent_id": ""
+  }
+}
+```
+{% include copy-curl.html %}
+
+The created `os_insight` agent provides alert insights related to OpenSearch cluster metrics. For insights about alerts unrelated to OpenSearch cluster metrics, you need to register an agent with [this template](https://github.com/opensearch-project/flow-framework/blob/2.x/sample-templates/create-knowledge-base-alert-agent.json) and change the agent name to `KB_For_Alert_Insight`.
+{: .note}
+
+### Step 4: Test the agents
+
+You can verify that the agents were created successfully by calling the agents with an example payload.
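+
+As an optional first check, you can confirm that the root agent configuration documents from the previous step were written by retrieving one of them directly, as in the following example for `os_summary`. Note that `.plugins-ml-config` is a system index, so this request may require admin permissions when the Security plugin is enabled:
+
+```json
+GET /.plugins-ml-config/_doc/os_summary
+```
+{% include copy-curl.html %}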
+ +To test the alert summary agent, send the following request: + +```json +POST /_plugins/_ml/agents//_execute +{ + "parameters": { + "question": "Please summarize this alert, do not use any tool.", + "context": "\n Here is the detail information about alert Error log over 100\n ### Monitor definition\n {\"type\":\"monitor\",\"schema_version\":8,\"name\":\"loghub-apache-error-log\",\"monitor_type\":\"query_level_monitor\",\"enabled\":false,\"enabled_time\":null,\"schedule\":{\"period\":{\"interval\":1,\"unit\":\"MINUTES\"}},\"inputs\":[{\"search\":{\"indices\":[\"loghub-apache-new\"],\"query\":{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"10/12/24 11:21 am CST||-1000000h\",\"to\":\"10/12/24 11:21 am CST\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}}}}],\"triggers\":[{\"query_level_trigger\":{\"id\":\"NAq7fpIBRJyww-JMjwP_\",\"name\":\"Error log over 100\",\"severity\":\"1\",\"condition\":{\"script\":{\"source\":\"ctx.results[0].hits.total.value > 100\",\"lang\":\"painless\"}},\"actions\":[]}}],\"last_update_time\":1728714554388,\"owner\":\"alerting\",\"associated_workflows\":[],\"associatedCompositeMonitorCnt\":0,\"item_type\":\"query_level_monitor\",\"id\":\"NQq7fpIBRJyww-JMkAMC\",\"version\":3}\n\n ### Active Alert\n {\"ACTIVE\":1,\"ACKNOWLEDGED\":0,\"ERROR\":0,\"total\":1,\"alerts\":[{\"id\":\"Wgq8fpIBRJyww-JMegNr\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"workflow_id\":\"\",\"workflow_name\":\"\",\"associated_alert_ids\":[],\"schema_version\":5,\"monitor_version\":1,\"monitor_name\":\"loghub-apache-error-log\",\"execution_id\":\"NQq7fpIBRJyww-JMkAMC_2024-10-12T03:18:54.311214115_22d189ce-5e93-4927-b8bb-bcf61b7537e3\",\"trigger_id\":\"NAq7fpIBRJyww-JMjwP_\",\"trigger_name\":\"Error log over 100\",\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\":[],\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"end_time\":null,\"acknowledged_time\":null,\"alert_source\":\"monitor\"}],\"trigger_name\":\"Error log over 100\",\"severity\":\"1\",\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"monitor_name\":\"loghub-apache-error-log\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"alert_source\":\"monitor\",\"triggerID\":\"NAq7fpIBRJyww-JMjwP_\"}\n\n ### Value triggers this alert\n 595\n\n ### Alert query DSL {\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"2024-10-12T03:21:54+00:00||-1000000h\",\"to\":\"2024-10-12T03:21:54+00:00\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}} \n", + } +} +``` +{% include copy-curl.html %} + +To test the alert summary with log patterns agent, send the following request: + +```json +POST /_plugins/_ml/agents//_execute +{ + "parameters": { + "question": "Please summarize this alert, do not use any tool.", + "context": "\n Here is the detail information about alert Error log over 100\n ### Monitor definition\n 
{\"type\":\"monitor\",\"schema_version\":8,\"name\":\"loghub-apache-error-log\",\"monitor_type\":\"query_level_monitor\",\"enabled\":false,\"enabled_time\":null,\"schedule\":{\"period\":{\"interval\":1,\"unit\":\"MINUTES\"}},\"inputs\":[{\"search\":{\"indices\":[\"loghub-apache-new\"],\"query\":{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"10/12/24 11:21 am CST||-1000000h\",\"to\":\"10/12/24 11:21 am CST\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}}}}],\"triggers\":[{\"query_level_trigger\":{\"id\":\"NAq7fpIBRJyww-JMjwP_\",\"name\":\"Error log over 100\",\"severity\":\"1\",\"condition\":{\"script\":{\"source\":\"ctx.results[0].hits.total.value > 100\",\"lang\":\"painless\"}},\"actions\":[]}}],\"last_update_time\":1728714554388,\"owner\":\"alerting\",\"associated_workflows\":[],\"associatedCompositeMonitorCnt\":0,\"item_type\":\"query_level_monitor\",\"id\":\"NQq7fpIBRJyww-JMkAMC\",\"version\":3}\n\n ### Active Alert\n {\"ACTIVE\":1,\"ACKNOWLEDGED\":0,\"ERROR\":0,\"total\":1,\"alerts\":[{\"id\":\"Wgq8fpIBRJyww-JMegNr\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"workflow_id\":\"\",\"workflow_name\":\"\",\"associated_alert_ids\":[],\"schema_version\":5,\"monitor_version\":1,\"monitor_name\":\"loghub-apache-error-log\",\"execution_id\":\"NQq7fpIBRJyww-JMkAMC_2024-10-12T03:18:54.311214115_22d189ce-5e93-4927-b8bb-bcf61b7537e3\",\"trigger_id\":\"NAq7fpIBRJyww-JMjwP_\",\"trigger_name\":\"Error log over 100\",\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\":[],\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"end_time\":null,\"acknowledged_time\":null,\"alert_source\":\"monitor\"}],\"trigger_name\":\"Error log over 100\",\"severity\":\"1\",\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"monitor_name\":\"loghub-apache-error-log\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"alert_source\":\"monitor\",\"triggerID\":\"NAq7fpIBRJyww-JMjwP_\"}\n\n ### Value triggers this alert\n 595\n\n ### Alert query DSL {\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"2024-10-12T03:21:54+00:00||-1000000h\",\"to\":\"2024-10-12T03:21:54+00:00\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}} \n", + "topNLogPatternData": "[[539,["[Sun Dec 04 07:12:44 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 06:19:18 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 07:18:00 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 16:52:49 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 06:59:47 2005] [error] mod_jk child workerEnv in error state 8","[Sun Dec 04 07:11:22 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 07:18:00 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 17:01:47 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 17:31:12 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 05:04:04 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 20:24:49 2005] [error] mod_jk child workerEnv in error state 8","[Sun Dec 04 06:16:23 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 
20:47:17 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 06:30:43 2005] [error] mod_jk child workerEnv in error state 6","[Mon Dec 05 06:35:27 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 07:07:30 2005] [error] mod_jk child workerEnv in error state 8","[Sun Dec 04 07:18:00 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 16:32:56 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 17:01:47 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 16:52:49 2005] [error] mod_jk child workerEnv in error state 8"],"[ :: ] [] _ "],[32,["[Sun Dec 04 14:29:00 2005] [error] [client 4.245.93.87] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 08:54:17 2005] [error] [client 147.31.138.75] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 17:34:57 2005] [error] [client 61.138.216.82] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 07:45:45 2005] [error] [client 63.13.186.196] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 10:53:30 2005] [error] [client 218.76.139.20] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 10:48:48 2005] [error] [client 67.166.248.235] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 15:18:36 2005] [error] [client 67.154.58.130] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 01:30:32 2005] [error] [client 211.62.201.48] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 16:45:04 2005] [error] [client 216.216.185.130] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 17:31:39 2005] [error] [client 218.75.106.250] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 19:00:56 2005] [error] [client 68.228.3.15] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 19:14:09 2005] [error] [client 61.220.139.68] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 09:35:12 2005] [error] [client 207.203.80.15] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 10:28:44 2005] [error] [client 198.232.168.9] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 16:24:05 2005] [error] [client 58.225.62.140] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 17:53:43 2005] [error] [client 218.39.132.175] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 12:33:13 2005] [error] [client 208.51.151.210] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 15:59:01 2005] [error] [client 24.83.37.136] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 11:42:43 2005] [error] [client 216.127.124.16] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 05:15:09 2005] [error] [client 222.166.160.184] Directory index forbidden by rule: /var/www/html/"],"[ :: ] [] [ ...] 
: ////"],[12,["[Sun Dec 04 20:47:17 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 20:47:17 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 07:57:02 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 17:43:12 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 20:47:17 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 20:47:16 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 07:57:02 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 17:43:12 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2"],"[ :: ] [] _ -"]]" + } +} +``` +{% include copy-curl.html %} + +To test the alert insights agent, send the following request: + +```json +POST /_plugins/_ml/agents//_execute +{ + "parameters": { + "question": "Please provide your insight on this alerts.", + "context": "\n Here is the detail information about alert Error log over 100\n ### Monitor definition\n {\"type\":\"monitor\",\"schema_version\":8,\"name\":\"loghub-apache-error-log\",\"monitor_type\":\"query_level_monitor\",\"enabled\":false,\"enabled_time\":null,\"schedule\":{\"period\":{\"interval\":1,\"unit\":\"MINUTES\"}},\"inputs\":[{\"search\":{\"indices\":[\"loghub-apache-new\"],\"query\":{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"10/12/24 11:21 am CST||-1000000h\",\"to\":\"10/12/24 11:21 am CST\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}}}}],\"triggers\":[{\"query_level_trigger\":{\"id\":\"NAq7fpIBRJyww-JMjwP_\",\"name\":\"Error log over 100\",\"severity\":\"1\",\"condition\":{\"script\":{\"source\":\"ctx.results[0].hits.total.value > 100\",\"lang\":\"painless\"}},\"actions\":[]}}],\"last_update_time\":1728714554388,\"owner\":\"alerting\",\"associated_workflows\":[],\"associatedCompositeMonitorCnt\":0,\"item_type\":\"query_level_monitor\",\"id\":\"NQq7fpIBRJyww-JMkAMC\",\"version\":3}\n\n ### Active Alert\n {\"ACTIVE\":1,\"ACKNOWLEDGED\":0,\"ERROR\":0,\"total\":1,\"alerts\":[{\"id\":\"Wgq8fpIBRJyww-JMegNr\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"workflow_id\":\"\",\"workflow_name\":\"\",\"associated_alert_ids\":[],\"schema_version\":5,\"monitor_version\":1,\"monitor_name\":\"loghub-apache-error-log\",\"execution_id\":\"NQq7fpIBRJyww-JMkAMC_2024-10-12T03:18:54.311214115_22d189ce-5e93-4927-b8bb-bcf61b7537e3\",\"trigger_id\":\"NAq7fpIBRJyww-JMjwP_\",\"trigger_name\":\"Error log over 100\",\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\":[],\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"end_time\":null,\"acknowledged_time\":null,\"alert_source\":\"monitor\"}],\"trigger_name\":\"Error log over 100\",\"severity\":\"1\",\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"monitor_name\":\"loghub-apache-error-log\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"alert_source\":\"monitor\",\"triggerID\":\"NAq7fpIBRJyww-JMjwP_\"}\n\n ### Value triggers this alert\n 595\n\n ### Alert query DSL 
{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"2024-10-12T03:21:54+00:00||-1000000h\",\"to\":\"2024-10-12T03:21:54+00:00\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}} \n", + "summary": + } +} +``` +{% include copy-curl.html %} + +## Generating an alert summary + +You can generate an alert summary by calling the `/api/assistant/summary` API endpoint. To generate an alert summary, the fields `index`, `dsl`, and `topNLogPatternData` are optional. If all three fields are provided, the agent will provide a summary with log pattern analysis; otherwise, it will provide a general summary: + +```json +POST /api/assistant/summary +{ + "summaryType": "alerts", + "question": "Please summarize this alert, do not use any tool.", + "context": "\n Here is the detail information about alert Error log over 100\n ### Monitor definition\n {\"type\":\"monitor\",\"schema_version\":8,\"name\":\"loghub-apache-error-log\",\"monitor_type\":\"query_level_monitor\",\"enabled\":false,\"enabled_time\":null,\"schedule\":{\"period\":{\"interval\":1,\"unit\":\"MINUTES\"}},\"inputs\":[{\"search\":{\"indices\":[\"loghub-apache-new\"],\"query\":{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"10/12/24 11:21 am CST||-1000000h\",\"to\":\"10/12/24 11:21 am CST\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}}}}],\"triggers\":[{\"query_level_trigger\":{\"id\":\"NAq7fpIBRJyww-JMjwP_\",\"name\":\"Error log over 100\",\"severity\":\"1\",\"condition\":{\"script\":{\"source\":\"ctx.results[0].hits.total.value > 100\",\"lang\":\"painless\"}},\"actions\":[]}}],\"last_update_time\":1728714554388,\"owner\":\"alerting\",\"associated_workflows\":[],\"associatedCompositeMonitorCnt\":0,\"item_type\":\"query_level_monitor\",\"id\":\"NQq7fpIBRJyww-JMkAMC\",\"version\":3}\n\n ### Active Alert\n {\"ACTIVE\":1,\"ACKNOWLEDGED\":0,\"ERROR\":0,\"total\":1,\"alerts\":[{\"id\":\"Wgq8fpIBRJyww-JMegNr\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"workflow_id\":\"\",\"workflow_name\":\"\",\"associated_alert_ids\":[],\"schema_version\":5,\"monitor_version\":1,\"monitor_name\":\"loghub-apache-error-log\",\"execution_id\":\"NQq7fpIBRJyww-JMkAMC_2024-10-12T03:18:54.311214115_22d189ce-5e93-4927-b8bb-bcf61b7537e3\",\"trigger_id\":\"NAq7fpIBRJyww-JMjwP_\",\"trigger_name\":\"Error log over 100\",\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\":[],\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"end_time\":null,\"acknowledged_time\":null,\"alert_source\":\"monitor\"}],\"trigger_name\":\"Error log over 100\",\"severity\":\"1\",\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"monitor_name\":\"loghub-apache-error-log\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"alert_source\":\"monitor\",\"triggerID\":\"NAq7fpIBRJyww-JMjwP_\"}\n\n ### Value triggers this alert\n 595\n\n ### Alert query DSL {\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"2024-10-12T03:21:54+00:00||-1000000h\",\"to\":\"2024-10-12T03:21:54+00:00\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}} 
\n", + "index": "loghub-apache-new", + "dsl": "{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"2024-10-12T03:21:54+00:00||-1000000h\",\"to\":\"2024-10-12T03:21:54+00:00\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}}", + "topNLogPatternData": "[[539,["[Sun Dec 04 07:12:44 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 06:19:18 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 07:18:00 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 16:52:49 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 06:59:47 2005] [error] mod_jk child workerEnv in error state 8","[Sun Dec 04 07:11:22 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 07:18:00 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 17:01:47 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 17:31:12 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 05:04:04 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 20:24:49 2005] [error] mod_jk child workerEnv in error state 8","[Sun Dec 04 06:16:23 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 20:47:17 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 06:30:43 2005] [error] mod_jk child workerEnv in error state 6","[Mon Dec 05 06:35:27 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 07:07:30 2005] [error] mod_jk child workerEnv in error state 8","[Sun Dec 04 07:18:00 2005] [error] mod_jk child workerEnv in error state 7","[Sun Dec 04 16:32:56 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 17:01:47 2005] [error] mod_jk child workerEnv in error state 6","[Sun Dec 04 16:52:49 2005] [error] mod_jk child workerEnv in error state 8"],"[ :: ] [] _ "],[32,["[Sun Dec 04 14:29:00 2005] [error] [client 4.245.93.87] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 08:54:17 2005] [error] [client 147.31.138.75] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 17:34:57 2005] [error] [client 61.138.216.82] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 07:45:45 2005] [error] [client 63.13.186.196] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 10:53:30 2005] [error] [client 218.76.139.20] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 10:48:48 2005] [error] [client 67.166.248.235] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 15:18:36 2005] [error] [client 67.154.58.130] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 01:30:32 2005] [error] [client 211.62.201.48] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 16:45:04 2005] [error] [client 216.216.185.130] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 17:31:39 2005] [error] [client 218.75.106.250] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 19:00:56 2005] [error] [client 68.228.3.15] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 19:14:09 2005] [error] [client 61.220.139.68] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 09:35:12 2005] [error] [client 207.203.80.15] Directory index forbidden by rule: /var/www/html/","[Mon Dec 05 10:28:44 2005] [error] [client 198.232.168.9] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 16:24:05 2005] [error] [client 58.225.62.140] 
Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 17:53:43 2005] [error] [client 218.39.132.175] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 12:33:13 2005] [error] [client 208.51.151.210] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 15:59:01 2005] [error] [client 24.83.37.136] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 11:42:43 2005] [error] [client 216.127.124.16] Directory index forbidden by rule: /var/www/html/","[Sun Dec 04 05:15:09 2005] [error] [client 222.166.160.184] Directory index forbidden by rule: /var/www/html/"],"[ :: ] [] [ ...] : ////"],[12,["[Sun Dec 04 20:47:17 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 20:47:17 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 07:57:02 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 17:43:12 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 20:47:17 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 20:47:16 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 07:57:02 2005] [error] mod_jk child init 1 -2","[Sun Dec 04 17:43:12 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2","[Mon Dec 05 11:06:52 2005] [error] mod_jk child init 1 -2"],"[ :: ] [] _ -"]]" +} +``` +{% include copy-curl.html %} + +The following table describes the Assistant Summary API parameters. + +Parameter | Required/Optional | Description +:--- | :--- | :--- +`summaryType` | Required | Specifies the type of application calling this API. Use `alerts` for alert insights. +`question` | Required | Specifies the user's question regarding alert insights. Default is `Please summarize this alert, do not use any tool.` +`context` | Required | Provides context for the alert, including the alert monitor definition, active alerts, and trigger values. +`index` | Optional | The index that the alert monitors. If this parameter is not provided, log pattern analysis is not returned. +`dsl` | Optional | The DSL query for alert monitoring. If this parameter is not provided, log pattern analysis is not returned. +`topNLogPatternData` | Optional | Log patterns for the alert trigger data. If this parameter is not provided, log pattern analysis is not returned. + +## Generating alert insights + +You can generate alert insights by calling the `/api/assistant/insight` API endpoint. 
To generate alert insights, all of the following parameters are required: + +```json +POST /api/assistant/insight +{ + "summaryType": "alerts", + "insightType": "user_insight" + "context": "\n Here is the detail information about alert Error log over 100\n ### Monitor definition\n {\"type\":\"monitor\",\"schema_version\":8,\"name\":\"loghub-apache-error-log\",\"monitor_type\":\"query_level_monitor\",\"enabled\":false,\"enabled_time\":null,\"schedule\":{\"period\":{\"interval\":1,\"unit\":\"MINUTES\"}},\"inputs\":[{\"search\":{\"indices\":[\"loghub-apache-new\"],\"query\":{\"size\":0,\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"10/12/24 11:21 am CST||-1000000h\",\"to\":\"10/12/24 11:21 am CST\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}}}}],\"triggers\":[{\"query_level_trigger\":{\"id\":\"NAq7fpIBRJyww-JMjwP_\",\"name\":\"Error log over 100\",\"severity\":\"1\",\"condition\":{\"script\":{\"source\":\"ctx.results[0].hits.total.value > 100\",\"lang\":\"painless\"}},\"actions\":[]}}],\"last_update_time\":1728714554388,\"owner\":\"alerting\",\"associated_workflows\":[],\"associatedCompositeMonitorCnt\":0,\"item_type\":\"query_level_monitor\",\"id\":\"NQq7fpIBRJyww-JMkAMC\",\"version\":3}\n\n ### Active Alert\n {\"ACTIVE\":1,\"ACKNOWLEDGED\":0,\"ERROR\":0,\"total\":1,\"alerts\":[{\"id\":\"Wgq8fpIBRJyww-JMegNr\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"workflow_id\":\"\",\"workflow_name\":\"\",\"associated_alert_ids\":[],\"schema_version\":5,\"monitor_version\":1,\"monitor_name\":\"loghub-apache-error-log\",\"execution_id\":\"NQq7fpIBRJyww-JMkAMC_2024-10-12T03:18:54.311214115_22d189ce-5e93-4927-b8bb-bcf61b7537e3\",\"trigger_id\":\"NAq7fpIBRJyww-JMjwP_\",\"trigger_name\":\"Error log over 100\",\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\":[],\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"end_time\":null,\"acknowledged_time\":null,\"alert_source\":\"monitor\"}],\"trigger_name\":\"Error log over 100\",\"severity\":\"1\",\"start_time\":\"10/12/24 11:18 am CST\",\"last_notification_time\":\"10/12/24 11:21 am CST\",\"monitor_name\":\"loghub-apache-error-log\",\"monitor_id\":\"NQq7fpIBRJyww-JMkAMC\",\"alert_source\":\"monitor\",\"triggerID\":\"NAq7fpIBRJyww-JMjwP_\"}\n\n ### Value triggers this alert\n 595\n\n ### Alert query DSL {\"query\":{\"bool\":{\"filter\":[{\"range\":{\"Time\":{\"from\":\"2024-10-12T03:21:54+00:00||-1000000h\",\"to\":\"2024-10-12T03:21:54+00:00\",\"include_lower\":true,\"include_upper\":true,\"boost\":1}}},{\"term\":{\"Level\":{\"value\":\"error\",\"boost\":1}}}],\"adjust_pure_negative\":true,\"boost\":1}}} \n", + "question": "Please provide your insight on this alerts.", + "summary": +} +``` +{% include copy-curl.html %} + +The following table describes the Assistant Insight API parameters. + +Parameter | Required/Optional | Description +:--- | :--- | :--- +`summaryType` | Required | Specifies the type of application calling this API. Use `alerts` for alert insights. +`insightType` | Required | Defines the alert type. Use `os_insight` for cluster metrics alerts and `user_insight` for other alert types. +`question` | Required | Specifies the user's question regarding alert insights. 
Default is `Please provide your insight on this alerts.` +`context` | Required | Provides context for the alert, including the alert monitor definition, active alerts, and trigger values. +`summary` | Required | The result returned by the alert summary agent. + + +## Viewing alert insights in OpenSearch Dashboards + +Before viewing alert insights, you must configure alerts in OpenSearch Dashboards. For more information, see [Alerting]({{site.url}}{{site.baseurl}}/observing-your-data/alerting/index/). + +To view alert insights in OpenSearch Dashboards, use the following steps: + +1. On the top menu bar, go to **OpenSearch Plugins > Alerting**. All alerts are displayed. + +1. Hover over the alerts for your desired monitor. If you configured alert insights, you will see a sparkle icon ({::nomarkdown}sparkle icon{:/}) next to the alerts in the **Alerts** column, as shown in the following image. + + Alerting page with sparkle icon + +1. Select the alerts label or the sparkle icon. You will see the generated summary, as shown in the following image. + + Alert summary + +1. Select the information icon ({::nomarkdown}info icon{:/}) to view alert insights. You will see the generated alert insights, as shown in the following image. + + Alert insights \ No newline at end of file diff --git a/_dashboards/dashboards-assistant/data-summary.md b/_dashboards/dashboards-assistant/data-summary.md new file mode 100644 index 0000000000..5ad465be62 --- /dev/null +++ b/_dashboards/dashboards-assistant/data-summary.md @@ -0,0 +1,291 @@ +--- +layout: default +title: Data summary +parent: OpenSearch Assistant for OpenSearch Dashboards +nav_order: 1 +has_children: false +--- + +# Data summary + +This is an experimental feature and is not recommended for use in a production environment. For updates on the progress of the feature or if you want to leave feedback, join the discussion on the [OpenSearch forum](https://forum.opensearch.org/). +{: .warning} + +The OpenSearch Dashboards Assistant data summary feature uses large language models (LLMs) to help you generate summaries for data stored in OpenSearch indexes. This tool provides an efficient way to gain insights from large datasets, making it easier to understand and act on the information contained in your OpenSearch indexes. + +## Configuration + +To configure the data summary feature, use the following steps. + +### Prerequisite + +Before using the data summary feature, enable query enhancements in OpenSearch Dashboards as follows: + +1. On the top menu bar, go to **Management > Dashboards Management**. +1. In the left navigation pane, select **Advanced settings**. +1. On the settings page, toggle **Enable query enhancements** to **On**. + +### Step 1: Enable the data summary feature + +To enable the data summary feature, configure the following `opensearch_dashboards.yml` setting: + +```yaml +queryEnhancements.queryAssist.summary.enabled: true +``` +{% include copy.html %} + +### Step 2: Create a data summary agent + +To orchestrate data summarization, create a data summary [agent]({{site.url}}{{site.baseurl}}/ml-commons-plugin/agents-tools/index/#agents). To create an agent, send a `POST /_plugins/_ml/agents/_register` request and provide the agent template as a payload: + +
+ + Request + + {: .text-delta} + +```json +POST /_plugins/_ml/agents/_register +{ + "name": "Query Assist Agent", + "description": "Create a Query Assist Agent using Claude on BedRock", + "use_case": "REGISTER_AGENT", + "version": { + "template": "1.0.0", + "compatibility": ["2.13.0", "3.0.0"] + }, + "workflows": { + "provision": { + "user_params": {}, + "nodes": [ + { + "id": "create_claude_connector", + "type": "create_connector", + "previous_node_inputs": {}, + "user_inputs": { + "version": "1", + "name": "Claude instant runtime Connector", + "protocol": "aws_sigv4", + "description": "The connector to BedRock service for Claude model", + "actions": [ + { + "headers": { + "x-amz-content-sha256": "required", + "content-type": "application/json" + }, + "method": "POST", + "request_body": "{\"prompt\":\"${parameters.prompt}\", \"max_tokens_to_sample\":${parameters.max_tokens_to_sample}, \"temperature\":${parameters.temperature}, \"anthropic_version\":\"${parameters.anthropic_version}\" }", + "action_type": "predict", + "url": "https://bedrock-runtime.us-west-2.amazonaws.com/model/anthropic.claude-instant-v1/invoke" + } + ], + "credential": { + "access_key": "", + "secret_key": "", + "session_token": "" + }, + "parameters": { + "region": "us-west-2", + "endpoint": "bedrock-runtime.us-west-2.amazonaws.com", + "content_type": "application/json", + "auth": "Sig_V4", + "max_tokens_to_sample": "8000", + "service_name": "bedrock", + "temperature": "0.0001", + "response_filter": "$.completion", + "anthropic_version": "bedrock-2023-05-31" + } + } + }, + { + "id": "register_claude_model", + "type": "register_remote_model", + "previous_node_inputs": { + "create_claude_connector": "connector_id" + }, + "user_inputs": { + "description": "Claude model", + "deploy": true, + "name": "claude-instant", + "guardrails": { + "type": "local_regex", + "input_guardrail": { + "stop_words": [ + { + "index_name": "words0", + "source_fields": ["title"] + } + ], + "regex": ["regex1", "regex2"] + }, + "output_guardrail": { + "stop_words": [ + { + "index_name": "words0", + "source_fields": ["title"] + } + ], + "regex": ["regex1", "regex2"] + } + } + } + }, + { + "id": "TransferQuestionToPPLAndExecuteTool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "type": "PPLTool", + "name": "TransferQuestionToPPLAndExecuteTool", + "description": "Use this tool to transfer natural language to generate PPL and execute PPL to query inside. Use this tool after you know the index name, otherwise, call IndexRoutingTool first. The input parameters are: {index:IndexName, question:UserQuestion}", + "parameters": { + "response_filter": "$.completion", + "execute": false + }, + "include_output_in_agent_response": true + } + }, + { + "id": "summarize_success_tool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "type": "MLModelTool", + "Name": "SummarizeSuccessTool", + "description": "Use this tool to summarize a PPL success response in query assist", + "parameters": { + "prompt": "\n\nHuman: You will be given a search response, summarize it as a concise paragraph while considering the following:\nUser's question on index '${parameters.index}': ${parameters.question}\nPPL (Piped Processing Language) query used: ${parameters.query}\n\nGive some documents to support your point.\nNote that the output could be truncated, summarize what you see. 
Don't mention about total items returned and don't mention about the fact that output is truncated if you see 'Output is too long, truncated' in the response.\n\nSkip the introduction; go straight into the summarization.\n\nUse the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\n${parameters.response}\n\nAssistant:", + "response_filter": "$.completion" + } + } + }, + { + "id": "summarize_error_tool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "type": "MLModelTool", + "name": "SummarizeErrorTool", + "description": "Use this tool to summarize a PPL error response in query assist", + "include_output_in_agent_response": true, + "parameters": { + "prompt": "\n\nHuman: You will be given an API response with errors, summarize it as a concise paragraph. Do not try to answer the user's question.\nIf the error cannot be fixed, eg. no such field or function not supported, then give suggestions to rephrase the question.\nIt is imperative that you must not give suggestions on how to fix the error or alternative PPL query, or answers to the question.\n\nConsider the following:\nUser's question on index '${parameters.index}': ${parameters.question}\nPPL (Piped Processing Language) query used: ${parameters.query}\n\nSkip the introduction; go straight into the summarization.\n\nUse the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n----------------\n${parameters.response}\n\nAssistant:", + "response_filter": "$.completion" + } + } + }, + { + "id": "suggestions_tool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "type": "MLModelTool", + "name": "SuggestionsTool", + "description": "Use this tool to generate possible questions for an index in query assist", + "include_output_in_agent_response": true, + "parameters": { + "prompt": "\n\nHuman: OpenSearch index: ${parameters.index}\n\nRecommend 2 or 3 possible questions on this index given the fields below. Only give the questions, do not give descriptions of questions and do not give PPL queries.\n\nThe format for a field is\n```\n- field_name: field_type (sample field value)\n```\n\nFields:\n${parameters.fields}\n\nPut each question in a tag.\n\nAssistant:", + "response_filter": "$.completion" + } + } + }, + { + "id": "ppl_agent", + "type": "register_agent", + "previous_node_inputs": { + "TransferQuestionToPPLAndExecuteTool": "tools" + }, + "user_inputs": { + "parameters": {}, + "app_type": "query_assist", + "name": "PPL agent", + "description": "this is the PPL agent", + "type": "flow" + } + } + ] + } + } +} +``` +{% include copy-curl.html %} + +
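+
+If you didn't note the agent ID from the registration response, one way to find it again is to search the registered agents by name using the ML Commons search agents API, as sketched below. The agent name (`PPL agent`) comes from the template above; adjust it to match the agent you registered:
+
+```json
+POST /_plugins/_ml/agents/_search
+{
+  "query": {
+    "match": {
+      "name": "PPL agent"
+    }
+  }
+}
+```
+{% include copy-curl.html %}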
+ +For sample agent templates, see [Flow Framework sample templates](https://github.com/opensearch-project/flow-framework/tree/2.x/sample-templates). Note the agent ID; you'll use it in the following step. + +### Step 3: Create a root agent + +Next, create a [root agent]({{site.url}}{{site.baseurl}}/automating-configurations/workflow-tutorial/#root_agent) for the data summary agent created in the previous step: + +```json +POST /.plugins-ml-config/_doc/os_data2summary +{ + "type": "os_root_agent", + "configuration": { + "agent_id": "" + } +} +``` +{% include copy-curl.html %} + +### Step 4: Test the agent + +You can verify that the data summary agent was created successfully by calling the agent with an example payload: + +```json +POST /_plugins/_ml/agents//_execute +{ + "parameters": { + "sample_data":"'[{\"_index\":\"90943e30-9a47-11e8-b64d-95841ca0b247\",\"_source\":{\"referer\":\"http://twitter.com/success/gemini-9a\",\"request\":\"/beats/metricbeat/metricbeat-6.3.2-amd64.deb\",\"agent\":\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)\",\"extension\":\"deb\",\"memory\":null,\"ip\":\"239.67.210.53\",\"index\":\"opensearch_dashboards_sample_data_logs\",\"message\":\"239.67.210.53 - - [2018-08-30T15:29:01.686Z] \\\"GET /beats/metricbeat/metricbeat-6.3.2-amd64.deb HTTP/1.1\\\" 404 2633 \\\"-\\\" \\\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)\\\"\",\"url\":\"https://artifacts.opensearch.org/downloads/beats/metricbeat/metricbeat-6.3.2-amd64.deb\",\"tags\":\"success\",\"geo\":{\"srcdest\":\"CN:PL\",\"src\":\"CN\",\"coordinates\":{\"lat\":44.91167028,\"lon\":-108.4455092},\"dest\":\"PL\"},\"utc_time\":\"2024-09-05 15:29:01.686\",\"bytes\":2633,\"machine\":{\"os\":\"win xp\",\"ram\":21474836480},\"response\":\"404\",\"clientip\":\"239.67.210.53\",\"host\":\"artifacts.opensearch.org\",\"event\":{\"dataset\":\"sample_web_logs\"},\"phpmemory\":null,\"timestamp\":\"2024-09-05 15:29:01.686\"}}]'", + "sample_count":1, + "total_count":383, + "question":"Are there any errors in my logs?", + "ppl":"source=opensearch_dashboards_sample_data_logs| where QUERY_STRING(['response'], '4* OR 5*')"} +} +``` +{% include copy-curl.html %} + +## Generating a data summary + +You can generate a data summary by calling the `/api/assistant/data2summary` API endpoint. 
The `sample_count`, `total_count`, `question`, and `ppl` parameters are optional:
+
+```json
+POST /api/assistant/data2summary
+{
+    "sample_data":"'[{\"_index\":\"90943e30-9a47-11e8-b64d-95841ca0b247\",\"_source\":{\"referer\":\"http://twitter.com/success/gemini-9a\",\"request\":\"/beats/metricbeat/metricbeat-6.3.2-amd64.deb\",\"agent\":\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)\",\"extension\":\"deb\",\"memory\":null,\"ip\":\"239.67.210.53\",\"index\":\"opensearch_dashboards_sample_data_logs\",\"message\":\"239.67.210.53 - - [2018-08-30T15:29:01.686Z] \\\"GET /beats/metricbeat/metricbeat-6.3.2-amd64.deb HTTP/1.1\\\" 404 2633 \\\"-\\\" \\\"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)\\\"\",\"url\":\"https://artifacts.opensearch.org/downloads/beats/metricbeat/metricbeat-6.3.2-amd64.deb\",\"tags\":\"success\",\"geo\":{\"srcdest\":\"CN:PL\",\"src\":\"CN\",\"coordinates\":{\"lat\":44.91167028,\"lon\":-108.4455092},\"dest\":\"PL\"},\"utc_time\":\"2024-09-05 15:29:01.686\",\"bytes\":2633,\"machine\":{\"os\":\"win xp\",\"ram\":21474836480},\"response\":\"404\",\"clientip\":\"239.67.210.53\",\"host\":\"artifacts.opensearch.org\",\"event\":{\"dataset\":\"sample_web_logs\"},\"phpmemory\":null,\"timestamp\":\"2024-09-05 15:29:01.686\"}}]'",
+    "sample_count":1,
+    "total_count":383,
+    "question":"Are there any errors in my logs?",
+    "ppl":"source=opensearch_dashboards_sample_data_logs| where QUERY_STRING(['response'], '4* OR 5*')"
+}
+```
+{% include copy-curl.html %}
+
+The following table describes the Assistant Data Summary API parameters.
+
+Parameter | Required/Optional | Description
+:--- | :--- | :---
+`sample_data` | Required | A sample of data returned by the specified query and used as input for summarization.
+`question` | Optional | The user's natural language question about the data, which guides the summary generation.
+`ppl` | Optional | The Piped Processing Language (PPL) query used to retrieve data; in query assistance, this is generated by the LLM using the user's natural language question.
+`sample_count` | Optional | The number of entries included in `sample_data`.
+`total_count` | Optional | The total number of entries in the full query result set.
+
+## Viewing data summaries in OpenSearch Dashboards
+
+To view data summaries in OpenSearch Dashboards, use the following steps:
+
+1. On the top menu bar, go to **OpenSearch Dashboards > Discover**.
+
+1. From the query language dropdown list, select **PPL**. You will see the generated data summary after the query text, as shown in the following image.
+ + data summary \ No newline at end of file diff --git a/_dashboards/dashboards-assistant/index.md b/_dashboards/dashboards-assistant/index.md index bf2d754be8..615a53b75f 100644 --- a/_dashboards/dashboards-assistant/index.md +++ b/_dashboards/dashboards-assistant/index.md @@ -2,7 +2,7 @@ layout: default title: OpenSearch Assistant for OpenSearch Dashboards nav_order: 3 -has_children: false +has_children: true has_toc: false --- @@ -22,7 +22,7 @@ To enable **OpenSearch Assistant** in OpenSearch Dashboards, locate your copy of ```yaml assistant.chat.enabled: true ``` -{% include copy-curl.html %} +{% include copy.html %} Then configure the root `agent_id` through the following API: @@ -131,8 +131,17 @@ assistant.next.enabled: true ``` {% include copy-curl.html %} +## Additional Dashboards Assistant capabilities + +For information about additional Dashboards Assistant capabilities, see the following pages: + +- [Generating alert insights]({{site.url}}{{site.baseurl}}/dashboards/dashboards-assistant/alert-insight/) +- [Generating data summaries]({{site.url}}{{site.baseurl}}/dashboards/dashboards-assistant/data-summary/) +- [Generating anomaly detector suggestions]({{site.url}}{{site.baseurl}}/dashboards/dashboards-assistant/suggest-anomaly-detector/) +- [Generating visualizations from text]({{site.url}}{{site.baseurl}}/dashboards/dashboards-assistant/text-to-visualization/) + ## Related articles - [Getting started guide for OpenSearch Assistant in OpenSearch Dashboards](https://github.com/opensearch-project/dashboards-assistant/blob/main/GETTING_STARTED_GUIDE.md) - [OpenSearch Assistant configuration through the REST API]({{site.url}}{{site.baseurl}}/ml-commons-plugin/opensearch-assistant/) -- [Build your own chatbot]({{site.url}}{{site.baseurl}}/ml-commons-plugin/tutorials/build-chatbot/) \ No newline at end of file +- [Build your own chatbot]({{site.url}}{{site.baseurl}}/ml-commons-plugin/tutorials/build-chatbot/) diff --git a/_dashboards/dashboards-assistant/suggest-anomaly-detector.md b/_dashboards/dashboards-assistant/suggest-anomaly-detector.md new file mode 100644 index 0000000000..2c9f795e8d --- /dev/null +++ b/_dashboards/dashboards-assistant/suggest-anomaly-detector.md @@ -0,0 +1,86 @@ +--- +layout: default +title: Anomaly detector suggestions +parent: OpenSearch Assistant for OpenSearch Dashboards +nav_order: 1 +has_children: false +--- + +# Anomaly detector suggestions + +This is an experimental feature and is not recommended for use in a production environment. For updates on the progress of the feature or if you want to leave feedback, join the discussion on the [OpenSearch forum](https://forum.opensearch.org/). +{: .warning} + +The OpenSearch Dashboards Assistant can use a large language model (LLM) to suggest the creation of an anomaly detector. The LLM analyzes data patterns in your OpenSearch indexes and recommends configuration settings for the anomaly detector, making it easier to identify unusual activity or trends in your data. + +## Configuration + +To configure anomaly detector suggestions, use the following steps. + +### Prerequisite + +Before using anomaly detector suggestions, enable query enhancements in OpenSearch Dashboards as follows: + +1. On the top menu bar, go to **Management > Dashboards Management**. +1. In the left navigation pane, select **Advanced settings**. +1. On the settings page, toggle **Enable query enhancements** to **On**. 
+ +### Step 1: Enable anomaly detector suggestions + +To enable anomaly detector suggestions, configure the following `opensearch_dashboards.yml` setting: + +```yaml +assistant.smartAnomalyDetector.enabled: true +``` +{% include copy.html %} + +### Step 2: Create an anomaly detector suggestion agent + +To orchestrate anomaly detector suggestions, create an anomaly detector suggestion [agent]({{site.url}}{{site.baseurl}}/ml-commons-plugin/agents-tools/index/#agents). To create an agent, send a `POST /_plugins/_ml/agents/_register` request and provide the agent template as a payload. For more information, see [Configuring OpenSearch Assistant]({{site.url}}{{site.baseurl}}/dashboards/dashboards-assistant/index/#configuring-opensearch-assistant). + +For sample agent templates, see [Flow Framework sample templates](https://github.com/opensearch-project/flow-framework/tree/2.x/sample-templates). Note the agent ID; you'll use it in the following step. + +### Step 3: Configure the agent + +Next, configure the anomaly detector suggestion agent created in the previous step: + +```json +POST /.plugins-ml-config/_doc/os_suggest_ad +{ + "type": "suggest_anomaly_detector_agent", + "configuration": { + "agent_id": "" + } +} +``` +{% include copy-curl.html %} + +### Step 4: Test the agent + +You can verify that the agent was created successfully by calling the agent with an example payload: + +```json +POST /_plugins/_ml/agents//_execute +{ + "parameters": { + "index":"sample_weblogs_test" + } +} +``` +{% include copy-curl.html %} + +## Viewing anomaly detector suggestions in OpenSearch Dashboards + +To view anomaly detector suggestions in OpenSearch Dashboards, use the following steps: + +1. On the top menu bar, go to **OpenSearch Dashboards > Discover**. + +1. From the index pattern dropdown list, select an index pattern. + +1. Select the **AI assistant** dropdown list and then select **Suggest anomaly detector**, as shown in the following image. + + Click the Suggest anomaly detector action + +1. Wait for the LLM to populate the **Suggest anomaly detector** fields that will be used to create an anomaly detector for the index pattern. Then select the **Create detector** button to create an anomaly detector, as shown in the following image. + + Suggested anomaly detector diff --git a/_dashboards/dashboards-assistant/text-to-visualization.md b/_dashboards/dashboards-assistant/text-to-visualization.md new file mode 100644 index 0000000000..5c0aab51f0 --- /dev/null +++ b/_dashboards/dashboards-assistant/text-to-visualization.md @@ -0,0 +1,283 @@ +--- +layout: default +title: Text to visualization +parent: OpenSearch Assistant for OpenSearch Dashboards +nav_order: 1 +has_children: false +--- + +# Text to visualization + +This is an experimental feature and is not recommended for use in a production environment. For updates on the progress of the feature or if you want to leave feedback, join the discussion on the [OpenSearch forum](https://forum.opensearch.org/). +{: .warning} + +The OpenSearch Dashboards Assistant can create visualizations using natural language instructions. + +## Configuration + +To configure text to visualization, use the following steps. 
+ +### Step 1: Enable text to visualization + +To enable text to visualization, configure the following `opensearch_dashboards.yml` setting: + +```yaml +assistant.text2viz.enabled: true +``` +{% include copy.html %} + +### Step 2: Create the agents + +To orchestrate text to visualization, you'll need to create the necessary [agents]({{site.url}}{{site.baseurl}}/ml-commons-plugin/agents-tools/index/#agents). Create a workflow template for creating all necessary text-to-visualization agents by sending the following request: + +
+ + Request + + {: .text-delta} + +```json +POST /_plugins/_flow_framework/workflow +{ + "name": "Text to visualization agents", + "description": "This template is to create all Agents required for text to visualization", + "use_case": "REGISTER_AGENTS", + "version": { + "template": "1.0.0", + "compatibility": [ + "2.18.0", + "3.0.0" + ] + }, + "workflows": { + "provision": { + "user_params": {}, + "nodes": [ + { + "id": "create_claude_connector", + "type": "create_connector", + "previous_node_inputs": {}, + "user_inputs": { + "credential": { + "access_key": "", + "secret_key": "", + "session_token": "" + }, + "parameters": { + "endpoint": "bedrock-runtime.us-east-1.amazonaws.com", + "content_type": "application/json", + "auth": "Sig_V4", + "max_tokens_to_sample": "8000", + "service_name": "bedrock", + "temperature": "0.0000", + "response_filter": "$.content[0].text", + "region": "us-east-1", + "anthropic_version": "bedrock-2023-05-31" + }, + "version": "1", + "name": "Claude haiku runtime Connector", + "protocol": "aws_sigv4", + "description": "The connector to BedRock service for claude model", + "actions": [ + { + "action_type": "predict", + "method": "POST", + "url": "https://bedrock-runtime.us-east-1.amazonaws.com/model/anthropic.claude-3-haiku-20240307-v1:0/invoke", + "headers": { + "content-type": "application/json", + "x-amz-content-sha256": "required" + }, + "request_body": "{\"messages\":[{\"role\":\"user\",\"content\":[{\"type\":\"text\",\"text\":\"${parameters.prompt}\"}]}],\"anthropic_version\":\"${parameters.anthropic_version}\",\"max_tokens\":${parameters.max_tokens_to_sample}}" + } + ] + } + }, + { + "id": "register_claude_model", + "type": "register_remote_model", + "previous_node_inputs": { + "create_claude_connector": "connector_id" + }, + "user_inputs": { + "name": "claude-haiku", + "description": "Claude model", + "deploy": true + } + }, + { + "id": "create_t2vega_tool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "parameters": { + "prompt": "You're an expert at creating vega-lite visualization. No matter what the user asks, you should reply with a valid vega-lite specification in json.\nYour task is to generate Vega-Lite specification in json based on the given sample data, the schema of the data, the PPL query to get the data and the user's input.\nLet's start from dimension and metric/date. Now I have a question, I already transfer it to PPL and query my Opensearch cluster. \nThen I get data. For the PPL, it will do aggregation like \"stats AVG(field_1) as avg, COUNT(field_2) by field_3, field_4, field_5\". \nIn this aggregation, the metric is [avg, COUNT(field_2)] , and then we judge the type of field_3,4,5. If only field_5 is type related to date, the dimension is [field_3, field_4], and date is [field_5]\nFor example, stats SUM(bytes) by span(timestamp, 1w), machine.os, response, then SUM(bytes) is metric and span(timestamp, 1w) is date, while machine.os, response are dimensions.\nNotice: Some fields like 'span()....' will be the date, but not metric and dimension. \nAnd one field will only count once in dimension count. You should always pick field name from schema\nTo summarize,\nA dimension is a categorical variable that is used to group, segment, or categorize data. It is typically a qualitative attribute that provides context for metrics and is used to slice and dice data to see how different categories perform in relation to each other.\nThe dimension is not date related fields. 
The dimension and date are very closed. The only difference is date is related to datetime, while dimension is not.\nA metric is a quantitative measure used to quantify or calculate some aspect of the data. Metrics are numerical and typically represent aggregated values like sums, averages, counts, or other statistical calculations.\n\nIf a ppl doesn't have aggregation using 'stats', then each field in output is dimension.\nOtherwise, if a ppl use aggregation using 'stats' but doesn't group by using 'by', then each field in output is metric.\n\nThen for each given PPL, you could give the metric and dimension and date. One field will in only one of the metric, dimension or date.\n\nThen according to the metric number and dimension number of PPL result, you should first format the entrance code by metric_number, dimension_number, and date_number. For example, if metric_number = 1, dimension_number = 2, date_number=1, then the entrance code is 121.\nI define several use case categories here according to the entrance code.\nFor each category, I will define the entrance condition (number of metric and dimension)\nI will also give some defined attribute of generated vega-lite. Please refer to it to generate vega-lite.\n\nType 1:\nEntrance code: <1, 1, 0>\nDefined Attributes:\n {\n \"title\": \"\",\n \"description\": \"<description>\",\n \"mark\": \"bar\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<metric name>\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"field\": \"<dimension name>\",\n \"type\": \"nominal\"\n }\n },\n }\n\nType 2:\nEntrance code: <1, 2, 0>\nDefined Attributes:\n{\n \"mark\": \"bar\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\"\n },\n \"color\": {\n \"field\": \"<dimension 2>\",\n \"type\": \"nominal\"\n }\n }\n }\n\n\nType 3\nEntrance code: <3, 1, 0>\nDefined Attributes:\n{\n \"mark\": \"point\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"field\": \"<metric 2>\",\n \"type\": \"quantitative\"\n },\n \"size\": {\n \"field\": \"<metric 3>\",\n \"type\": \"quantitative\"\n },\n \"color\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\"\n }\n }\n}\n\nType 4\nEntrance code: <2, 1, 0>\nDefined Attributes:\n{\n \"mark\": \"point\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<mtric 1>\",\n \"type\": \"quantitative\"\n },\n \"y\": {\n \"field\": \"<metric 2>\",\n \"type\": \"quantitative\"\n },\n \"color\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\"\n }\n }\n}\n\nType 5:\nEntrance code: <2, 1, 1>\nDefined Attributes:\n{\n \"layer\": [\n {\n \"mark\": \"bar\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<date 1>\",\n \"type\": \"temporal\"\n },\n \"y\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric 1 name>\"\n }\n },\n \"color\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\"\n }\n }\n },\n {\n \"mark\": {\n \"type\": \"line\",\n \"color\": \"red\"\n },\n \"encoding\": {\n \"x\": {\n \"field\": \"<date 1>\",\n \"type\": \"temporal\"\n },\n \"y\": {\n \"field\": \"<metric 2>\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric 2 name>\",\n \"orient\": \"right\"\n }\n },\n \"color\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\"\n }\n }\n }\n ],\n \"resolve\": {\n \"scale\": {\n \"y\": \"independent\"\n }\n }\n }\n\nType 6:\nEntrance code: <2, 0, 1>\nDefined Attributes:\n{\n \"title\": 
\"<title>\",\n \"description\": \"<description>\",\n \"layer\": [\n {\n \"mark\": \"area\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<date 1>\",\n \"type\": \"temporal\"\n },\n \"y\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric 1 name>\"\n }\n }\n }\n },\n {\n \"mark\": {\n \"type\": \"line\",\n \"color\": \"black\"\n },\n \"encoding\": {\n \"x\": {\n \"field\": \"date\",\n \"type\": \"temporal\"\n },\n \"y\": {\n \"field\": \"metric 2\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric 2 name>\",\n \"orient\": \"right\"\n }\n }\n }\n }\n ],\n \"resolve\": {\n \"scale\": {\n \"y\": \"independent\"\n }\n }\n }\n \nType 7:\nEntrance code: <1, 0, 1>\nDefined Attributes:\n{\n \"title\": \"<title>\",\n \"description\": \"<description>\",\n \"mark\": \"line\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<date 1>\",\n \"type\": \"temporal\",\n \"axis\": {\n \"title\": \"<date name>\"\n }\n },\n \"y\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric name>\"\n }\n }\n }\n }\n\nType 8:\nEntrance code: <1, 1, 1>\nDefined Attributes:\n{\n \"title\": \"<title>\",\n \"description\": \"<description>\",\n \"mark\": \"line\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<date 1>\",\n \"type\": \"temporal\",\n \"axis\": {\n \"title\": \"<date name>\"\n }\n },\n \"y\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric name>\"\n }\n },\n \"color\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\",\n \"legend\": {\n \"title\": \"<dimension name>\"\n }\n }\n }\n }\n\nType 9:\nEntrance code: <1, 2, 1>\nDefined Attributes:\n{\n \"title\": \"<title>\",\n \"description\": \"<description>\",\n \"mark\": \"line\",\n \"encoding\": {\n \"x\": {\n \"field\": \"<date 1>\",\n \"type\": \"temporal\",\n \"axis\": {\n \"title\": \"<date name>\"\n }\n },\n \"y\": {\n \"field\": \"<metric 1>\",\n \"type\": \"quantitative\",\n \"axis\": {\n \"title\": \"<metric 1>\"\n }\n },\n \"color\": {\n \"field\": \"<dimension 1>\",\n \"type\": \"nominal\",\n \"legend\": {\n \"title\": \"<dimension 1>\"\n }\n },\n \"facet\": {\n \"field\": \"<dimension 2>\",\n \"type\": \"nominal\",\n \"columns\": 2\n }\n }\n }\n\nType 10:\nEntrance code: all other code\nAll others type.\nUse a table to show the result\n\n\nBesides, here are some requirements:\n1. Do not contain the key called 'data' in vega-lite specification.\n2. If mark.type = point and shape.field is a field of the data, the definition of the shape should be inside the root \"encoding\" object, NOT in the \"mark\" object, for example, {\"encoding\": {\"shape\": {\"field\": \"field_name\"}}}\n3. Please also generate title and description\n\nThe sample data in json format:\n${parameters.sampleData}\n\nThis is the schema of the data:\n${parameters.dataSchema}\n\nThe user used this PPL query to get the data: ${parameters.ppl}\n\nThe user's question is: ${parameters.input_question}\n\nNotice: Some fields like 'span()....' will be the date, but not metric and dimension. \nAnd one field will only count once in dimension count. You should always pick field name from schema.\n And when you code is <2, 1, 0>, it belongs type 4.\n And when you code is <1, 2, 0>, it belongs type 9.\n\n\nNow please reply a valid vega-lite specification in json based on above instructions.\nPlease return the number of dimension, metric and date. Then choose the type. 
\nPlease also return the type.\nFinally return the vega-lite specification according to the type.\nPlease make sure all the key in the schema matches the word I given. \nYour answer format should be:\nNumber of metrics:[list the metric name here, Don't use duplicate name] <number of metrics {a}> \nNumber of dimensions:[list the dimension name here] <number of dimension {b}> \nNumber of dates:[list the date name here] <number of dates {c}> \nThen format the entrance code by: <Number of metrics, Number of dimensions, Number of dates>\nType and its entrance code: <type number>: <its entrance code>\nThen apply the vega-lite requirements of the type.\n<vega-lite> {here is the vega-lite json} </vega-lite>\n\nAnd don't use 'transformer' in your vega-lite and wrap your vega-lite json in <vega-lite> </vega-lite> tags\n" + }, + "name": "Text2Vega", + "type": "MLModelTool" + } + }, + { + "id": "create_instruction_based_t2vega_tool", + "type": "create_tool", + "previous_node_inputs": { + "register_claude_model": "model_id" + }, + "user_inputs": { + "parameters": { + "prompt": "You're an expert at creating vega-lite visualization. No matter what the user asks, you should reply with a valid vega-lite specification in json.\nYour task is to generate Vega-Lite specification in json based on the given sample data, the schema of the data, the PPL query to get the data and the user's input.\n\nBesides, here are some requirements:\n1. Do not contain the key called 'data' in vega-lite specification.\n2. If mark.type = point and shape.field is a field of the data, the definition of the shape should be inside the root \"encoding\" object, NOT in the \"mark\" object, for example, {\"encoding\": {\"shape\": {\"field\": \"field_name\"}}}\n3. Please also generate title and description\n\nThe sample data in json format:\n${parameters.sampleData}\n\nThis is the schema of the data:\n${parameters.dataSchema}\n\nThe user used this PPL query to get the data: ${parameters.ppl}\n\nThe user's input question is: ${parameters.input_question}\nThe user's instruction on the visualization is: ${parameters.input_instruction}\n\nNow please reply a valid vega-lite specification in json based on above instructions.\nPlease only contain vega-lite in your response.\n" + }, + "name": "Text2Vega", + "type": "MLModelTool" + } + }, + { + "id": "t2vega_agent", + "type": "register_agent", + "previous_node_inputs": { + "create_t2vega_tool": "tools" + }, + "user_inputs": { + "parameters": {}, + "type": "flow", + "name": "t2vega agent", + "description": "this is the t2vega agent that has a set of rules to generate the visualizations" + } + }, + { + "id": "t2vega_instruction_based_agent", + "type": "register_agent", + "previous_node_inputs": { + "create_instruction_based_t2vega_tool": "tools" + }, + "user_inputs": { + "parameters": {}, + "type": "flow", + "name": "t2vega instruction based agent", + "description": "this is the t2vega agent that supports instructions" + } + } + ] + } + } +} +``` +{% include copy-curl.html %} + +</details> + +Use the workflow ID returned in the response to provision the resources: + +```json +POST /_plugins/_flow_framework/workflow/<workflow_id>/_provision +``` +{% include copy-curl.html %} + +To view the status of the workflow and all created resources, send the following request: + +```json +GET /_plugins/_flow_framework/workflow/<workflow_id>/_status +``` +{% include copy-curl.html %}
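+
+The workflow creates a connector, a model, and two agents. To find the IDs of the agents that were created, check the `resources_created` array in the status response. The response format follows the Flow Framework Get Workflow Status API; the following truncated example is for illustration only (the connector and model entries are omitted, and the resource IDs are placeholders), so the values in your cluster will differ:
+
+```json
+{
+  "workflow_id": "<workflow_id>",
+  "state": "COMPLETED",
+  "resources_created": [
+    {
+      "workflow_step_name": "register_agent",
+      "workflow_step_id": "t2vega_agent",
+      "resource_id": "<agent ID for the rules-based agent>",
+      "resource_type": "agent_id"
+    },
+    {
+      "workflow_step_name": "register_agent",
+      "workflow_step_id": "t2vega_instruction_based_agent",
+      "resource_id": "<agent ID for the instruction-based agent>",
+      "resource_type": "agent_id"
+    }
+  ]
+}
+```
+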
+### Step 3: Configure the root agent + +Next, configure a root agent for text to visualization: + +```json +POST /.plugins-ml-config/_doc/os_text2vega +{ + "type": "os_chat_root_agent", + "configuration": { + "agent_id": "<ROOT_AGENT_ID>" + } +} +``` +{% include copy-curl.html %} + +Then configure the agent that receives user instructions for creating visualizations: + +```json +POST /.plugins-ml-config/_doc/os_text2vega_with_instructions +{ + "type": "os_chat_root_agent", + "configuration": { + "agent_id": "<ROOT_AGENT_ID>" + } +} +``` +{% include copy-curl.html %} + +### Step 4: Test the agent + +You can verify that the agent was created successfully by calling the agent with an example payload: + +```json +POST /_plugins/_ml/agents/<ROOT_AGENT_ID>/_execute +{ + "parameters": { + "input_question": "find unique visitors and average bytes every 3 hours", + "input_instruction": "display with different layers, use independent scale for different layers, display unique visitors with light blue bar chart", + "ppl": "source=opensearch_dashboards_sample_data_ecommerce| stats DISTINCT_COUNT(user) as unique_visitors, AVG(taxful_total_price) as avg_bytes by span(order_date, 3h)", + "sampleData": """[{\"unique_visitors\":15,\"avg_bytes\":90.98684210526316,\"span(order_date,3h)\":\"2024-04-25 00:00:00\"},{\"unique_visitors\":14,\"avg_bytes\":72.72083333333333,\"span(order_date,3h)\":\"2024-04-25 03:00:00\"}]""", + "dataSchema": """[{\"name\":\"unique_visitors\",\"type\":\"integer\"},{\"name\":\"avg_bytes\",\"type\":\"double\"},{\"name\":\"span(order_date,3h)\",\"type\":\"timestamp\"}]""" + } +} +``` +{% include copy-curl.html %} + +## Generating a visualization from text + +You can generate a visualization from text by calling the `/api/assistant/text2vega` API endpoint. The `input_instruction` parameter is optional: + +```json +POST /api/assistant/text2vega +{ + "input_instruction": "<input_instruction>", + "input_question": "<input_question>", + "ppl": "<ppl_query>", + "dataSchema": "<data_schema_of_ppl_response>", + "sampleData": "<sample_data_of_ppl_response>" +} +``` +{% include copy-curl.html %} + +The following table describes the Text to Visualization API parameters. + +Parameter | Required/Optional | Description +:--- | :--- | :--- +`input_question` | Required | The user's original question used to generate the corresponding Piped Processing Language (PPL) query. +`ppl` | Required | The generated PPL query that retrieves the data required for the visualization. +`dataSchema` | Required | Describes the structure and types of the data fields in the visualization output, based on the PPL response. +`sampleData` | Required | Provides sample entries from the data that will populate the visualization. +`input_instruction` | Optional | Specifies the styling instructions, such as colors, for the visualization.
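+
+For example, the following request reuses the sample e-commerce values from the agent test in Step 4. The values are illustrative only; in practice, pass the PPL query, data schema, and sample data that correspond to your own question:
+
+```json
+POST /api/assistant/text2vega
+{
+  "input_question": "find unique visitors and average bytes every 3 hours",
+  "input_instruction": "display with different layers, use independent scale for different layers, display unique visitors with light blue bar chart",
+  "ppl": "source=opensearch_dashboards_sample_data_ecommerce| stats DISTINCT_COUNT(user) as unique_visitors, AVG(taxful_total_price) as avg_bytes by span(order_date, 3h)",
+  "dataSchema": "[{\"name\":\"unique_visitors\",\"type\":\"integer\"},{\"name\":\"avg_bytes\",\"type\":\"double\"},{\"name\":\"span(order_date,3h)\",\"type\":\"timestamp\"}]",
+  "sampleData": "[{\"unique_visitors\":15,\"avg_bytes\":90.98684210526316,\"span(order_date,3h)\":\"2024-04-25 00:00:00\"},{\"unique_visitors\":14,\"avg_bytes\":72.72083333333333,\"span(order_date,3h)\":\"2024-04-25 03:00:00\"}]"
+}
+```
+{% include copy-curl.html %}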
+ +## Generating visualizations from text in OpenSearch Dashboards + +To generate visualizations from text in OpenSearch Dashboards, use the following steps: + +1. On the top menu bar, go to **OpenSearch Dashboards > Visualize** and then select **Create visualization**. + +1. In the **New Visualization** dialog, select **Natural language**, as shown in the following image. + + <img width="800px" src="{{site.url}}{{site.baseurl}}/images/dashboards-assistant/t2viz-start.png" alt="Create a visualization by selecting natural language"> + +1. From the data sources dropdown list, select a data source, as shown in the following image. + + <img src="{{site.url}}{{site.baseurl}}/images/dashboards-assistant/t2viz-select-data-source.png" alt="Select a data source for the visualization"> + +1. In the text box on the upper right, enter a question using natural language. A new visualization is generated, as shown in the following image. + + <img src="{{site.url}}{{site.baseurl}}/images/dashboards-assistant/t2viz-ask-question.png" alt="Enter a natural language question to generate a visualization"> + +1. To modify the generated visualization, select **Edit visual**. In the **Edit visual** dialog, enter the desired modifications and then select **Apply**, as shown in the following image. + + <img src="{{site.url}}{{site.baseurl}}/images/dashboards-assistant/t2viz-edit-visual.png" alt="Enter instructions in the Edit visual dialog"> + + The visualization is updated, as shown in the following image. + + <img src="{{site.url}}{{site.baseurl}}/images/dashboards-assistant/t2viz-edit-visual-response.png" alt="The updated visualization after the instructions are applied"> + + + diff --git a/images/dashboards-assistant/alert-insight-insight.png b/images/dashboards-assistant/alert-insight-insight.png new file mode 100644 index 0000000000..3d65276d42 Binary files /dev/null and b/images/dashboards-assistant/alert-insight-insight.png differ diff --git a/images/dashboards-assistant/alert-insight-start.png b/images/dashboards-assistant/alert-insight-start.png new file mode 100644 index 0000000000..b55b6296bc Binary files /dev/null and b/images/dashboards-assistant/alert-insight-start.png differ diff --git a/images/dashboards-assistant/alert-insight-summary.png b/images/dashboards-assistant/alert-insight-summary.png new file mode 100644 index 0000000000..1e98170cda Binary files /dev/null and b/images/dashboards-assistant/alert-insight-summary.png differ diff --git a/images/dashboards-assistant/data-summary.png b/images/dashboards-assistant/data-summary.png new file mode 100644 index 0000000000..dc2e4e22f0 Binary files /dev/null and b/images/dashboards-assistant/data-summary.png differ diff --git a/images/dashboards-assistant/info-icon.png b/images/dashboards-assistant/info-icon.png new file mode 100644 index 0000000000..29e6b7b97b Binary files /dev/null and b/images/dashboards-assistant/info-icon.png differ diff --git a/images/dashboards-assistant/sparkle-icon.png b/images/dashboards-assistant/sparkle-icon.png new file mode 100644 index 0000000000..04b6d2b876 Binary files /dev/null and b/images/dashboards-assistant/sparkle-icon.png differ diff --git a/images/dashboards-assistant/suggestAD-UI.png b/images/dashboards-assistant/suggestAD-UI.png new file mode 100644 index 0000000000..dd7e32d6e2 Binary files /dev/null and b/images/dashboards-assistant/suggestAD-UI.png differ diff --git a/images/dashboards-assistant/suggestAD-button.png b/images/dashboards-assistant/suggestAD-button.png new file mode 100644 index 0000000000..a87fe862fe Binary files /dev/null and b/images/dashboards-assistant/suggestAD-button.png differ diff --git a/images/dashboards-assistant/t2viz-ask-question.png b/images/dashboards-assistant/t2viz-ask-question.png new file mode 100644 index 0000000000..e5b7e86f64 Binary files /dev/null and b/images/dashboards-assistant/t2viz-ask-question.png differ diff --git a/images/dashboards-assistant/t2viz-edit-visual-response.png b/images/dashboards-assistant/t2viz-edit-visual-response.png new file mode 100644 index 0000000000..1fd35425a7 Binary files /dev/null and b/images/dashboards-assistant/t2viz-edit-visual-response.png differ diff --git a/images/dashboards-assistant/t2viz-edit-visual.png b/images/dashboards-assistant/t2viz-edit-visual.png new file mode 100644 index 0000000000..0c57dd58aa Binary
files /dev/null and b/images/dashboards-assistant/t2viz-edit-visual.png differ diff --git a/images/dashboards-assistant/t2viz-select-data-source.png b/images/dashboards-assistant/t2viz-select-data-source.png new file mode 100644 index 0000000000..172e136a5b Binary files /dev/null and b/images/dashboards-assistant/t2viz-select-data-source.png differ diff --git a/images/dashboards-assistant/t2viz-start.png b/images/dashboards-assistant/t2viz-start.png new file mode 100644 index 0000000000..f6d46a21e5 Binary files /dev/null and b/images/dashboards-assistant/t2viz-start.png differ