diff --git a/.backportrc.json b/.backportrc.json
index 8f458343c51af..3f1d639e9a480 100644
--- a/.backportrc.json
+++ b/.backportrc.json
@@ -3,6 +3,7 @@
"targetBranchChoices": [
{ "name": "master", "checked": true },
{ "name": "7.x", "checked": true },
+ "7.9",
"7.8",
"7.7",
"7.6",
@@ -26,7 +27,7 @@
"targetPRLabels": ["backport"],
"branchLabelMapping": {
"^v8.0.0$": "master",
- "^v7.9.0$": "7.x",
+ "^v7.10.0$": "7.x",
"^v(\\d+).(\\d+).\\d+$": "$1.$2"
}
}
diff --git a/.browserslistrc b/.browserslistrc
index 89114f393c462..04395b913c9c5 100644
--- a/.browserslistrc
+++ b/.browserslistrc
@@ -1,7 +1,9 @@
[production]
-last 2 versions
-> 5%
-Safari 7 # for PhantomJS support: https://github.com/elastic/kibana/issues/27136
+last 2 Firefox versions
+last 2 Chrome versions
+last 2 Safari versions
+> 0.25%
+not ie 11
[dev]
last 1 chrome versions
diff --git a/.ci/Jenkinsfile_baseline_capture b/.ci/Jenkinsfile_baseline_capture
new file mode 100644
index 0000000000000..b0d3591821642
--- /dev/null
+++ b/.ci/Jenkinsfile_baseline_capture
@@ -0,0 +1,28 @@
+#!/bin/groovy
+
+library 'kibana-pipeline-library'
+kibanaLibrary.load()
+
+kibanaPipeline(timeoutMinutes: 120) {
+ githubCommitStatus.trackBuild(params.commit, 'kibana-ci-baseline') {
+ ciStats.trackBuild {
+ catchError {
+ parallel([
+ 'oss-visualRegression': {
+ workers.ci(name: 'oss-visualRegression', size: 's-highmem', ramDisk: true) {
+ kibanaPipeline.functionalTestProcess('oss-visualRegression', './test/scripts/jenkins_visual_regression.sh')(1)
+ }
+ },
+ 'xpack-visualRegression': {
+ workers.ci(name: 'xpack-visualRegression', size: 's-highmem', ramDisk: true) {
+ kibanaPipeline.functionalTestProcess('xpack-visualRegression', './test/scripts/jenkins_xpack_visual_regression.sh')(1)
+ }
+ },
+ ])
+ }
+
+ kibanaPipeline.sendMail()
+ slackNotifications.onFailure()
+ }
+ }
+}
diff --git a/.ci/Jenkinsfile_baseline_trigger b/.ci/Jenkinsfile_baseline_trigger
new file mode 100644
index 0000000000000..221b7a44e30df
--- /dev/null
+++ b/.ci/Jenkinsfile_baseline_trigger
@@ -0,0 +1,70 @@
+#!/bin/groovy
+
+def MAXIMUM_COMMITS_TO_CHECK = 10
+def MAXIMUM_COMMITS_TO_BUILD = 5
+
+if (!params.branches_yaml) {
+ error "'branches_yaml' parameter must be specified"
+}
+
+def additionalBranches = []
+
+def branches = readYaml(text: params.branches_yaml) + additionalBranches
+
+library 'kibana-pipeline-library'
+kibanaLibrary.load()
+
+withGithubCredentials {
+ branches.each { branch ->
+ if (branch == '6.8') {
+ // skip 6.8, it is tracked but we don't need snapshots for it and haven't backported
+ // the baseline capture scripts to it.
+ return;
+ }
+
+ stage(branch) {
+ def commits = getCommits(branch, MAXIMUM_COMMITS_TO_CHECK, MAXIMUM_COMMITS_TO_BUILD)
+
+ commits.take(MAXIMUM_COMMITS_TO_BUILD).each { commit ->
+ catchErrors {
+ githubCommitStatus.create(commit, 'pending', 'Baseline started.', 'kibana-ci-baseline')
+
+ build(
+ propagate: false,
+ wait: false,
+ job: 'elastic+kibana+baseline-capture',
+ parameters: [
+ string(name: 'branch_specifier', value: branch),
+ string(name: 'commit', value: commit),
+ ]
+ )
+ }
+ }
+ }
+ }
+}
+
+def getCommits(String branch, maximumCommitsToCheck, maximumCommitsToBuild) {
+ print "Getting latest commits for ${branch}..."
+ def commits = githubApi.get("repos/elastic/kibana/commits?sha=${branch}").take(maximumCommitsToCheck).collect { it.sha }
+ def commitsToBuild = []
+
+ for (commit in commits) {
+ print "Getting statuses for ${commit}"
+ def status = githubApi.get("repos/elastic/kibana/statuses/${commit}").find { it.context == 'kibana-ci-baseline' }
+ print "Commit '${commit}' already built? ${status ? 'Yes' : 'No'}"
+
+ if (!status) {
+ commitsToBuild << commit
+ } else {
+ // Stop at the first commit we find that's already been triggered
+ break
+ }
+
+ if (commitsToBuild.size() >= maximumCommitsToBuild) {
+ break
+ }
+ }
+
+ return commitsToBuild.reverse() // We want the builds to trigger oldest-to-newest
+}
diff --git a/.ci/Jenkinsfile_coverage b/.ci/Jenkinsfile_coverage
index 3986367d660a1..ebb9c3dc86dd2 100644
--- a/.ci/Jenkinsfile_coverage
+++ b/.ci/Jenkinsfile_coverage
@@ -13,6 +13,7 @@ kibanaPipeline(timeoutMinutes: 240) {
workers.base(name: 'coverage-worker', size: 'l', ramDisk: false, bootstrapped: false) {
catchError {
kibanaCoverage.runTests()
+ kibanaTeamAssign.load('team_assignment', "### Upload Team Assignment JSON")
handleIngestion(TIME_STAMP)
}
handleFail()
diff --git a/.ci/Jenkinsfile_visual_baseline b/.ci/Jenkinsfile_visual_baseline
deleted file mode 100644
index 7c7cc8d98c306..0000000000000
--- a/.ci/Jenkinsfile_visual_baseline
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/groovy
-
-library 'kibana-pipeline-library'
-kibanaLibrary.load()
-
-kibanaPipeline(timeoutMinutes: 120) {
- ciStats.trackBuild {
- catchError {
- parallel([
- 'oss-visualRegression': {
- workers.ci(name: 'oss-visualRegression', size: 's', ramDisk: false) {
- kibanaPipeline.functionalTestProcess('oss-visualRegression', './test/scripts/jenkins_visual_regression.sh')(1)
- }
- },
- 'xpack-visualRegression': {
- workers.ci(name: 'xpack-visualRegression', size: 's', ramDisk: false) {
- kibanaPipeline.functionalTestProcess('xpack-visualRegression', './test/scripts/jenkins_xpack_visual_regression.sh')(1)
- }
- },
- ])
- }
-
- kibanaPipeline.sendMail()
- slackNotifications.onFailure()
- }
-}
diff --git a/.ci/end2end.groovy b/.ci/end2end.groovy
index 97099c6f87448..2cdc6d1c297cd 100644
--- a/.ci/end2end.groovy
+++ b/.ci/end2end.groovy
@@ -110,6 +110,9 @@ pipeline {
archiveArtifacts(allowEmptyArchive: true, artifacts: "${E2E_DIR}/kibana.log")
}
}
+ cleanup {
+ notifyBuildResult(prComment: false, analyzeFlakey: false, shouldNotify: false)
+ }
}
}
diff --git a/.ci/packer_cache_for_branch.sh b/.ci/packer_cache_for_branch.sh
index 5b4a94be50fa2..ab0ab845b2dc3 100755
--- a/.ci/packer_cache_for_branch.sh
+++ b/.ci/packer_cache_for_branch.sh
@@ -18,7 +18,7 @@ node scripts/es snapshot --download-only;
node scripts/es snapshot --license=oss --download-only;
# download reporting browsers
-(cd "x-pack" && yarn gulp prepare);
+(cd "x-pack" && yarn gulp downloadChromium);
# cache the chromedriver archive
chromedriverDistVersion="$(node -e "console.log(require('chromedriver').version)")"
diff --git a/.ci/pipeline-library/src/test/githubCommitStatus.groovy b/.ci/pipeline-library/src/test/githubCommitStatus.groovy
index 17878624b73cf..c770d5596f9cb 100644
--- a/.ci/pipeline-library/src/test/githubCommitStatus.groovy
+++ b/.ci/pipeline-library/src/test/githubCommitStatus.groovy
@@ -12,6 +12,7 @@ class GithubCommitStatusTest extends KibanaBasePipelineTest {
interface BuildState {
Object get(String key)
+ Object has(String key)
}
interface GithubApi {
@@ -25,6 +26,7 @@ class GithubCommitStatusTest extends KibanaBasePipelineTest {
buildStateMock = mock(BuildState)
githubApiMock = mock(GithubApi)
+ when(buildStateMock.has('checkoutInfo')).thenReturn(true)
when(buildStateMock.get('checkoutInfo')).thenReturn([ commit: 'COMMIT_HASH', ])
when(githubApiMock.post(any(), any())).thenReturn(null)
diff --git a/.ci/pipeline-library/src/test/prChanges.groovy b/.ci/pipeline-library/src/test/prChanges.groovy
index 0fb750d6ff64e..f149340517ff0 100644
--- a/.ci/pipeline-library/src/test/prChanges.groovy
+++ b/.ci/pipeline-library/src/test/prChanges.groovy
@@ -84,4 +84,17 @@ class PrChangesTest extends KibanaBasePipelineTest {
assertFalse(prChanges.areChangesSkippable())
}
+
+ @Test
+ void 'areChangesSkippable() with skippable changes that are in notSkippablePaths'() {
+ props([
+ githubPrs: [
+ getChanges: { [
+ [filename: 'docs/developer/architecture/code-exploration.asciidoc'],
+ ] },
+ ],
+ ])
+
+ assertFalse(prChanges.areChangesSkippable())
+ }
}
diff --git a/.eslintignore b/.eslintignore
index 4b5e781c26971..9263b483b8de9 100644
--- a/.eslintignore
+++ b/.eslintignore
@@ -26,13 +26,14 @@ target
/src/plugins/data/common/es_query/kuery/ast/_generated_/**
/src/plugins/vis_type_timelion/public/_generated_/**
/src/plugins/vis_type_timelion/public/webpackShims/jquery.flot.*
+/src/plugins/timelion/public/webpackShims/jquery.flot.*
/x-pack/legacy/plugins/**/__tests__/fixtures/**
/x-pack/plugins/apm/e2e/**/snapshots.js
/x-pack/plugins/apm/e2e/tmp/*
/x-pack/plugins/canvas/canvas_plugin
/x-pack/plugins/canvas/canvas_plugin_src/lib/flot-charts
/x-pack/plugins/canvas/shareable_runtime/build
-/x-pack/plugins/canvas/storybook
+/x-pack/plugins/canvas/storybook/build
/x-pack/plugins/monitoring/public/lib/jquery_flot
/x-pack/plugins/reporting/server/export_types/printable_pdf/server/lib/pdf/assets/**
/x-pack/legacy/plugins/infra/common/graphql/types.ts
diff --git a/.eslintrc.js b/.eslintrc.js
index 4425ad3a12659..e2674e8d7b407 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -49,6 +49,31 @@ const ELASTIC_LICENSE_HEADER = `
*/
`;
+const SAFER_LODASH_SET_HEADER = `
+/*
+ * Elasticsearch B.V licenses this file to you under the MIT License.
+ * See \`packages/elastic-safer-lodash-set/LICENSE\` for more information.
+ */
+`;
+
+const SAFER_LODASH_SET_LODASH_HEADER = `
+/*
+ * This file is forked from the lodash project (https://lodash.com/),
+ * and may include modifications made by Elasticsearch B.V.
+ * Elasticsearch B.V. licenses this file to you under the MIT License.
+ * See \`packages/elastic-safer-lodash-set/LICENSE\` for more information.
+ */
+`;
+
+const SAFER_LODASH_SET_DEFINITELYTYPED_HEADER = `
+/*
+ * This file is forked from the DefinitelyTyped project (https://github.com/DefinitelyTyped/DefinitelyTyped),
+ * and may include modifications made by Elasticsearch B.V.
+ * Elasticsearch B.V. licenses this file to you under the MIT License.
+ * See \`packages/elastic-safer-lodash-set/LICENSE\` for more information.
+ */
+`;
+
const allMochaRulesOff = {};
Object.keys(require('eslint-plugin-mocha').rules).forEach((k) => {
allMochaRulesOff['mocha/' + k] = 'off';
@@ -143,7 +168,12 @@ module.exports = {
'@kbn/eslint/disallow-license-headers': [
'error',
{
- licenses: [ELASTIC_LICENSE_HEADER],
+ licenses: [
+ ELASTIC_LICENSE_HEADER,
+ SAFER_LODASH_SET_HEADER,
+ SAFER_LODASH_SET_LODASH_HEADER,
+ SAFER_LODASH_SET_DEFINITELYTYPED_HEADER,
+ ],
},
],
},
@@ -174,7 +204,82 @@ module.exports = {
'@kbn/eslint/disallow-license-headers': [
'error',
{
- licenses: [APACHE_2_0_LICENSE_HEADER],
+ licenses: [
+ APACHE_2_0_LICENSE_HEADER,
+ SAFER_LODASH_SET_HEADER,
+ SAFER_LODASH_SET_LODASH_HEADER,
+ SAFER_LODASH_SET_DEFINITELYTYPED_HEADER,
+ ],
+ },
+ ],
+ },
+ },
+
+ /**
+ * safer-lodash-set package requires special license headers
+ */
+ {
+ files: ['packages/elastic-safer-lodash-set/**/*.{js,mjs,ts,tsx}'],
+ rules: {
+ '@kbn/eslint/require-license-header': [
+ 'error',
+ {
+ license: SAFER_LODASH_SET_LODASH_HEADER,
+ },
+ ],
+ '@kbn/eslint/disallow-license-headers': [
+ 'error',
+ {
+ licenses: [
+ ELASTIC_LICENSE_HEADER,
+ APACHE_2_0_LICENSE_HEADER,
+ SAFER_LODASH_SET_HEADER,
+ SAFER_LODASH_SET_DEFINITELYTYPED_HEADER,
+ ],
+ },
+ ],
+ },
+ },
+ {
+ files: ['packages/elastic-safer-lodash-set/test/*.{js,mjs,ts,tsx}'],
+ rules: {
+ '@kbn/eslint/require-license-header': [
+ 'error',
+ {
+ license: SAFER_LODASH_SET_HEADER,
+ },
+ ],
+ '@kbn/eslint/disallow-license-headers': [
+ 'error',
+ {
+ licenses: [
+ ELASTIC_LICENSE_HEADER,
+ APACHE_2_0_LICENSE_HEADER,
+ SAFER_LODASH_SET_LODASH_HEADER,
+ SAFER_LODASH_SET_DEFINITELYTYPED_HEADER,
+ ],
+ },
+ ],
+ },
+ },
+ {
+ files: ['packages/elastic-safer-lodash-set/**/*.d.ts'],
+ rules: {
+ '@kbn/eslint/require-license-header': [
+ 'error',
+ {
+ license: SAFER_LODASH_SET_DEFINITELYTYPED_HEADER,
+ },
+ ],
+ '@kbn/eslint/disallow-license-headers': [
+ 'error',
+ {
+ licenses: [
+ ELASTIC_LICENSE_HEADER,
+ APACHE_2_0_LICENSE_HEADER,
+ SAFER_LODASH_SET_HEADER,
+ SAFER_LODASH_SET_LODASH_HEADER,
+ ],
},
],
},
@@ -541,9 +646,129 @@ module.exports = {
* Harden specific rules
*/
{
- files: ['test/harden/*.js'],
+ files: ['test/harden/*.js', 'packages/elastic-safer-lodash-set/test/*.js'],
rules: allMochaRulesOff,
},
+ {
+ files: ['**/*.{js,mjs,ts,tsx}'],
+ rules: {
+ 'no-restricted-imports': [
+ 2,
+ {
+ paths: [
+ {
+ name: 'lodash',
+ importNames: ['set', 'setWith'],
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash.set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash.setwith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/setWith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/fp',
+ importNames: ['set', 'setWith', 'assoc', 'assocPath'],
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/fp/set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/fp/setWith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/fp/assoc',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/fp/assocPath',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ ],
+ },
+ ],
+ 'no-restricted-modules': [
+ 2,
+ {
+ paths: [
+ {
+ name: 'lodash.set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash.setwith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ name: 'lodash/setWith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ ],
+ },
+ ],
+ 'no-restricted-properties': [
+ 2,
+ {
+ object: 'lodash',
+ property: 'set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: '_',
+ property: 'set',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: 'lodash',
+ property: 'setWith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: '_',
+ property: 'setWith',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: 'lodash',
+ property: 'assoc',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: '_',
+ property: 'assoc',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: 'lodash',
+ property: 'assocPath',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ {
+ object: '_',
+ property: 'assocPath',
+ message: 'Please use @elastic/safer-lodash-set instead',
+ },
+ ],
+ },
+ },
/**
* APM overrides
@@ -997,6 +1222,12 @@ module.exports = {
],
},
},
+ {
+ files: ['x-pack/plugins/canvas/storybook/**'],
+ rules: {
+ 'import/no-extraneous-dependencies': 0,
+ },
+ },
{
files: ['x-pack/plugins/canvas/canvas_plugin_src/**/*.js'],
globals: { canvas: true, $: true },
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index f053c6da9c29b..2ad82ded6cb38 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -24,29 +24,20 @@
/src/plugins/vis_type_xy/ @elastic/kibana-app
/src/plugins/visualize/ @elastic/kibana-app
-# Core UI
-# Exclude tutorials folder for now because they are not owned by Kibana app and most will move out soon
-/src/plugins/home/public @elastic/kibana-core-ui
-/src/plugins/home/server/*.ts @elastic/kibana-core-ui
-/src/plugins/home/server/services/ @elastic/kibana-core-ui
-# Exclude tutorial resources folder for now because they are not owned by Kibana app and most will move out soon
-/src/legacy/core_plugins/kibana/public/home/*.ts @elastic/kibana-core-ui
-/src/legacy/core_plugins/kibana/public/home/*.scss @elastic/kibana-core-ui
-/src/legacy/core_plugins/kibana/public/home/np_ready/ @elastic/kibana-core-ui
-
# App Architecture
+/examples/bfetch_explorer/ @elastic/kibana-app-arch
+/examples/dashboard_embeddable_examples/ @elastic/kibana-app-arch
+/examples/demo_search/ @elastic/kibana-app-arch
/examples/developer_examples/ @elastic/kibana-app-arch
+/examples/embeddable_examples/ @elastic/kibana-app-arch
+/examples/embeddable_explorer/ @elastic/kibana-app-arch
+/examples/state_container_examples/ @elastic/kibana-app-arch
+/examples/ui_actions_examples/ @elastic/kibana-app-arch
+/examples/ui_actions_explorer/ @elastic/kibana-app-arch
/examples/url_generators_examples/ @elastic/kibana-app-arch
/examples/url_generators_explorer/ @elastic/kibana-app-arch
-/packages/kbn-interpreter/ @elastic/kibana-app-arch
/packages/elastic-datemath/ @elastic/kibana-app-arch
-/src/legacy/core_plugins/embeddable_api/ @elastic/kibana-app-arch
-/src/legacy/core_plugins/interpreter/ @elastic/kibana-app-arch
-/src/legacy/core_plugins/kibana_react/ @elastic/kibana-app-arch
-/src/legacy/core_plugins/kibana/public/management/ @elastic/kibana-app-arch
-/src/legacy/core_plugins/kibana/server/routes/api/management/ @elastic/kibana-app-arch
-/src/legacy/core_plugins/visualizations/ @elastic/kibana-app-arch
-/src/legacy/server/index_patterns/ @elastic/kibana-app-arch
+/packages/kbn-interpreter/ @elastic/kibana-app-arch
/src/plugins/advanced_settings/ @elastic/kibana-app-arch
/src/plugins/bfetch/ @elastic/kibana-app-arch
/src/plugins/data/ @elastic/kibana-app-arch
@@ -61,9 +52,10 @@
/src/plugins/share/ @elastic/kibana-app-arch
/src/plugins/ui_actions/ @elastic/kibana-app-arch
/src/plugins/visualizations/ @elastic/kibana-app-arch
-/x-pack/plugins/advanced_ui_actions/ @elastic/kibana-app-arch
+/x-pack/examples/ui_actions_enhanced_examples/ @elastic/kibana-app-arch
/x-pack/plugins/data_enhanced/ @elastic/kibana-app-arch
-/x-pack/plugins/drilldowns/ @elastic/kibana-app-arch
+/x-pack/plugins/embeddable_enhanced/ @elastic/kibana-app-arch
+/x-pack/plugins/ui_actions_enhanced/ @elastic/kibana-app-arch
# APM
/x-pack/plugins/apm/ @elastic/apm-ui
@@ -79,6 +71,16 @@
/x-pack/plugins/canvas/ @elastic/kibana-canvas
/x-pack/test/functional/apps/canvas/ @elastic/kibana-canvas
+# Core UI
+# Exclude tutorials folder for now because they are not owned by Kibana app and most will move out soon
+/src/plugins/home/public @elastic/kibana-core-ui
+/src/plugins/home/server/*.ts @elastic/kibana-core-ui
+/src/plugins/home/server/services/ @elastic/kibana-core-ui
+# Exclude tutorial resources folder for now because they are not owned by Kibana app and most will move out soon
+/src/legacy/core_plugins/kibana/public/home/*.ts @elastic/kibana-core-ui
+/src/legacy/core_plugins/kibana/public/home/*.scss @elastic/kibana-core-ui
+/src/legacy/core_plugins/kibana/public/home/np_ready/ @elastic/kibana-core-ui
+
# Observability UIs
/x-pack/legacy/plugins/infra/ @elastic/logs-metrics-ui
/x-pack/plugins/infra/ @elastic/logs-metrics-ui
diff --git a/.i18nrc.json b/.i18nrc.json
index 9af7f17067b8e..e8431fdb3f0e1 100644
--- a/.i18nrc.json
+++ b/.i18nrc.json
@@ -44,7 +44,7 @@
"src/plugins/telemetry_management_section"
],
"tileMap": "src/plugins/tile_map",
- "timelion": ["src/legacy/core_plugins/timelion", "src/plugins/vis_type_timelion"],
+ "timelion": ["src/plugins/timelion", "src/plugins/vis_type_timelion"],
"uiActions": "src/plugins/ui_actions",
"visDefaultEditor": "src/plugins/vis_default_editor",
"visTypeMarkdown": "src/plugins/vis_type_markdown",
diff --git a/.sass-lint.yml b/.sass-lint.yml
index 56b85adca8a71..d6eaaf391de1a 100644
--- a/.sass-lint.yml
+++ b/.sass-lint.yml
@@ -1,8 +1,9 @@
files:
include:
- 'src/legacy/core_plugins/metrics/**/*.s+(a|c)ss'
- - 'src/legacy/core_plugins/timelion/**/*.s+(a|c)ss'
+ - 'src/plugins/timelion/**/*.s+(a|c)ss'
- 'src/plugins/vis_type_vislib/**/*.s+(a|c)ss'
+ - 'src/plugins/vis_type_vega/**/*.s+(a|c)ss'
- 'src/plugins/vis_type_xy/**/*.s+(a|c)ss'
- 'x-pack/plugins/canvas/**/*.s+(a|c)ss'
- 'x-pack/plugins/triggers_actions_ui/**/*.s+(a|c)ss'
diff --git a/Jenkinsfile b/Jenkinsfile
index f6f77ccae8427..69c61b5bfa988 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -42,7 +42,6 @@ kibanaPipeline(timeoutMinutes: 155, checkPrChanges: true, setCommitStatus: true)
'xpack-ciGroup10': kibanaPipeline.xpackCiGroupProcess(10),
'xpack-accessibility': kibanaPipeline.functionalTestProcess('xpack-accessibility', './test/scripts/jenkins_xpack_accessibility.sh'),
'xpack-savedObjectsFieldMetrics': kibanaPipeline.functionalTestProcess('xpack-savedObjectsFieldMetrics', './test/scripts/jenkins_xpack_saved_objects_field_metrics.sh'),
- // 'xpack-pageLoadMetrics': kibanaPipeline.functionalTestProcess('xpack-pageLoadMetrics', './test/scripts/jenkins_xpack_page_load_metrics.sh'),
'xpack-securitySolutionCypress': { processNumber ->
whenChanged(['x-pack/plugins/security_solution/', 'x-pack/test/security_solution_cypress/']) {
kibanaPipeline.functionalTestProcess('xpack-securitySolutionCypress', './test/scripts/jenkins_security_solution_cypress.sh')(processNumber)
diff --git a/NOTICE.txt b/NOTICE.txt
index 56280e6e3883e..e1552852d0349 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -26,9 +26,6 @@ This module was heavily inspired by the externals plugin that ships with webpack
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
----
-This product has relied on ASTExplorer that is licensed under MIT.
-
---
This product includes code that is based on Ace editor, which was available
under a "BSD" license.
diff --git a/docs/api/dashboard/export-dashboard.asciidoc b/docs/api/dashboard/export-dashboard.asciidoc
index 36c551dee84fc..2099fb599ba67 100644
--- a/docs/api/dashboard/export-dashboard.asciidoc
+++ b/docs/api/dashboard/export-dashboard.asciidoc
@@ -35,7 +35,7 @@ experimental[] Export dashboards and corresponding saved objects.
[source,sh]
--------------------------------------------------
-$ curl -X GET "localhost:5601/api/kibana/dashboards/export?dashboard=942dcef0-b2cd-11e8-ad8e-85441f0c2e5c" <1>
+$ curl -X GET api/kibana/dashboards/export?dashboard=942dcef0-b2cd-11e8-ad8e-85441f0c2e5c <1>
--------------------------------------------------
// KIBANA
diff --git a/docs/api/dashboard/import-dashboard.asciidoc b/docs/api/dashboard/import-dashboard.asciidoc
index 320859f78c617..020ec8018b85b 100644
--- a/docs/api/dashboard/import-dashboard.asciidoc
+++ b/docs/api/dashboard/import-dashboard.asciidoc
@@ -42,7 +42,7 @@ Use the complete response body from the <
"index1",
@@ -40,7 +40,9 @@ POST /api/upgrade_assistant/reindex/batch
]
}
--------------------------------------------------
-<1> The order in which the indices are provided here determines the order in which the reindex tasks will be executed.
+// KIBANA
+
+<1> The order of the indices determines the order in which the reindex tasks are executed.
Similar to the <>, the API returns the following:
diff --git a/docs/api/upgrade-assistant/check_reindex_status.asciidoc b/docs/api/upgrade-assistant/check_reindex_status.asciidoc
index 00801f201d1e1..98cf263673f73 100644
--- a/docs/api/upgrade-assistant/check_reindex_status.asciidoc
+++ b/docs/api/upgrade-assistant/check_reindex_status.asciidoc
@@ -64,6 +64,7 @@ The API returns the following:
`3`::
Paused
++
NOTE: If the {kib} node that started the reindex is shutdown or restarted, the reindex goes into a paused state after some time.
To resume the reindex, you must submit a new POST request to the `/api/upgrade_assistant/reindex/` endpoint.
diff --git a/docs/api/url-shortening.asciidoc b/docs/api/url-shortening.asciidoc
index a62529e11a9ba..ffe1d925e5dcb 100644
--- a/docs/api/url-shortening.asciidoc
+++ b/docs/api/url-shortening.asciidoc
@@ -1,5 +1,5 @@
[[url-shortening-api]]
-=== Shorten URL API
+== Shorten URL API
++++
Shorten URL
++++
@@ -9,34 +9,39 @@ Internet Explorer has URL length restrictions, and some wiki and markup parsers
Short URLs are designed to make sharing {kib} URLs easier.
+[float]
[[url-shortening-api-request]]
-==== Request
+=== Request
`POST :/api/shorten_url`
+[float]
[[url-shortening-api-request-body]]
-==== Request body
+=== Request body
`url`::
(Required, string) The {kib} URL that you want to shorten, relative to `/app/kibana`.
+[float]
[[url-shortening-api-response-body]]
-==== Response body
+=== Response body
urlId:: A top-level property that contains the shortened URL token for the provided request body.
+[float]
[[url-shortening-api-codes]]
-==== Response code
+=== Response code
`200`::
Indicates a successful call.
+[float]
[[url-shortening-api-example]]
-==== Example
+=== Example
[source,sh]
--------------------------------------------------
-$ curl -X POST "localhost:5601/api/shorten_url"
+$ curl -X POST api/shorten_url
{
"url": "/app/kibana#/dashboard?_g=()&_a=(description:'',filters:!(),fullScreenMode:!f,options:(hidePanelTitles:!f,useMargins:!t),panels:!((embeddableConfig:(),gridData:(h:15,i:'1',w:24,x:0,y:0),id:'8f4d0c00-4c86-11e8-b3d7-01146121b73d',panelIndex:'1',type:visualization,version:'7.0.0-alpha1')),query:(language:lucene,query:''),timeRestore:!f,title:'New%20Dashboard',viewMode:edit)"
}
diff --git a/docs/api/using-api.asciidoc b/docs/api/using-api.asciidoc
index e58d9c39ee8c4..c61edfb62b079 100644
--- a/docs/api/using-api.asciidoc
+++ b/docs/api/using-api.asciidoc
@@ -10,7 +10,23 @@ NOTE: The {kib} Console supports only Elasticsearch APIs. You are unable to inte
[float]
[[api-authentication]]
=== Authentication
-{kib} supports token-based authentication with the same username and password that you use to log into the {kib} Console. In a given HTTP tool, and when available, you can select to use its 'Basic Authentication' option, which is where the username and password are stored in order to be passed as part of the call.
+The {kib} APIs support key- and token-based authentication.
+
+[float]
+[[token-api-authentication]]
+==== Token-based authentication
+
+To use token-based authentication, you use the same username and password that you use to log into Elastic.
+In a given HTTP tool, and when available, you can select to use its 'Basic Authentication' option,
+which is where the username and password are stored in order to be passed as part of the call.
+
+[float]
+[[key-authentication]]
+==== Key-based authentication
+
+To use key-based authentication, you create an API key using the Elastic Console, then specify the key in the header of your API calls.
+
+For information about API keys, refer to <>.
[float]
[[api-calls]]
@@ -31,7 +47,7 @@ For example, the following `curl` command exports a dashboard:
[source,sh]
--
-curl -X POST -u $USER:$PASSWORD "localhost:5601/api/kibana/dashboards/export?dashboard=942dcef0-b2cd-11e8-ad8e-85441f0c2e5c"
+curl -X POST api/kibana/dashboards/export?dashboard=942dcef0-b2cd-11e8-ad8e-85441f0c2e5c
--
// KIBANA
@@ -51,7 +67,8 @@ For all APIs, you must use a request header. The {kib} APIs support the `kbn-xsr
* XSRF protections are disabled using the `server.xsrf.disableProtection` setting
`Content-Type: application/json`::
- Applicable only when you send a payload in the API request. {kib} API requests and responses use JSON. Typically, if you include the `kbn-xsrf` header, you must also include the `Content-Type` header.
+ Applicable only when you send a payload in the API request. {kib} API requests and responses use JSON.
+ Typically, if you include the `kbn-xsrf` header, you must also include the `Content-Type` header.
Request header example:
diff --git a/docs/dev-tools/grokdebugger/index.asciidoc b/docs/dev-tools/grokdebugger/index.asciidoc
index 5162e806edd07..994836de7a1a2 100644
--- a/docs/dev-tools/grokdebugger/index.asciidoc
+++ b/docs/dev-tools/grokdebugger/index.asciidoc
@@ -32,7 +32,7 @@ in ingest node and Logstash.
This example walks you through using the *Grok Debugger*. This tool
is automatically enabled in {kib}.
-NOTE: If you're using {security}, you must have the `manage_pipeline`
+NOTE: If you're using {stack-security-features}, you must have the `manage_pipeline`
permission to use the Grok Debugger.
. Open the menu, go to *Dev Tools*, then click *Grok Debugger*.
diff --git a/docs/developer/advanced/development-basepath.asciidoc b/docs/developer/advanced/development-basepath.asciidoc
index f0b760a21ea0c..cb341b9591174 100644
--- a/docs/developer/advanced/development-basepath.asciidoc
+++ b/docs/developer/advanced/development-basepath.asciidoc
@@ -1,5 +1,5 @@
[[development-basepath]]
-=== Considerations for basepath
+== Considerations for basepath
In dev mode, {kib} by default runs behind a proxy which adds a random path component to its URL.
diff --git a/docs/developer/advanced/development-es-snapshots.asciidoc b/docs/developer/advanced/development-es-snapshots.asciidoc
index 92fae7a241edf..4c801bf750979 100644
--- a/docs/developer/advanced/development-es-snapshots.asciidoc
+++ b/docs/developer/advanced/development-es-snapshots.asciidoc
@@ -1,32 +1,32 @@
[[development-es-snapshots]]
-=== Daily Elasticsearch Snapshots
+== Daily {es} Snapshots
-For local development and CI, {kib}, by default, uses Elasticsearch snapshots that are built daily when running tasks that require Elasticsearch (e.g. functional tests).
+For local development and CI, {kib}, by default, uses {es} snapshots that are built daily when running tasks that require {es} (e.g. functional tests).
-A snapshot is just a group of tarballs, one for each supported distribution/architecture/os of Elasticsearch, and a JSON-based manifest file containing metadata about the distributions.
+A snapshot is just a group of tarballs, one for each supported distribution/architecture/os of {es}, and a JSON-based manifest file containing metadata about the distributions.
-https://ci.kibana.dev/es-snapshots[A dashboard] is available that shows the current status and compatibility of the latest Elasticsearch snapshots.
+https://ci.kibana.dev/es-snapshots[A dashboard] is available that shows the current status and compatibility of the latest {es} snapshots.
-==== Process Overview
+=== Process Overview
-1. Elasticsearch snapshots are built for each current tracked branch of {kib}.
+1. {es} snapshots are built for each current tracked branch of {kib}.
2. Each snapshot is uploaded to a public Google Cloud Storage bucket, `kibana-ci-es-snapshots-daily`.
** At this point, the snapshot is not automatically used in CI or local development. It needs to be tested/verified first.
3. Each snapshot is tested with the latest commit of the corresponding {kib} branch, using the full CI suite.
4. After CI
** If the snapshot passes, it is promoted and automatically used in CI and local development.
-** If the snapshot fails, the issue must be investigated and resolved. A new incompatibility may exist between Elasticsearch and {kib}.
+** If the snapshot fails, the issue must be investigated and resolved. A new incompatibility may exist between {es} and {kib}.
-==== Using the latest snapshot
+=== Using the latest snapshot
-When developing locally, you may wish to use the most recent Elasticsearch snapshot, even if it's failing CI. To do so, prefix your commands with the follow environment variable:
+When developing locally, you may wish to use the most recent {es} snapshot, even if it's failing CI. To do so, prefix your commands with the following environment variable:
["source","bash"]
-----------
KBN_ES_SNAPSHOT_USE_UNVERIFIED=true
-----------
-You can use this flag with any command that downloads and runs Elasticsearch snapshots, such as `scripts/es` or the FTR.
+You can use this flag with any command that downloads and runs {es} snapshots, such as `scripts/es` or the FTR.
For example, to run functional tests with the latest snapshot:
@@ -35,7 +35,7 @@ For example, to run functional tests with the latest snapshot:
KBN_ES_SNAPSHOT_USE_UNVERIFIED=true node scripts/functional_tests_server
-----------
-===== For Pull Requests
+==== For Pull Requests
Currently, there is not a way to run your pull request with the latest unverified snapshot without a code change. You can, however, do it with a small code change.
@@ -45,9 +45,9 @@ Currently, there is not a way to run your pull request with the latest unverifie
Your pull request should then use the latest snapshot the next time that it runs. Just don't merge the change to `Jenkinsfile`!
-==== Google Cloud Storage buckets
+=== Google Cloud Storage buckets
-===== kibana-ci-es-snapshots-daily
+==== kibana-ci-es-snapshots-daily
This bucket stores snapshots that are created on a daily basis, and is the primary location used by `kbn-es` to download snapshots.
@@ -61,7 +61,7 @@ The file structure for this bucket looks like this:
* `/archives//*.tar.gz.sha512`
* `/archives//manifest.json`
-===== kibana-ci-es-snapshots-permanent
+==== kibana-ci-es-snapshots-permanent
This bucket stores only the most recently promoted snapshot for each version. Old snapshots are only deleted when new ones are uploaded.
@@ -73,18 +73,18 @@ The file structure for this bucket looks like this:
* `/*.tar.gz.sha512`
* `/manifest.json`
-==== How snapshots are built, tested, and promoted
+=== How snapshots are built, tested, and promoted
-Each day, a https://kibana-ci.elastic.co/job/elasticsearch+snapshots+trigger/[Jenkins job] runs that triggers Elasticsearch builds for each currently tracked branch/version. This job is automatically updated with the correct branches whenever we release new versions of {kib}.
+Each day, a https://kibana-ci.elastic.co/job/elasticsearch+snapshots+trigger/[Jenkins job] runs that triggers {es} builds for each currently tracked branch/version. This job is automatically updated with the correct branches whenever we release new versions of {kib}.
-===== Build
+==== Build
-https://kibana-ci.elastic.co/job/elasticsearch+snapshots+build/[This Jenkins job] builds the Elasticsearch snapshots and uploads them to GCS.
+https://kibana-ci.elastic.co/job/elasticsearch+snapshots+build/[This Jenkins job] builds the {es} snapshots and uploads them to GCS.
The Jenkins job pipeline definition is https://github.com/elastic/kibana/blob/master/.ci/es-snapshots/Jenkinsfile_build_es[in the {kib} repo].
-1. Checkout Elasticsearch repo for the given branch/version.
-2. Run `./gradlew -p distribution/archives assemble --parallel` to create all of the Elasticsearch distributions.
+1. Checkout {es} repo for the given branch/version.
+2. Run `./gradlew -p distribution/archives assemble --parallel` to create all of the {es} distributions.
3. Create a tarball for each distribution.
4. Create a manifest JSON file containing info about the distribution, as well as its download URL.
5. Upload the tarballs and manifest to a unique location in the GCS bucket `kibana-ci-es-snapshots-daily`.
@@ -93,9 +93,9 @@ The Jenkins job pipeline definition is https://github.com/elastic/kibana/blob/ma
** This allows the `KBN_ES_SNAPSHOT_USE_UNVERIFIED` flag to work.
7. Trigger the verification job, to run the full {kib} CI test suite with this snapshot.
-===== Verification and Promotion
+==== Verification and Promotion
-https://kibana-ci.elastic.co/job/elasticsearch+snapshots+verify/[This Jenkins job] tests the latest Elasticsearch snapshot with the full {kib} CI pipeline, and promotes if it there are no test failures.
+https://kibana-ci.elastic.co/job/elasticsearch+snapshots+verify/[This Jenkins job] tests the latest {es} snapshot with the full {kib} CI pipeline, and promotes it if there are no test failures.
The Jenkins job pipeline definition is https://github.com/elastic/kibana/blob/master/.ci/es-snapshots/Jenkinsfile_verify_es[in the {kib} repo].
diff --git a/docs/developer/advanced/index.asciidoc b/docs/developer/advanced/index.asciidoc
index 139940ee42fe2..5c53bedd95e72 100644
--- a/docs/developer/advanced/index.asciidoc
+++ b/docs/developer/advanced/index.asciidoc
@@ -5,8 +5,8 @@
* <>
* <>
-include::development-es-snapshots.asciidoc[]
+include::development-es-snapshots.asciidoc[leveloffset=+1]
-include::running-elasticsearch.asciidoc[]
+include::running-elasticsearch.asciidoc[leveloffset=+1]
-include::development-basepath.asciidoc[]
\ No newline at end of file
+include::development-basepath.asciidoc[leveloffset=+1]
\ No newline at end of file
diff --git a/docs/developer/advanced/running-elasticsearch.asciidoc b/docs/developer/advanced/running-elasticsearch.asciidoc
index b03c231678eee..2361f805c7635 100644
--- a/docs/developer/advanced/running-elasticsearch.asciidoc
+++ b/docs/developer/advanced/running-elasticsearch.asciidoc
@@ -1,13 +1,13 @@
[[running-elasticsearch]]
-=== Running elasticsearch during development
+== Running {es} during development
-There are many ways to run Elasticsearch while you are developing.
+There are many ways to run {es} while you are developing.
-[float]
+[discrete]
-==== By snapshot
+=== By snapshot
-This will run a snapshot of elasticsearch that is usually built nightly. Read more about <>.
+This will run a snapshot of {es} that is usually built nightly. Read more about <>.
[source,bash]
----
@@ -25,36 +25,36 @@ yarn es snapshot --help
**Keeping data between snapshots**
-If you want to keep the data inside your Elasticsearch between usages of this command, you should use the following command, to keep your data folder outside the downloaded snapshot folder:
+If you want to keep the data inside your {es} between usages of this command, you should use the following command, to keep your data folder outside the downloaded snapshot folder:
[source,bash]
----
yarn es snapshot -E path.data=../data
----
-==== By source
+=== By source
-If you have the Elasticsearch repo checked out locally and wish to run against that, use `source`. By default, it will reference an elasticsearch checkout which is a sibling to the {kib} directory named elasticsearch. If you wish to use a checkout in another location you can provide that by supplying --source-path
+If you have the {es} repo checked out locally and wish to run against that, use `source`. By default, it will reference an {es} checkout which is a sibling to the {kib} directory named elasticsearch. If you wish to use a checkout in another location you can provide that by supplying --source-path
[source,bash]
----
yarn es source
----
-==== From an archive
+=== From an archive
-Use this if you already have a distributable. For released versions, one can be obtained on the Elasticsearch downloads page.
+Use this if you already have a distributable. For released versions, one can be obtained on the {es} downloads page.
[source,bash]
----
yarn es archive
----
-Each of these will run Elasticsearch with a basic license. Additional options are available, pass --help for more information.
+Each of these will run {es} with a basic license. Additional options are available, pass --help for more information.
-==== From a remote host
+=== From a remote host
-You can save some system resources, and the effort of generating sample data, if you have a remote Elasticsearch cluster to connect to. (Elasticians: you do! Check with your team about where to find credentials)
+You can save some system resources, and the effort of generating sample data, if you have a remote {es} cluster to connect to. (Elasticians: you do! Check with your team about where to find credentials)
You'll need to create a kibana.dev.yml (<>) and add the following to it:
@@ -75,7 +75,7 @@ kibana.index: '.{YourGitHubHandle}-kibana'
xpack.task_manager.index: '.{YourGitHubHandle}-task-manager-kibana'
----
-===== Running remote clusters
+==== Running remote clusters
Setup remote clusters for cross cluster search (CCS) and cross cluster replication (CCR).
@@ -95,7 +95,7 @@ yarn es snapshot -E transport.port=9500 -E http.port=9201 -E path.data=../data_p
Once both clusters are running, start {kib}. {kib} will connect to the primary cluster.
-Setup the remote cluster in {kib} from either Management -> Elasticsearch -> Remote Clusters UI or by running the following script in Console.
+Setup the remote cluster in {kib} from either Management -> {es} -> Remote Clusters UI or by running the following script in Console.
[source,bash]
----
diff --git a/docs/developer/architecture/add-data-tutorials.asciidoc b/docs/developer/architecture/add-data-tutorials.asciidoc
index e16b1bc039a10..3891b87a00e64 100644
--- a/docs/developer/architecture/add-data-tutorials.asciidoc
+++ b/docs/developer/architecture/add-data-tutorials.asciidoc
@@ -1,16 +1,16 @@
[[add-data-tutorials]]
-=== Add data tutorials
+== Add data tutorials
`Add Data` in the {kib} Home application contains tutorials for setting up data flows in the Elastic stack.
Each tutorial contains three sets of instructions:
-* `On Premise.` Set up a data flow when both {kib} and Elasticsearch are running on premise.
-* `On Premise Elastic Cloud.` Set up a data flow when {kib} is running on premise and Elasticsearch is running on Elastic Cloud.
-* `Elastic Cloud.` Set up a data flow when both {kib} and Elasticsearch are running on Elastic Cloud.
+* `On Premise.` Set up a data flow when both {kib} and {es} are running on premise.
+* `On Premise Elastic Cloud.` Set up a data flow when {kib} is running on premise and {es} is running on Elastic Cloud.
+* `Elastic Cloud.` Set up a data flow when both {kib} and {es} are running on Elastic Cloud.
-[float]
-==== Creating a new tutorial
+[discrete]
+=== Creating a new tutorial
1. Create a new directory in the link:https://github.com/elastic/kibana/tree/master/src/plugins/home/server/tutorials[tutorials directory].
2. In the new directory, create a file called `index.ts` that exports a function.
The function must return a function object that conforms to the `TutorialSchema` interface link:{kib-repo}tree/{branch}/src/plugins/home/server/services/tutorials/lib/tutorial_schema.ts[tutorial schema].
@@ -23,15 +23,15 @@ The function must return a function object that conforms to the `TutorialSchema`
If you are creating a new plugin and the tutorial is only related to that plugin, you can also place the `TutorialSchema` object into your plugin folder. Add `home` to the `requiredPlugins` list in your `kibana.json` file.
Then register the tutorial object by calling `home.tutorials.registerTutorial(tutorialObject)` in the `setup` lifecycle of your server plugin.
-[float]
-===== Variables
+[discrete]
+==== Variables
String values can contain variables that are substituted when rendered. Variables are specified by `{}`.
For example: `{config.docs.version}` is rendered as `6.2` when running the tutorial in {kib} 6.2.
link:{kib-repo}tree/{branch}/src/legacy/core_plugins/kibana/public/home/np_ready/components/tutorial/replace_template_strings.js#L23[Provided variables]
-[float]
-===== Markdown
+[discrete]
+==== Markdown
String values can contain limited Markdown syntax.
link:{kib-repo}tree/{branch}/src/legacy/core_plugins/kibana/public/home/components/tutorial/content.js#L8[Enabled Markdown grammars]
diff --git a/docs/developer/architecture/code-exploration.asciidoc b/docs/developer/architecture/code-exploration.asciidoc
new file mode 100644
index 0000000000000..2f67ae002c916
--- /dev/null
+++ b/docs/developer/architecture/code-exploration.asciidoc
@@ -0,0 +1,589 @@
+////
+
+NOTE:
+ This is an automatically generated file. Please do not edit directly. Instead, run the
+ following from within the kibana repository:
+
+ node scripts/build_plugin_list_docs
+
+ You can update the template within packages/kbn-dev-utils/target/plugin_list/generate_plugin_list.js
+
+////
+
+[[code-exploration]]
+== Exploring Kibana code
+
+The goals of our folder hierarchy are:
+
+- Easy for developers to know where to add new services, plugins and applications.
+- Easy for developers to know where to find the code from services, plugins and applications.
+- Easy to browse and understand our folder structure.
+
+To that aim, we strive to:
+
+- Avoid too many files in any given folder.
+- Choose clear, unambiguous folder names.
+- Organize by domain.
+- Every folder should contain a README that describes the contents of that folder.
+
+[discrete]
+[[kibana-services-applications]]
+=== Services and Applications
+
+[discrete]
+==== src/plugins
+
+- {kib-repo}blob/{branch}/src/plugins/advanced_settings[advancedSettings]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/apm_oss[apmOss]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/bfetch/README.md[bfetch]
+
+bfetch allows batching HTTP requests and streaming responses back.
+
+
+- {kib-repo}blob/{branch}/src/plugins/charts/README.md[charts]
+
+The Charts plugin is a way to create easier integration of shared colors, themes, types and other utilities across all Kibana charts and visualizations.
+
+
+- {kib-repo}blob/{branch}/src/plugins/console[console]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/dashboard[dashboard]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/data/README.md[data]
+
+data plugin provides common data access services.
+
+
+- {kib-repo}blob/{branch}/src/plugins/dev_tools/README.md[devTools]
+
+The ui/registry/dev_tools is removed in favor of the devTools plugin which exposes a register method in the setup contract.
+Registering an app works mostly the same as registering apps in core.application.register.
+Routing will be handled by the id of the dev tool - your dev tool will be mounted when the URL matches /app/dev_tools#/.
+This API doesn't support angular; for registering angular dev tools, bootstrap a local module on mount into the given HTML element.
+
+
+- {kib-repo}blob/{branch}/src/plugins/discover[discover]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/embeddable/README.md[embeddable]
+
+Embeddables are re-usable widgets that can be rendered in any environment or plugin. Developers can embed them directly in their plugin. End users can dynamically add them to any embeddable containers.
+
+
+- {kib-repo}blob/{branch}/src/plugins/es_ui_shared[esUiShared]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/expressions/README.md[expressions]
+
+This plugin provides methods which will parse & execute an expression pipeline
+string for you, as well as a series of registries for advanced users who might
+want to incorporate their own functions, types, and renderers into the service
+for use in their own application.
+
+
+- {kib-repo}blob/{branch}/src/plugins/home/README.md[home]
+
+Moves the legacy ui/registry/feature_catalogue module for registering "features" that should be shown in the home page's feature catalogue to a service within a "home" plugin. The feature catalogue referred to here should not be confused with the "feature" plugin for registering features used to derive UI capabilities for feature controls.
+
+
+- {kib-repo}blob/{branch}/src/plugins/index_pattern_management[indexPatternManagement]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/input_control_vis[inputControlVis]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/inspector/README.md[inspector]
+
+The inspector is a contextual tool to gain insights into different elements
+in Kibana, e.g. visualizations. It has the form of a flyout panel.
+
+
+- {kib-repo}blob/{branch}/src/plugins/kibana_legacy/README.md[kibanaLegacy]
+
+This plugin will contain several helpers and services to integrate pieces of the legacy Kibana app with the new Kibana platform.
+
+
+- {kib-repo}blob/{branch}/src/plugins/kibana_react/README.md[kibanaReact]
+
+Tools for building React applications in Kibana.
+
+
+- {kib-repo}blob/{branch}/src/plugins/kibana_usage_collection/README.md[kibanaUsageCollection]
+
+This plugin registers the basic usage collectors from Kibana:
+
+
+- {kib-repo}blob/{branch}/src/plugins/kibana_utils/README.md[kibanaUtils]
+
+Utilities for building Kibana plugins.
+
+
+- {kib-repo}blob/{branch}/src/plugins/legacy_export[legacyExport]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/management[management]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/maps_legacy[mapsLegacy]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/navigation/README.md[navigation]
+
+The navigation plugin exports the TopNavMenu component.
+It also provides a stateful version of it on the start contract.
+
+
+- {kib-repo}blob/{branch}/src/plugins/newsfeed[newsfeed]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/region_map[regionMap]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/saved_objects[savedObjects]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/saved_objects_management[savedObjectsManagement]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/share/README.md[share]
+
+Replaces the legacy ui/share module for registering share context menus.
+
+
+- {kib-repo}blob/{branch}/src/plugins/status_page[statusPage]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/telemetry/README.md[telemetry]
+
+Telemetry allows Kibana features to have usage tracked in the wild. The general term "telemetry" refers to multiple things:
+
+
+- {kib-repo}blob/{branch}/src/plugins/telemetry_collection_manager/README.md[telemetryCollectionManager]
+
+Telemetry's collection manager to go through all the telemetry sources when fetching it before reporting.
+
+
+- {kib-repo}blob/{branch}/src/plugins/telemetry_management_section/README.md[telemetryManagementSection]
+
+This plugin adds the Advanced Settings section for the Usage Data collection (aka Telemetry).
+
+
+- {kib-repo}blob/{branch}/src/plugins/tile_map[tileMap]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/timelion[timelion]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/ui_actions/README.md[uiActions]
+
+An API for:
+
+
+- {kib-repo}blob/{branch}/src/plugins/usage_collection/README.md[usageCollection]
+
+Usage Collection allows collecting usage data for other services to consume (telemetry and monitoring).
+To integrate with the telemetry services for usage collection of your feature, there are 2 steps:
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_markdown[visTypeMarkdown]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_metric[visTypeMetric]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_table[visTypeTable]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_tagcloud[visTypeTagcloud]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_timelion/README.md[visTypeTimelion]
+
+If your grammar was changed in public/chain.peg you need to re-generate the static parser. You could use a grunt task:
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_timeseries[visTypeTimeseries]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_vega[visTypeVega]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_vislib[visTypeVislib]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/vis_type_xy[visTypeXy]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/visualizations[visualizations]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/src/plugins/visualize[visualize]
+
+WARNING: Missing README.
+
+
+[discrete]
+==== x-pack/plugins
+
+- {kib-repo}blob/{branch}/x-pack/plugins/actions/README.md[actions]
+
+The Kibana actions plugin provides a framework to create executable actions. You can:
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/alerting_builtins/README.md[alertingBuiltins]
+
+This plugin provides alertTypes shipped with Kibana for use with
+the alerts plugin. When enabled, it will register
+the built-in alertTypes with the alerting plugin, register associated HTTP
+routes, etc.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/alerts/README.md[alerts]
+
+The Kibana alerting plugin provides a common place to set up alerts. You can:
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/apm/readme.md[apm]
+
+To access an elasticsearch instance that has live data you have two options:
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/audit_trail[auditTrail]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/beats_management[beats_management]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/canvas/README.md[canvas]
+
+"Never look back. The past is done. The future is a blank canvas." ― Suzy Kassem, Rise Up and Salute the Sun
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/case/README.md[case]
+
+Experimental Feature
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/cloud[cloud]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/code[code]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/console_extensions[consoleExtensions]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/cross_cluster_replication/README.md[crossClusterReplication]
+
+You can run a local cluster and simulate a remote cluster within a single Kibana directory.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/dashboard_enhanced/README.md[dashboardEnhanced]
+
+- {kib-repo}blob/{branch}/x-pack/plugins/dashboard_mode[dashboardMode]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/data_enhanced[dataEnhanced]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/discover_enhanced[discoverEnhanced]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/embeddable_enhanced[embeddableEnhanced]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/encrypted_saved_objects/README.md[encryptedSavedObjects]
+
+The purpose of this plugin is to provide a way to encrypt/decrypt attributes on the custom Saved Objects that works with
+security and spaces filtering as well as performing audit logging.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/enterprise_search/README.md[enterpriseSearch]
+
+This plugin's goal is to provide a Kibana user interface to the Enterprise Search solution's products (App Search and Workplace Search). In its current MVP state, the plugin provides the following with the goal of gathering user feedback and raising product awareness:
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/event_log/README.md[eventLog]
+
+The purpose of this plugin is to provide a way to persist a history of events
+occurring in Kibana, initially just for the Make It Action project - alerts
+and actions.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/features[features]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/file_upload[fileUpload]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/global_search/README.md[globalSearch]
+
+The GlobalSearch plugin provides an easy way to search for various objects, such as applications
+or dashboards from the Kibana instance, from both server and client-side plugins
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/global_search_providers[globalSearchProviders]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/graph/README.md[graph]
+
+This is the main source folder of the Graph plugin. It contains all of the Kibana server and client source code. x-pack/test/functional/apps/graph contains additional functional tests.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/grokdebugger/README.md[grokdebugger]
+
+- {kib-repo}blob/{branch}/x-pack/plugins/index_lifecycle_management/README.md[indexLifecycleManagement]
+
+You can test that the Frozen badge, phase filtering, and lifecycle information is surfaced in
+Index Management by running this series of requests in Console:
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/index_management[indexManagement]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/infra/README.md[infra]
+
+This is the home of the infra plugin, which aims to provide a solution for
+the infrastructure monitoring use-case within Kibana.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/ingest_manager/README.md[ingestManager]
+
+Fleet needs to have Elasticsearch API keys enabled, and also to have TLS enabled on kibana, (if you want to run Kibana without TLS you can provide the following config flag --xpack.ingestManager.fleet.tlsCheckDisabled=false)
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/ingest_pipelines/README.md[ingestPipelines]
+
+The ingest_pipelines plugin provides Kibana support for Elasticsearch's ingest nodes. Please refer to the Elasticsearch documentation for more details.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/lens/readme.md[lens]
+
+Run all tests from the x-pack root directory
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/license_management[licenseManagement]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/licensing/README.md[licensing]
+
+The licensing plugin retrieves license data from Elasticsearch at regular configurable intervals.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/lists/README.md[lists]
+
+README.md for developers working on the backend lists on how to get started
+using the CURL scripts in the scripts folder.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/logstash[logstash]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/maps/README.md[maps]
+
+Visualize geo data from Elasticsearch or 3rd party geo-services.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/maps_legacy_licensing/README.md[mapsLegacyLicensing]
+
+This plugin provides access to the detailed tile map services from Elastic.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/ml[ml]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/monitoring[monitoring]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/observability/README.md[observability]
+
+This plugin provides shared components and services for use across observability solutions, as well as the observability landing page UI.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/oss_telemetry[ossTelemetry]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/painless_lab[painlessLab]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/remote_clusters[remoteClusters]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/reporting/README.md[reporting]
+
+An awesome Kibana reporting plugin
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/rollup/README.md[rollup]
+
+Welcome to the Kibana rollup plugin! This plugin provides Kibana support for Elasticsearch's rollup feature. Please refer to the Elasticsearch documentation to understand rollup indices and how to create rollup jobs.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/searchprofiler[searchprofiler]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/security/README.md[security]
+
+See Configuring security in Kibana.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/security_solution[securitySolution]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/snapshot_restore/README.md[snapshotRestore]
+
+or
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/spaces[spaces]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/task_manager[taskManager]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/telemetry_collection_xpack/README.md[telemetryCollectionXpack]
+
+Gathers all usage collection, retrieving it from both OSS and X-Pack plugins.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/transform[transform]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/translations[translations]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/triggers_actions_ui/README.md[triggers_actions_ui]
+
+The Kibana alerts and actions UI plugin provides a user interface for managing alerts and actions.
+As a developer you can reuse and extend built-in alerts and actions UI functionality:
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/ui_actions_enhanced/README.md[uiActionsEnhanced]
+
+- {kib-repo}blob/{branch}/x-pack/plugins/upgrade_assistant[upgradeAssistant]
+
+WARNING: Missing README.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/uptime/README.md[uptime]
+
+The purpose of this plugin is to provide users of Heartbeat with more visibility into what's happening
+in their infrastructure.
+
+
+- {kib-repo}blob/{branch}/x-pack/plugins/watcher/README.md[watcher]
+
+This plugin adopts some conventions in addition to or in place of conventions in Kibana (at the time of the plugin's creation):
+
diff --git a/docs/developer/architecture/development-visualize-index.asciidoc b/docs/developer/architecture/development-visualize-index.asciidoc
index 551c41833fb72..d41ee32c1fb27 100644
--- a/docs/developer/architecture/development-visualize-index.asciidoc
+++ b/docs/developer/architecture/development-visualize-index.asciidoc
@@ -1,5 +1,5 @@
[[development-visualize-index]]
-=== Developing Visualizations
+== Developing Visualizations
[IMPORTANT]
==============================================
diff --git a/docs/developer/architecture/index.asciidoc b/docs/developer/architecture/index.asciidoc
index d726a8bd3642d..2e6ab1a4ad6ac 100644
--- a/docs/developer/architecture/index.asciidoc
+++ b/docs/developer/architecture/index.asciidoc
@@ -17,9 +17,12 @@ A few notable services are called out below.
* <>
* <>
* <>
+* <>
-include::add-data-tutorials.asciidoc[]
+include::add-data-tutorials.asciidoc[leveloffset=+1]
-include::development-visualize-index.asciidoc[]
+include::development-visualize-index.asciidoc[leveloffset=+1]
-include::security/index.asciidoc[]
+include::security/index.asciidoc[leveloffset=+1]
+
+include::code-exploration.asciidoc[leveloffset=+1]
diff --git a/docs/developer/architecture/security/feature-registration.asciidoc b/docs/developer/architecture/security/feature-registration.asciidoc
index 164f6d1cf9c74..3724624dbb917 100644
--- a/docs/developer/architecture/security/feature-registration.asciidoc
+++ b/docs/developer/architecture/security/feature-registration.asciidoc
@@ -1,13 +1,13 @@
[[development-plugin-feature-registration]]
-==== Plugin feature registration
+== Plugin feature registration
If your plugin will be used with {kib}'s default distribution, then you have the ability to register the features that your plugin provides. Features are typically apps in {kib}; once registered, you can toggle them via Spaces, and secure them via Roles when security is enabled.
-===== UI Capabilities
+=== UI Capabilities
Registering features also gives your plugin access to “UI Capabilities”. These capabilities are boolean flags that you can use to conditionally render your interface, based on the current user's permissions. For example, you can hide or disable a Save button if the current user is not authorized.
-===== Registering a feature
+=== Registering a feature
Feature registration is controlled via the built-in `xpack_main` plugin. To register a feature, call `xpack_main`'s `registerFeature` function from your plugin's `init` function, and provide the appropriate details:
@@ -21,7 +21,7 @@ init(server) {
}
-----------
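+
+For reference, a minimal sketch of such a registration follows; the ids, names, and saved object types below are illustrative assumptions, not a definitive example.
+
+["source","javascript"]
+-----------
+init(server) {
+  // Sketch only: ids, names, and saved object types are illustrative.
+  server.plugins.xpack_main.registerFeature({
+    id: 'my_plugin',
+    name: 'My Plugin',
+    app: ['my_plugin', 'kibana'],
+    privileges: {
+      all: {
+        savedObject: { all: ['my-saved-object-type'], read: ['config'] },
+        ui: ['save', 'show'],
+      },
+      read: {
+        savedObject: { all: [], read: ['my-saved-object-type', 'config'] },
+        ui: ['show'],
+      },
+    },
+  });
+}
+-----------
+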
-===== Feature details
+=== Feature details
Registering a feature consists of the following fields. For more information, consult the {kib-repo}blob/{branch}/x-pack/plugins/features/server/feature_registry.ts[feature registry interface].
@@ -65,12 +65,12 @@ Registering a feature consists of the following fields. For more information, co
|The ID of the navigation link associated with your feature.
|===
-====== Privilege definition
+==== Privilege definition
The `privileges` section of feature registration allows plugins to implement read/write and read-only modes for their applications.
For a full explanation of fields and options, consult the {kib-repo}blob/{branch}/x-pack/plugins/features/server/feature_registry.ts[feature registry interface].
-===== Using UI Capabilities
+=== Using UI Capabilities
UI Capabilities are available to your public (client) plugin code. These capabilities are read-only, and are used to inform the UI. This object is namespaced by feature id. For example, if your feature id is “foo”, then your UI Capabilities are stored at `uiCapabilities.foo`.
To access capabilities, import them from `ui/capabilities`:
@@ -86,7 +86,7 @@ if (canUserSave) {
-----------
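+
+A rough sketch of that usage; the exact import shape and the feature id `foo` are assumptions based on the description above.
+
+["source","javascript"]
+-----------
+// Assumed import shape; capabilities are read-only and namespaced by feature id.
+import { uiCapabilities } from 'ui/capabilities';
+
+const canUserSave = uiCapabilities.foo.save;
+if (canUserSave) {
+  // render the Save button
+}
+-----------
+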
[[example-1-canvas]]
-===== Example 1: Canvas Application
+=== Example 1: Canvas Application
["source","javascript"]
-----------
init(server) {
@@ -141,7 +141,7 @@ if (canUserSave) {
Because the `read` privilege does not define the `save` capability, users with read-only access will have their `uiCapabilities.canvas.save` flag set to `false`.
[[example-2-dev-tools]]
-===== Example 2: Dev Tools
+=== Example 2: Dev Tools
["source","javascript"]
-----------
@@ -176,7 +176,7 @@ init(server) {
},
privilegesTooltip: i18n.translate('xpack.features.devToolsPrivilegesTooltip', {
defaultMessage:
- 'User should also be granted the appropriate Elasticsearch cluster and index privileges',
+ 'User should also be granted the appropriate {es} cluster and index privileges',
}),
});
}
@@ -199,7 +199,7 @@ server.route({
-----------
[[example-3-discover]]
-===== Example 3: Discover
+=== Example 3: Discover
Discover takes advantage of subfeature privileges to allow fine-grained access control. In this example,
a single "Create Short URLs" subfeature privilege is defined, which allows users to grant access to this feature without having to grant the `all` privilege to Discover. In other words, you can grant `read` access to Discover, and also grant the ability to create short URLs.
diff --git a/docs/developer/architecture/security/index.asciidoc b/docs/developer/architecture/security/index.asciidoc
index 55b2450caf7a7..09739142c8f79 100644
--- a/docs/developer/architecture/security/index.asciidoc
+++ b/docs/developer/architecture/security/index.asciidoc
@@ -1,12 +1,14 @@
[[development-security]]
-=== Security
+== Security
-{kib} has generally been able to implement security transparently to core and plugin developers, and this largely remains the case. {kib} on two methods that the elasticsearch `Cluster` provides: `callWithRequest` and `callWithInternalUser`.
+{kib} has generally been able to implement security transparently to core and plugin developers, and this largely remains the case. {kib} relies on two methods that the {es} `Cluster` provides: `callWithRequest` and `callWithInternalUser`.
-`callWithRequest` executes requests against Elasticsearch using the authentication credentials of the {kib} end-user. So, if you log into {kib} with the user of `foo` when `callWithRequest` is used, {kib} execute the request against Elasticsearch as the user `foo`. Historically, `callWithRequest` has been used extensively to perform actions that are initiated at the request of {kib} end-users.
+`callWithRequest` executes requests against {es} using the authentication credentials of the {kib} end-user. So, if you log into {kib} as the user `foo`, when `callWithRequest` is used, {kib} executes the request against {es} as the user `foo`. Historically, `callWithRequest` has been used extensively to perform actions that are initiated at the request of {kib} end-users.
-`callWithInternalUser` executes requests against Elasticsearch using the internal {kib} server user, and has historically been used for performing actions that aren't initiated by {kib} end users; for example, creating the initial `.kibana` index or performing health checks against Elasticsearch.
+`callWithInternalUser` executes requests against {es} using the internal {kib} server user, and has historically been used for performing actions that aren't initiated by {kib} end users; for example, creating the initial `.kibana` index or performing health checks against {es}.
-However, with the changes that role-based access control (RBAC) introduces, this is no longer cut and dry. {kib} now requires all access to the `.kibana` index goes through the `SavedObjectsClient`. This used to be a best practice, as the `SavedObjectsClient` was responsible for translating the documents stored in Elasticsearch to and from Saved Objects, but RBAC is now taking advantage of this abstraction to implement access control and determine when to use `callWithRequest` versus `callWithInternalUser`.
+However, with the changes that role-based access control (RBAC) introduces, this is no longer cut and dried. {kib} now requires that all access to the `.kibana` index go through the `SavedObjectsClient`. This used to be a best practice, as the `SavedObjectsClient` was responsible for translating the documents stored in {es} to and from Saved Objects, but RBAC is now taking advantage of this abstraction to implement access control and determine when to use `callWithRequest` versus `callWithInternalUser`.
-include::rbac.asciidoc[]
+include::rbac.asciidoc[leveloffset=+1]
+
+include::feature-registration.asciidoc[leveloffset=+1]
diff --git a/docs/developer/architecture/security/rbac.asciidoc b/docs/developer/architecture/security/rbac.asciidoc
index ae1979e856e23..7b35a91ca73d0 100644
--- a/docs/developer/architecture/security/rbac.asciidoc
+++ b/docs/developer/architecture/security/rbac.asciidoc
@@ -1,9 +1,9 @@
[[development-security-rbac]]
-==== Role-based access control
+== Role-based access control
Role-based access control (RBAC) in {kib} relies upon the
{ref}/security-privileges.html#application-privileges[application privileges]
-that Elasticsearch exposes. This allows {kib} to define the privileges that
+that {es} exposes. This allows {kib} to define the privileges that
{kib} wishes to grant to users, assign them to the relevant users using roles,
and then authorize the user to perform a specific action. This is handled within
a secured instance of the `SavedObjectsClient` and available transparently to
@@ -11,7 +11,7 @@ consumers when using `request.getSavedObjectsClient()` or
`savedObjects.getScopedSavedObjectsClient()`.
[[development-rbac-privileges]]
-===== {kib} Privileges
+=== {kib} Privileges
When {kib} first starts up, it executes the following `POST` request against {es}. This synchronizes the definition of the privileges with various `actions` which are later used to authorize a user:
@@ -56,7 +56,7 @@ The application is created by concatenating the prefix of `kibana-` with the val
==============================================
[[development-rbac-assigning-privileges]]
-===== Assigning {kib} Privileges
+=== Assigning {kib} Privileges
{kib} privileges are assigned to specific roles using the `applications` element. For example, the following role assigns the <> privilege at `*` `resources` (which will in the future be used to secure spaces) to the default {kib} `application`:
@@ -81,7 +81,7 @@ Roles that grant <> should be managed using the <>
* <>
-include::stability.asciidoc[]
+include::stability.asciidoc[leveloffset=+1]
-include::security.asciidoc[]
+include::security.asciidoc[leveloffset=+1]
diff --git a/docs/developer/best-practices/security.asciidoc b/docs/developer/best-practices/security.asciidoc
index 26fcc73ce2b90..79ecb08295064 100644
--- a/docs/developer/best-practices/security.asciidoc
+++ b/docs/developer/best-practices/security.asciidoc
@@ -1,5 +1,5 @@
[[security-best-practices]]
-=== Security best practices
+== Security best practices
* XSS
** Check for usages of `dangerouslySetInnerHtml`, `Element.innerHTML`,
@@ -44,7 +44,7 @@ sensitive information which end up in the HTTP Response
** Ensure no sensitive cookies are forwarded to external resources.
** Ensure that all user controllable variables that are used in
constructing a URL are escaped properly. This is relevant when using
-`transport.request` with the Elasticsearch client as no automatic
+`transport.request` with the {es} client as no automatic
escaping is performed.
* Reverse tabnabbing -
https://github.com/OWASP/CheatSheetSeries/blob/master/cheatsheets/HTML5_Security_Cheat_Sheet.md#tabnabbing
diff --git a/docs/developer/best-practices/stability.asciidoc b/docs/developer/best-practices/stability.asciidoc
index 68237a034be52..f4b7ae1229909 100644
--- a/docs/developer/best-practices/stability.asciidoc
+++ b/docs/developer/best-practices/stability.asciidoc
@@ -1,10 +1,10 @@
[[stability]]
-=== Stability
+== Stability
Ensure your feature will work under all possible {kib} scenarios.
-[float]
-==== Environmental configuration scenarios
+[discrete]
+=== Environmental configuration scenarios
* Cloud
** Does the feature work on *cloud environment*?
@@ -32,16 +32,16 @@ non-standard {kib} indices. (create their own custom roles)
sessions. (we have had many discuss/SDH tickets around this)
* If a proxy/loadbalancer is running between ES and {kib}
-[float]
-==== Kibana.yml settings
+[discrete]
+=== Kibana.yml settings
* Using a custom {kib} index alias
* When optional dependencies are disabled
** Ensure all your required dependencies are listed in kibana.json
dependency list!
-[float]
-==== Test coverage
+[discrete]
+=== Test coverage
* Does the feature have sufficient unit test coverage? (does it handle
storeinSessions?)
@@ -49,16 +49,16 @@ storeinSessions?)
* Does the feature have sufficient REST API test coverage?
* Does the feature have sufficient Integration test coverage?
-[float]
-==== Browser coverage
+[discrete]
+=== Browser coverage
Refer to the list of browsers and OS {kib} supports
https://www.elastic.co/support/matrix
Does the feature work efficiently on the list of supported browsers?
-[float]
-==== Upgrade Scenarios - Migration scenarios-
+[discrete]
+=== Upgrade Scenarios - Migration scenarios
Does the feature affect old
indices, saved objects? - Has the feature been tested with {kib}
diff --git a/docs/developer/contributing/development-accessibility-tests.asciidoc b/docs/developer/contributing/development-accessibility-tests.asciidoc
index a3ffefb94cd2a..facf7ff14a6c1 100644
--- a/docs/developer/contributing/development-accessibility-tests.asciidoc
+++ b/docs/developer/contributing/development-accessibility-tests.asciidoc
@@ -1,5 +1,5 @@
[[development-accessibility-tests]]
-==== Automated Accessibility Testing
+== Automated Accessibility Testing
To run the tests locally:
diff --git a/docs/developer/contributing/development-documentation.asciidoc b/docs/developer/contributing/development-documentation.asciidoc
index d9fae42eef87e..99e55963f57af 100644
--- a/docs/developer/contributing/development-documentation.asciidoc
+++ b/docs/developer/contributing/development-documentation.asciidoc
@@ -1,18 +1,18 @@
[[development-documentation]]
-=== Documentation during development
+== Documentation during development
Docs should be written during development and accompany PRs when relevant. There are multiple types of documentation, and different places to add each.
-[float]
-==== Developer services documentation
+[discrete]
+=== Developer services documentation
Documentation about specific services a plugin offers should be encapsulated in:
* README.asciidoc at the base of the plugin folder.
* Typescript comments for all public services.
-[float]
-==== End user documentation
+[discrete]
+=== End user documentation
Documentation about user facing features should be written in http://asciidoc.org/[asciidoc] at
{kib-repo}/tree/master/docs[https://github.com/elastic/kibana/tree/master/docs]
@@ -27,8 +27,8 @@ README for getting the docs tooling set up.
node scripts/docs.js --open
```
-[float]
-==== General developer documentation and guidelines
+[discrete]
+=== General developer documentation and guidelines
General developer guidelines and documentation, like this right here, should be written in http://asciidoc.org/[asciidoc]
at {kib-repo}/tree/master/docs/developer[https://github.com/elastic/kibana/tree/master/docs/developer]
diff --git a/docs/developer/contributing/development-functional-tests.asciidoc b/docs/developer/contributing/development-functional-tests.asciidoc
index 442fc1ac755d3..580a5a000f391 100644
--- a/docs/developer/contributing/development-functional-tests.asciidoc
+++ b/docs/developer/contributing/development-functional-tests.asciidoc
@@ -1,10 +1,10 @@
[[development-functional-tests]]
-=== Functional Testing
+== Functional Testing
We use functional tests to make sure the {kib} UI works as expected. It replaces hours of manual testing by automating user interaction. To have better control over our functional test environment, and to make it more accessible to plugin authors, {kib} uses a tool called the `FunctionalTestRunner`.
-[float]
-==== Running functional tests
+[discrete]
+=== Running functional tests
The `FunctionalTestRunner` is very bare bones and gets most of its functionality from its config file, located at {blob}test/functional/config.js[test/functional/config.js]. If you’re writing a plugin outside the {kib} repo, you will have your own config file.
See <> for more info.
@@ -12,27 +12,27 @@ The `FunctionalTestRunner` is very bare bones and gets most of its functionality
There are three ways to run the tests depending on your goals:
1. Easiest option:
-** Description: Starts up {kib} & Elasticsearch servers, followed by running tests. This is much slower when running the tests multiple times because slow startup time for the servers. Recommended for single-runs.
+** Description: Starts up {kib} & {es} servers, followed by running tests. This is much slower when running the tests multiple times because of the slow startup time for the servers. Recommended for single-runs.
** `node scripts/functional_tests`
-*** does everything in a single command, including running Elasticsearch and {kib} locally
+*** does everything in a single command, including running {es} and {kib} locally
*** tears down everything after the tests run
*** exit code reports success/failure of the tests
2. Best for development:
-** Description: Two commands, run in separate terminals, separate the components that are long-running and slow from those that are ephemeral and fast. Tests can be re-run much faster, and this still runs Elasticsearch & {kib} locally.
+** Description: Two commands, run in separate terminals, separate the components that are long-running and slow from those that are ephemeral and fast. Tests can be re-run much faster, and this still runs {es} & {kib} locally.
** `node scripts/functional_tests_server`
-*** starts Elasticsearch and {kib} servers
+*** starts {es} and {kib} servers
*** slow to start
*** can be reused for multiple executions of the tests, thereby saving some time when re-running tests
*** automatically restarts the {kib} server when relevant changes are detected
** `node scripts/functional_test_runner`
-*** runs the tests against {kib} & Elasticsearch servers that were started by `node scripts/functional_tests_server`
+*** runs the tests against {kib} & {es} servers that were started by `node scripts/functional_tests_server`
*** exit code reports success or failure of the tests
3. Custom option:
-** Description: Runs tests against instances of Elasticsearch & {kib} started some other way (like Elastic Cloud, or an instance you are managing in some other way).
+** Description: Runs tests against instances of {es} & {kib} started some other way (like Elastic Cloud, or an instance you are managing in some other way).
** just executes the functional tests
-** url, credentials, etc. for Elasticsearch and {kib} are specified via environment variables
+** url, credentials, etc. for {es} and {kib} are specified via environment variables
** Here's an example that runs against an Elastic Cloud instance. Note that you must run the same branch of tests as the version of {kib} you're testing.
+
["source","shell"]
@@ -91,15 +91,15 @@ export TEST_THROTTLE_NETWORK=1
node scripts/functional_test_runner --exclude-tag skipCloud
----------
-[float]
-===== More about `node scripts/functional_test_runner`
+[discrete]
+==== More about `node scripts/functional_test_runner`
When run without any arguments the `FunctionalTestRunner` automatically loads the configuration in the standard location, but you can override that behavior with the `--config` flag. To run multiple configs, pass multiple `--config` arguments.
-* `--config test/functional/config.js` starts Elasticsearch and {kib} servers with the WebDriver tests configured to run in Chrome.
-* `--config test/functional/config.firefox.js` starts Elasticsearch and {kib} servers with the WebDriver tests configured to run in Firefox.
-* `--config test/api_integration/config.js` starts Elasticsearch and {kib} servers with the api integration tests configuration.
-* `--config test/accessibility/config.ts` starts Elasticsearch and {kib} servers with the WebDriver tests configured to run an accessibility audit using https://www.deque.com/axe/[axe].
+* `--config test/functional/config.js` starts {es} and {kib} servers with the WebDriver tests configured to run in Chrome.
+* `--config test/functional/config.firefox.js` starts {es} and {kib} servers with the WebDriver tests configured to run in Firefox.
+* `--config test/api_integration/config.js` starts {es} and {kib} servers with the api integration tests configuration.
+* `--config test/accessibility/config.ts` starts {es} and {kib} servers with the WebDriver tests configured to run an accessibility audit using https://www.deque.com/axe/[axe].
There are also command line flags for `--bail` and `--grep`, which behave just like their mocha counterparts. For instance, use `--grep=foo` to run only tests that match a regular expression.
@@ -108,11 +108,11 @@ Logging can also be customized with `--quiet`, `--debug`, or `--verbose` flags.
Use the `--help` flag for more options.
-[float]
-==== Writing functional tests
+[discrete]
+=== Writing functional tests
-[float]
-===== Environment
+[discrete]
+==== Environment
The tests are written in https://mochajs.org[mocha] using https://github.com/elastic/kibana/tree/master/packages/kbn-expect[@kbn/expect] for assertions.
@@ -120,8 +120,8 @@ We use https://www.w3.org/TR/webdriver1/[WebDriver Protocol] to run tests in bot
The `FunctionalTestRunner` automatically transpiles functional tests using babel, so that tests can use the same ECMAScript features that {kib} source code uses. See {blob}style_guides/js_style_guide.md[style_guides/js_style_guide.md].
-[float]
-===== Definitions
+[discrete]
+==== Definitions
**Provider:**
@@ -179,8 +179,8 @@ To run tests on Firefox locally, use `config.firefox.js`:
node scripts/functional_test_runner --config test/functional/config.firefox.js
-----------
-[float]
-===== Using the test_user service
+[discrete]
+==== Using the test_user service
Tests should run at the positive security boundary condition, meaning that they should be run with the minimum privileges required (and documented) and not as the superuser.
This prevents the type of regression where additional privileges accidentally become required to perform the same action.
@@ -198,8 +198,8 @@ Here we are setting the `test_user` to have the `kibana_user` role and also role
Tests should normally setRoles() in the before() and restoreDefaults() in the after().
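+
+A minimal sketch of that pattern, assuming the standard `security` test service; the feature role name is illustrative.
+
+["source","javascript"]
+-----------
+export default function ({ getService }) {
+  const security = getService('security');
+
+  describe('my read-only feature suite', () => {
+    before(async () => {
+      // run with only the privileges the suite actually needs
+      await security.testUser.setRoles(['kibana_user', 'my_feature_read_role']);
+    });
+
+    after(async () => {
+      await security.testUser.restoreDefaults();
+    });
+
+    // ...tests...
+  });
+}
+-----------
+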
-[float]
-===== Anatomy of a test file
+[discrete]
+==== Anatomy of a test file
This annotated example file shows the basic structure every test suite uses. It starts by importing https://github.com/elastic/kibana/tree/master/packages/kbn-expect[`@kbn/expect`] and defining its default export: an anonymous Test Provider. The test provider then destructures the Provider API for the `getService()` and `getPageObjects()` functions. It uses these functions to collect the dependencies of this suite. The rest of the test file will look pretty normal to mocha.js users. `describe()`, `it()`, `before()` and the lot are used to define suites that happen to automate a browser via services and objects of type `PageObject`.
@@ -222,7 +222,7 @@ export default function ({ getService, getPageObject }) {
describe('My Test Suite', () => {
// most suites start with a before hook that navigates to a specific
- // app/page and restores some archives into elasticsearch with esArchiver
+ // app/page and restores some archives into {es} with esArchiver
before(async () => {
await Promise.all([
// start with an empty .kibana index
@@ -235,7 +235,7 @@ export default function ({ getService, getPageObject }) {
});
// right after the before() hook definition, add the teardown steps
- // that will tidy up elasticsearch for other test suites
+ // that will tidy up {es} for other test suites
after(async () => {
// we unload the empty_kibana archive but not the makelogs
// archive because we don't make any changes to it, and subsequent
@@ -257,9 +257,9 @@ export default function ({ getService, getPageObject }) {
}
----
-[float]
+[discrete]
[[functional_test_runner_provider_api]]
-==== Provider API
+=== Provider API
The first and only argument to all providers is a Provider API Object. This object can be used to load service/page objects and config/test files.
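+
+For example, a minimal sketch of a test index file that composes a suite via the Provider API (the paths are illustrative):
+
+["source","javascript"]
+-----------
+export default function ({ getService, loadTestFile }) {
+  const esArchiver = getService('esArchiver');
+
+  describe('my app', function () {
+    before(async () => {
+      // load a known dataset before the nested suites run
+      await esArchiver.load('empty_kibana');
+    });
+
+    // nest suites from other files into this higher-level suite
+    loadTestFile(require.resolve('./my_first_test'));
+  });
+}
+-----------
+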
@@ -280,11 +280,11 @@ Within a test Provider the API is exactly the same as the service providers API
[horizontal]
`loadTestFile(path)`::: Load the test file at path in place. Use this method to nest suites from other files into a higher-level suite
-[float]
-==== Service Index
+[discrete]
+=== Service Index
-[float]
-===== Built-in Services
+[discrete]
+==== Built-in Services
The `FunctionalTestRunner` comes with three built-in services:
@@ -304,8 +304,8 @@ The `FunctionalTestRunner` comes with three built-in services:
* Exposes lifecycle events for basic coordination. Handlers can return a promise and resolve/fail asynchronously
* Phases include: `beforeLoadTests`, `beforeTests`, `beforeEachTest`, `cleanup`
-[float]
-===== {kib} Services
+[discrete]
+==== {kib} Services
The {kib} functional tests define the vast majority of the actual functionality used by tests.
@@ -377,7 +377,7 @@ Full list of services that are used in functional tests can be found here: {blob
**Low-level utilities:**:::
* es
** Source: {blob}test/common/services/es.ts[test/common/services/es.ts]
-** Elasticsearch client
+** {es} client
** Higher level options: `kibanaServer.uiSettings` or `esArchiver`
* remote
** Source: {blob}test/functional/services/remote/remote.ts[test/functional/services/remote/remote.ts]
@@ -387,8 +387,8 @@ Full list of services that are used in functional tests can be found here: {blob
** For searching and manipulating with DOM elements, use `testSubjects` and `find` services
** See the https://seleniumhq.github.io/selenium/docs/api/javascript/[selenium-webdriver docs] for the full API.
-[float]
-===== Custom Services
+[discrete]
+==== Custom Services
Services are intentionally generic. They can be literally anything (even nothing). Some services have helpers for interacting with specific types of UI elements, like `pointSeriesVis`, and others are more foundational, like `log` or `config`. Whenever you want to provide some functionality in a reusable package, consider making a custom service.
@@ -427,8 +427,8 @@ export default function () {
}
-----------
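+
+As another minimal sketch, a custom service provider can simply return a helper object built on top of other services; the service and test subject names below are illustrative.
+
+["source","javascript"]
+-----------
+export function MenuServiceProvider({ getService }) {
+  const log = getService('log');
+  const testSubjects = getService('testSubjects');
+
+  return {
+    async openMainMenu() {
+      log.debug('opening main menu');
+      await testSubjects.click('mainMenuButton');
+    },
+  };
+}
+-----------
+
+A custom service like this is typically registered in the config file's `services` map so tests can load it with `getService()`.
+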
-[float]
-==== PageObjects
+[discrete]
+=== PageObjects
The purpose for each PageObject is pretty self-explanatory. The visualize PageObject provides helpers for interacting with the visualize app, dashboard is the same for the dashboard app, and so on.
@@ -436,13 +436,13 @@ One exception is the "common" PageObject. A holdover from the intern implementat
Please add new methods to existing or new services rather than further expanding the CommonPage class.
-[float]
-==== Gotchas
+[discrete]
+=== Gotchas
Remember that you can’t run an individual test in the file (`it` block) because the whole `describe` needs to be run in order. There should only be one top level `describe` in a file.
-[float]
-===== Functional Test Timing
+[discrete]
+==== Functional Test Timing
Another important gotcha is writing stable tests by being mindful of timing. All methods on `remote` run asynchronously. It’s better to write interactions that wait for changes on the UI to appear before moving on to the next step.
@@ -480,8 +480,8 @@ class AppPage {
Writing in this way will ensure your test timings are not flaky or based on assumptions about UI updates after interactions.
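+
+For instance, a minimal sketch in that style, using the `retry` and `testSubjects` services (the page object and test subject names are illustrative):
+
+["source","javascript"]
+-----------
+export function AppPageProvider({ getService }) {
+  const retry = getService('retry');
+  const testSubjects = getService('testSubjects');
+
+  return new (class AppPage {
+    async getQueueLength() {
+      // wait for the UI to reflect the change instead of assuming it is instant
+      await retry.waitFor('queue counter to render', async () => {
+        return await testSubjects.exists('queueCounter');
+      });
+      return Number(await testSubjects.getVisibleText('queueCounter'));
+    }
+  })();
+}
+-----------
+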
-[float]
-==== Debugging
+[discrete]
+=== Debugging
From the command line run:
@@ -503,8 +503,8 @@ const log = getService(‘log’);
log.debug(‘done clicking menu’);
-----------
-[float]
-==== MacOS testing performance tip
+[discrete]
+=== MacOS testing performance tip
macOS users on a machine with a discrete graphics card may see significant speedups (up to 2x) when running tests by changing their terminal emulator's GPU settings. In iTerm2:
* Open Preferences (Command + ,)
diff --git a/docs/developer/contributing/development-github.asciidoc b/docs/developer/contributing/development-github.asciidoc
index 027b4e73aa9de..a6d4e29940487 100644
--- a/docs/developer/contributing/development-github.asciidoc
+++ b/docs/developer/contributing/development-github.asciidoc
@@ -1,16 +1,16 @@
[[development-github]]
-=== How we use git and github
+== How we use git and github
-[float]
-==== Forking
+[discrete]
+=== Forking
We follow the https://help.github.com/articles/fork-a-repo/[GitHub
forking model] for collaborating on {kib} code. This model assumes that
you have a remote called `upstream` which points to the official {kib}
repo, which we'll refer to in later code snippets.
-[float]
-==== Branching
+[discrete]
+=== Branching
* All work on the next major release goes into master.
* Past major release branches are named `{majorVersion}.x`. They contain
@@ -24,8 +24,8 @@ if the next patch release is `5.3.1`, work for it should go into the
branches.
* Where appropriate, we'll backport changes into older release branches.
-[float]
-==== Commits and Merging
+[discrete]
+=== Commits and Merging
* Feel free to make as many commits as you want, while working on a
branch.
@@ -38,8 +38,8 @@ explanation of _why_ you made the changes that you did.
feature branch, and force-pushing (see below for instructions).
* When merging, we'll squash your commits into a single commit.
-[float]
-===== Rebasing and fixing merge conflicts
+[discrete]
+==== Rebasing and fixing merge conflicts
Rebasing can be tricky, and fixing merge conflicts can be even trickier
because it involves force pushing. This is all compounded by the fact
@@ -106,7 +106,7 @@ hint: See the 'Note about fast-forwards' in 'git push --help' for details.
Assuming you've successfully rebased and you're happy with the code, you should force push instead.
-[float]
-==== Creating a pull request
+[discrete]
+=== Creating a pull request
See <> for the next steps on getting your code changes merged into {kib}.
\ No newline at end of file
diff --git a/docs/developer/contributing/development-pull-request.asciidoc b/docs/developer/contributing/development-pull-request.asciidoc
index 5d3c30fec7383..070eff449af5b 100644
--- a/docs/developer/contributing/development-pull-request.asciidoc
+++ b/docs/developer/contributing/development-pull-request.asciidoc
@@ -1,16 +1,16 @@
[[development-pull-request]]
-=== Submitting a pull request
+== Submitting a pull request
-[float]
-==== What Goes Into a Pull Request
+[discrete]
+=== What Goes Into a Pull Request
* Please include an explanation of your changes in your PR description.
* Links to relevant issues, external resources, or related PRs are very important and useful.
* Please update any tests that pertain to your code, and add new tests where appropriate.
* Update or add docs when appropriate. Read more about <>.
-[float]
-==== Submitting a Pull Request
+[discrete]
+=== Submitting a Pull Request
1. Push your local changes to your forked copy of the repository and submit a pull request.
2. Describe what your changes do and mention the number of the issue where discussion has taken place, e.g., “Closes #123”.
@@ -22,8 +22,8 @@ Always submit your pull against master unless the bug is only present in an olde
Then sit back and wait. There will probably be discussion about the Pull Request and, if any changes are needed, we'll work with you to get your Pull Request merged into {kib}.
-[float]
-==== What to expect during the pull request review process
+[discrete]
+=== What to expect during the pull request review process
Most PRs go through several iterations of feedback and updates. Depending on the scope and complexity of the PR, the process can take weeks. Please
be patient and understand we hold our code base to a high standard.
diff --git a/docs/developer/contributing/development-tests.asciidoc b/docs/developer/contributing/development-tests.asciidoc
index b470ea61669b2..78a2a90b69ce5 100644
--- a/docs/developer/contributing/development-tests.asciidoc
+++ b/docs/developer/contributing/development-tests.asciidoc
@@ -1,10 +1,10 @@
[[development-tests]]
-=== Testing
+== Testing
To ensure that your changes will not break other functionality, please run the test suite and build (<>) before submitting your Pull Request.
-[float]
-==== Running specific {kib} tests
+[discrete]
+=== Running specific {kib} tests
The following table outlines possible test file locations and how to
invoke them:
@@ -47,8 +47,8 @@ Examples: - Run the entire elasticsearch_service test suite:
string: ``` yarn test:ftr:server --config test/api_integration/config.js
yarn test:ftr:runner --config test/api_integration/config
-[float]
-==== Cross-browser compatibility
+[discrete]
+=== Cross-browser compatibility
**Testing IE on OS X**
@@ -71,8 +71,8 @@ your computer name).
`http://computer.local:5601` to test {kib}.
* Alternatively you can use browserstack
-[float]
-==== Running browser automation tests
+[discrete]
+=== Running browser automation tests
Check out <> to learn more about how you can run
and develop functional tests for {kib} core and plugins.
@@ -80,17 +80,17 @@ and develop functional tests for {kib} core and plugins.
You can also look into the {kib-repo}tree/{branch}/scripts/README.md[Scripts README.md]
to learn more about using the node scripts we provide for building
{kib}, running integration tests, and starting up {kib} and
-Elasticsearch while you develop.
+{es} while you develop.
-[float]
+[discrete]
==== More testing information:
* <>
* <>
* <>
-include::development-functional-tests.asciidoc[]
+include::development-functional-tests.asciidoc[leveloffset=+1]
-include::development-unit-tests.asciidoc[]
+include::development-unit-tests.asciidoc[leveloffset=+1]
-include::development-accessibility-tests.asciidoc[]
\ No newline at end of file
+include::development-accessibility-tests.asciidoc[leveloffset=+1]
\ No newline at end of file
diff --git a/docs/developer/contributing/development-unit-tests.asciidoc b/docs/developer/contributing/development-unit-tests.asciidoc
index 0009533c9a7c4..8b4954150bb5b 100644
--- a/docs/developer/contributing/development-unit-tests.asciidoc
+++ b/docs/developer/contributing/development-unit-tests.asciidoc
@@ -1,11 +1,11 @@
[[development-unit-tests]]
-==== Unit testing frameworks
+== Unit testing frameworks
{kib} is migrating unit testing from `Mocha` to `Jest`. Legacy unit tests
still exist in Mocha but all new unit tests should be written in Jest.
-[float]
-===== Mocha (legacy)
+[discrete]
+=== Mocha (legacy)
Mocha tests are contained in `__tests__` directories.
@@ -16,8 +16,8 @@ Mocha tests are contained in `__tests__` directories.
yarn test:mocha
-----------
-[float]
-==== Jest
+[discrete]
+=== Jest
Jest tests are stored in the same directory as source code files with the `.test.{js,mjs,ts,tsx}` suffix.
*Running Jest Unit Tests*
@@ -27,8 +27,8 @@ Jest tests are stored in the same directory as source code files with the `.test
yarn test:jest
-----------
-[float]
-====== Writing Jest Unit Tests
+[discrete]
+==== Writing Jest Unit Tests
In order to write those tests there are two main things you need to be aware of.
The first one is the difference between `jest.mock` and `jest.doMock`
@@ -37,8 +37,8 @@ test files with `babel-jest` both techniques are needed
especially for the tests implemented in TypeScript, in order to benefit from the
automatic type inference feature.
-[float]
-====== Jest.mock vs Jest.doMock
+[discrete]
+==== Jest.mock vs Jest.doMock
Both methods are essentially the same at their root; however, the `jest.mock`
calls will get hoisted to the top of the file and can only reference variables
@@ -47,8 +47,8 @@ reference pretty much any variable we want, however we have to assure those refe
variables are instantiated at the time we need them, which leads us to the next
section where we'll talk about our jest mock files pattern.
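+
+A small sketch of the difference; the module paths and names are illustrative.
+
+["source","javascript"]
+-----------
+// jest.mock calls are hoisted to the top of the file, so the factory cannot
+// reference local variables (only names prefixed with `mock` are allowed).
+jest.mock('./searcher', () => ({ search: jest.fn() }));
+
+describe('jest.mock vs jest.doMock', () => {
+  it('jest.doMock can close over local variables', () => {
+    const fakeHits = ['a', 'b'];
+
+    // jest.doMock is not hoisted; the mocked module must be required afterwards.
+    jest.doMock('./indexer', () => ({ getHits: () => fakeHits }));
+    const { getHits } = require('./indexer');
+
+    expect(getHits()).toEqual(fakeHits);
+  });
+});
+-----------
+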
-[float]
-====== Jest Mock Files Pattern
+[discrete]
+==== Jest Mock Files Pattern
Especially in TypeScript it is pretty common to have in unit tests
`jest.doMock` calls which reference for example imported types. Any error
@@ -76,9 +76,9 @@ like: `import * as Mocks from './mymodule.test.mocks'`,
or just `import './mymodule.test.mocks'` if there isn't anything
exported to be used.
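+
+A rough sketch of the pattern follows; the file, module, and function names are illustrative.
+
+["source","javascript"]
+-----------
+// my_module.test.mocks.js
+export const mockFetchConfig = jest.fn();
+jest.mock('./fetch_config', () => ({ fetchConfig: mockFetchConfig }));
+
+// my_module.test.js
+// Importing the mocks file first ensures the jest.mock registrations run
+// before the module under test is loaded.
+import { mockFetchConfig } from './my_module.test.mocks';
+import { loadSettings } from './my_module';
+
+it('uses the mocked fetchConfig', async () => {
+  mockFetchConfig.mockResolvedValue({ darkMode: true });
+  await expect(loadSettings()).resolves.toEqual({ darkMode: true });
+});
+-----------
+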
-[float]
+[discrete]
[[debugging-unit-tests]]
-===== Debugging Unit Tests
+=== Debugging Unit Tests
The standard `yarn test` task runs several sub tasks and can take
several minutes to complete, making debugging failures pretty painful.
@@ -127,8 +127,8 @@ description.
image:http://i.imgur.com/DwHxgfq.png[Browser test debugging]
-[float]
-===== Unit Testing Plugins
+[discrete]
+=== Unit Testing Plugins
This should work well if you’re using the
https://github.com/elastic/kibana/tree/master/packages/kbn-plugin-generator[Kibana
diff --git a/docs/developer/contributing/index.asciidoc b/docs/developer/contributing/index.asciidoc
index 4f987f31cf1f6..99ab83bc2f073 100644
--- a/docs/developer/contributing/index.asciidoc
+++ b/docs/developer/contributing/index.asciidoc
@@ -23,7 +23,7 @@ Read <> to get your environment up and running, the
Please make sure you have signed the http://www.elastic.co/contributor-agreement/[Contributor License Agreement]. We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once.
-[float]
+[discrete]
[[kibana-localization]]
=== Localization
@@ -32,7 +32,7 @@ Read <> for details on our localization prac
Note that we cannot support accepting contributions to the translations from any source other than the translators we have engaged to do the work.
We have yet to develop a proper process for accepting contributed translations. We certainly appreciate that people care enough about the localization effort to want to help improve the quality. We aim to build out a more comprehensive localization process for the future and will notify you once contributions can be supported, but for the time being, we are not able to incorporate suggestions.
-[float]
+[discrete]
[[kibana-release-notes-process]]
=== Release Notes Process
@@ -43,7 +43,7 @@ access to GitHub labels.
The Release Notes summarize what the PRs accomplish in language that is meaningful to users.
To generate the Release Notes, the team runs a script against this repo to collect the merged PRs against the release.
-[float]
+[discrete]
==== Create the Release Notes text
The text that appears in the Release Notes is pulled directly from your PR title, or a single paragraph of text that you specify in the PR description.
@@ -59,7 +59,7 @@ When you create the Release Notes text, use the following best practices:
* When you create a bug fix PR, start with `Fixes`.
* When you create a deprecation PR, start with `Deprecates`.
-[float]
+[discrete]
==== Add your labels
[arabic]
@@ -72,18 +72,18 @@ When you create the Release Notes text, use the following best practices:
* To **NOT** include your changes in the Release Notes, use `release_note:skip`.
-include::development-github.asciidoc[]
+include::development-github.asciidoc[leveloffset=+1]
-include::development-tests.asciidoc[]
+include::development-tests.asciidoc[leveloffset=+1]
-include::interpreting-ci-failures.asciidoc[]
+include::interpreting-ci-failures.asciidoc[leveloffset=+1]
-include::development-documentation.asciidoc[]
+include::development-documentation.asciidoc[leveloffset=+1]
-include::development-pull-request.asciidoc[]
+include::development-pull-request.asciidoc[leveloffset=+1]
-include::kibana-issue-reporting.asciidoc[]
+include::kibana-issue-reporting.asciidoc[leveloffset=+1]
-include::pr-review.asciidoc[]
+include::pr-review.asciidoc[leveloffset=+1]
-include::linting.asciidoc[]
+include::linting.asciidoc[leveloffset=+1]
diff --git a/docs/developer/contributing/interpreting-ci-failures.asciidoc b/docs/developer/contributing/interpreting-ci-failures.asciidoc
index ba3999a310198..bb623bc7a541c 100644
--- a/docs/developer/contributing/interpreting-ci-failures.asciidoc
+++ b/docs/developer/contributing/interpreting-ci-failures.asciidoc
@@ -1,19 +1,19 @@
[[interpreting-ci-failures]]
-=== Interpreting CI Failures
+== Interpreting CI Failures
{kib} CI uses a Jenkins feature called "Pipelines" to automate testing of the code in pull requests and on tracked branches. Pipelines are defined within the repository via the `Jenkinsfile` at the root of the project.
More information about Jenkins Pipelines can be found link:https://jenkins.io/doc/book/pipeline/[in the Jenkins book].
-[float]
-==== Github Checks
+[discrete]
+=== Github Checks
When a test fails it will be reported to Github via Github Checks. We currently bucket tests into several categories which run in parallel to make CI faster. Groups like `ciGroup{X}` get a single check in Github, and other tests like linting, or type checks, get their own checks.
Clicking the link next to the check in the conversation tab of a pull request will take you to the log output from that section of the tests. If that log output is truncated, or doesn't clearly identify what happened, you can usually get more complete information by visiting Jenkins directly.
-[float]
-==== Viewing Job Executions in Jenkins
+[discrete]
+=== Viewing Job Executions in Jenkins
To view the results of a job execution in Jenkins, either click the link in the comment left by `@elasticmachine` or search for the `kibana-ci` check in the list at the bottom of the PR. This link will take you to the top-level page for the specific job execution that failed.
@@ -24,8 +24,8 @@ image::images/job_view.png[]
3. *Google Cloud Storage (GCS) Upload Report:* Link to the screen which lists out the artifacts uploaded to GCS during this job execution.
4. *Pipeline Steps:* A breakdown of the pipeline that was executed, along with individual log output for each step in the pipeline.
-[float]
-==== Viewing ciGroup/test Logs
+[discrete]
+=== Viewing ciGroup/test Logs
To view the logs for a specific failed ciGroup, jest, mocha, type checkers, linters, etc., click on the *Pipeline Steps* link from the Job page.
diff --git a/docs/developer/contributing/kibana-issue-reporting.asciidoc b/docs/developer/contributing/kibana-issue-reporting.asciidoc
index 36c50b612d675..63366ae2aa6bb 100644
--- a/docs/developer/contributing/kibana-issue-reporting.asciidoc
+++ b/docs/developer/contributing/kibana-issue-reporting.asciidoc
@@ -1,8 +1,8 @@
[[kibana-issue-reporting]]
-=== Effective issue reporting in {kib}
+== Effective issue reporting in {kib}
-[float]
-==== Voicing the importance of an issue
+[discrete]
+=== Voicing the importance of an issue
We seriously appreciate thoughtful comments. If an issue is important to
you, add a comment with a solid write up of your use case and explain
@@ -17,8 +17,8 @@ https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comments[
thumbs up reaction] on the issue itself and on the comment which best
summarizes your thoughts.
-[float]
-==== "`My issue isn’t getting enough attention`"
+[discrete]
+=== "`My issue isn’t getting enough attention`"
First of all, *sorry about that!* We want you to have a great time with
{kib}.
@@ -31,8 +31,8 @@ more pressing issues.
Feel free to bump your issues if you think they’ve been neglected for a
prolonged period.
-[float]
-==== "`I want to help!`"
+[discrete]
+=== "`I want to help!`"
*Now we’re talking*. If you have a bug fix or new feature that you would
like to contribute to {kib}, please *find or open an issue about it
diff --git a/docs/developer/contributing/linting.asciidoc b/docs/developer/contributing/linting.asciidoc
index 234bd90478907..0d05afa504538 100644
--- a/docs/developer/contributing/linting.asciidoc
+++ b/docs/developer/contributing/linting.asciidoc
@@ -1,5 +1,5 @@
[[kibana-linting]]
-=== Linting
+== Linting
A note about linting: We use http://eslint.org[eslint] to check that the
link:STYLEGUIDE.md[styleguide] is being followed. It runs in a
@@ -34,8 +34,8 @@ for your editor, and browse our
https://github.com/elastic/kibana/blob/master/.editorconfig[`.editorconfig`]
file to see what config rules we set up.
-[float]
-==== Setup Guide for VS Code Users
+[discrete]
+=== Setup Guide for VS Code Users
Note that for VSCode, to enable "`live`" linting of TypeScript (and
other) file types, you will need to modify your local settings, as shown
diff --git a/docs/developer/contributing/pr-review.asciidoc b/docs/developer/contributing/pr-review.asciidoc
index ebab3b24aaaee..885725795b0b9 100644
--- a/docs/developer/contributing/pr-review.asciidoc
+++ b/docs/developer/contributing/pr-review.asciidoc
@@ -1,5 +1,5 @@
[[pr-review]]
-=== Pull request review guidelines
+== Pull request review guidelines
Every change made to {kib} must be held to a high standard, and while the responsibility for quality in a pull request ultimately lies with the author, {kib} team members have the responsibility as reviewers to verify during their review process.
@@ -10,24 +10,24 @@ It is not expected nor intended for a PR review to take the shape of this docume
While the review process is always done by Elastic staff members, these guidelines apply to all pull requests regardless of whether they are authored by community members or Elastic staff.
-[float]
-==== Target audience
+[discrete]
+=== Target audience
The target audience for this document is pull request reviewers. For {kib} maintainers, the PR review is the only part of the contributing process in which we have complete control. The author of any given pull request may not be up to speed on the latest expectations we have for pull requests, and they may have never read our guidelines at all. It's our responsibility as reviewers to guide folks through this process, but it's hard to do that consistently without a common set of documented principles.
Pull request authors can benefit from reading this document as well because it'll help establish a common set of expectations between authors and reviewers early.
-[float]
-==== Reject fast
+[discrete]
+=== Reject fast
Every pull request is different, and before reviewing any given PR, reviewers should consider the optimal way to approach the PR review so that if the change is ultimately rejected, it is done so as early in the process as possible.
For example, a reviewer may want to do a product level review as early as possible for a PR that includes a new UI feature. On the other hand, perhaps the author is submitting a new feature that has been rejected in the past due to key architectural decisions, in which case it may be appropriate for the reviewer to focus on the soundness of the architecture before diving into anything else.
-[float]
-==== The big three
+[discrete]
+=== The big three
There are a lot of discrete requirements and guidelines we want to follow in all of our pull requests, but three things in particular stand out as important above all the rest.
@@ -58,20 +58,20 @@ This isn't simply a question of enough test files. The code in the tests themsel
All of our code should have unit tests that verify its behaviors, including not only the "happy path", but also edge cases, error handling, etc. When you change an existing API of a module, there should always be at least one failing unit test, which in turn means we need to verify that all code consuming that API properly handles the change if necessary. For modules at a high enough level, this will mean we have a breaking change in the product, which we'll need to handle accordingly.
-In addition to extensive unit test coverage, PRs should include relevant functional and integration tests. In some cases, we may simply be testing a programmatic interface (e.g. a service) that is integrating with the file system, the network, Elasticsearch, etc. In other cases, we'll be testing REST APIs over HTTP or comparing screenshots/snapshots with prior known acceptable state. In the worst case, we are doing browser-based functional testing on a running instance of {kib} using selenium.
+In addition to extensive unit test coverage, PRs should include relevant functional and integration tests. In some cases, we may simply be testing a programmatic interface (e.g. a service) that is integrating with the file system, the network, {es}, etc. In other cases, we'll be testing REST APIs over HTTP or comparing screenshots/snapshots with prior known acceptable state. In the worst case, we are doing browser-based functional testing on a running instance of {kib} using selenium.
Enhancements are pretty much always going to have extensive unit tests as a base as well as functional and integration testing. Bug fixes should always include regression tests to ensure that same bug does not manifest again in the future.
--
-[float]
-==== Product level review
+[discrete]
+=== Product level review
Reviewers are not simply evaluating the code itself, they are also evaluating the quality of the user-facing change in the product. This generally means they need to check out the branch locally and "play around" with it. In addition to the "do we want this change in the product" details, the reviewer should be looking for bugs and evaluating how approachable and useful the feature is as implemented. Special attention should be given to error scenarios and edge cases to ensure they are all handled well within the product.
-[float]
-==== Consistency, style, readability
+[discrete]
+=== Consistency, style, readability
Having a relatively consistent codebase is an important part of us building a sustainable project. With dozens of active contributors at any given time, we rely on automation to help ensure consistency - we enforce a comprehensive set of linting rules through CI. We're also rolling out prettier to make this even more automatic.
@@ -86,8 +86,8 @@ When in doubt, relying on "prior art" in the codebase, especially in and around
There may also be times when a person is inspired by a particular contribution to introduce a new way to style code that we already have different style guidelines or "prior art" for. It's OK to bring this up in a pull request, but ultimately that discussion should branch off into a separate issue or pull request to update the appropriate guide. If this change is prompted by a reviewer, then the original PR should not be blocked on this. If the change is prompted by the author, then they can either update the PR to be consistent with our existing guidelines (preferred) or they can choose to block the PR entirely on that separate styleguide discussion.
-[float]
-==== Nitpicking
+[discrete]
+=== Nitpicking
Nitpicking is when a reviewer identifies trivial and unimportant details in a pull request and asks the author to change them. This is a completely subjective category that is impossible to define universally, and it's equally impractical to define a blanket policy on nitpicking that everyone will be happy with.
@@ -96,14 +96,14 @@ Reviewers should feel comfortable giving any feedback they have on a pull reques
Often, reviewers have an opinion about whether the feedback they are about to give is a nitpick or not. While not required, it can be really helpful to identify that feedback as such, for example "nit: a newline after this would be helpful". This helps the author understand your intention.
-[float]
-==== Handling disagreements
+[discrete]
+=== Handling disagreements
Conflicting opinions between reviewers and authors happen, and sometimes it is hard to reconcile those opinions. Ideally folks can work together in the spirit of these guidelines toward a consensus, but if that doesn't work out it may be best to bring a third person into the discussion. Our pull requests generally have two reviewers, so an appropriate third person may already be obvious. Otherwise, reach out to the functional area that is most appropriate or to technical leadership if an area isn't obvious.
-[float]
-==== Inappropriate review feedback
+[discrete]
+=== Inappropriate review feedback
Whether or not a bit of feedback is appropriate for a pull request is often dependent on the motivation for giving the feedback in the first place.
@@ -112,8 +112,8 @@ _Demanding_ an author make changes based primarily on the mindset of "how would
Inflammatory feedback such as "this is crap" isn't feedback at all. It's both mean and unhelpful, and it is never appropriate.
-[float]
-==== A checklist
+[discrete]
+=== A checklist
Establishing a comprehensive checklist for all of the things that should happen in all possible pull requests is impractical, but that doesn't mean we lack a concrete set of minimum requirements that we can enumerate. The following items should be double checked for any pull request:
diff --git a/docs/developer/getting-started/building-kibana.asciidoc b/docs/developer/getting-started/building-kibana.asciidoc
index e1f1ca336a5da..72054b1628fc2 100644
--- a/docs/developer/getting-started/building-kibana.asciidoc
+++ b/docs/developer/getting-started/building-kibana.asciidoc
@@ -1,5 +1,5 @@
[[building-kibana]]
-=== Building a {kib} distributable
+== Building a {kib} distributable
The following commands will build a {kib} production distributable.
@@ -15,8 +15,8 @@ You can get all build options using the following command:
yarn build --help
----
-[float]
-==== Building OS packages
+[discrete]
+=== Building OS packages
Packages are built using fpm, dpkg, and rpm. Package building has only been tested on Linux and is not supported on any other platform.
diff --git a/docs/developer/getting-started/debugging.asciidoc b/docs/developer/getting-started/debugging.asciidoc
index b369dcda748af..a3fb12ec1f6a3 100644
--- a/docs/developer/getting-started/debugging.asciidoc
+++ b/docs/developer/getting-started/debugging.asciidoc
@@ -1,15 +1,15 @@
[[kibana-debugging]]
-=== Debugging {kib}
+== Debugging {kib}
For information about how to debug unit tests, refer to <>.
-[float]
-==== Server Code
+[discrete]
+=== Server Code
`yarn debug` will start the server with Node's inspect flag. {kib}'s development mode will start three processes on ports `9229`, `9230`, and `9231`. Chrome's developer tools need to be configured to connect to all three. Add `localhost:` for each {kib} process in Chrome's developer tools connection tab.
-[float]
-==== Instrumenting with Elastic APM
+[discrete]
+=== Instrumenting with Elastic APM
{kib} ships with the
https://github.com/elastic/apm-agent-nodejs[Elastic APM Node.js Agent]
@@ -18,7 +18,7 @@ built-in for debugging purposes.
Its default configuration is meant to be used by core {kib} developers
only, but it can easily be re-configured to your needs. In its default
configuration it’s disabled and will, once enabled, send APM data to a
-centrally managed Elasticsearch cluster accessible only to Elastic
+centrally managed {es} cluster accessible only to Elastic
employees.
To change the location where data is sent, use the
diff --git a/docs/developer/getting-started/development-plugin-resources.asciidoc b/docs/developer/getting-started/development-plugin-resources.asciidoc
index dfe8efc4fef57..8f81138b81ed7 100644
--- a/docs/developer/getting-started/development-plugin-resources.asciidoc
+++ b/docs/developer/getting-started/development-plugin-resources.asciidoc
@@ -1,14 +1,14 @@
[[development-plugin-resources]]
-=== Plugin Resources
+== Plugin Resources
Here are some resources that are helpful for getting started with plugin development.
-[float]
-==== Some light reading
+[discrete]
+=== Some light reading
If you haven't already, start with <>. If you are planning to add your plugin to the {kib} repo, read the <> guide; if you are building a plugin externally, read <>. In both cases, read up on our recommended <>.
-[float]
-==== Creating an empty plugin
+[discrete]
+=== Creating an empty plugin
You can use the <> to get a basic structure for a new plugin. Plugins that are not part of the
{kib} repo should be developed inside the `plugins` folder. If you are building a new plugin to check in to the {kib} repo,
@@ -18,15 +18,15 @@ you will choose between a few locations:
- {kib-repo}tree/{branch}/src/plugins[src/plugins] for open source licensed plugins
- {kib-repo}tree/{branch}/examples[examples] for developer example plugins (these will not be included in the distributables)
-[float]
-==== Elastic UI Framework
+[discrete]
+=== Elastic UI Framework
If you're developing a plugin that has a user interface, take a look at our https://elastic.github.io/eui[Elastic UI Framework].
It documents the CSS and React components we use to build {kib}'s user interface.
You're welcome to use these components, but be aware that they are rapidly evolving, and we might introduce breaking changes that will disrupt your plugin's UI.
-[float]
-==== TypeScript Support
+[discrete]
+=== TypeScript Support
We recommend your plugin code is written in http://www.typescriptlang.org/[TypeScript].
To enable TypeScript support, create a `tsconfig.json` file at the root of your plugin that looks something like this:
@@ -48,14 +48,14 @@ TypeScript code is automatically converted into JavaScript during development,
but not in the distributable version of {kib}. If you use the
{kib-repo}blob/{branch}/packages/kbn-plugin-helpers[@kbn/plugin-helpers] to build your plugin, then your `.ts` and `.tsx` files will be permanently transpiled before your plugin is archived. If you have your own build process, make sure to run the TypeScript compiler on your source files and ship the compilation output so that your plugin will work with the distributable version of {kib}.
-[float]
-==== {kib} platform migration guide
+[discrete]
+=== {kib} platform migration guide
{kib-repo}blob/{branch}/src/core/MIGRATION.md#migrating-legacy-plugins-to-the-new-platform[This guide]
provides an action plan for moving a legacy plugin to the new platform.
-[float]
-==== Externally developed plugins
+[discrete]
+=== Externally developed plugins
If you are building a plugin outside of the {kib} repo, read <>.
diff --git a/docs/developer/getting-started/index.asciidoc b/docs/developer/getting-started/index.asciidoc
index 47c4a52daf303..2ac51b6cf86f8 100644
--- a/docs/developer/getting-started/index.asciidoc
+++ b/docs/developer/getting-started/index.asciidoc
@@ -3,7 +3,7 @@
Get started building your own plugins, or contributing directly to the {kib} repo.
-[float]
+[discrete]
[[get-kibana-code]]
=== Get the code
@@ -15,7 +15,7 @@ git clone https://github.com/[YOUR_USERNAME]/kibana.git kibana
cd kibana
----
-[float]
+[discrete]
=== Install dependencies
Install the version of Node.js listed in the `.node-version` file. This
@@ -67,11 +67,11 @@ corrupted packages in your yarn cache which you can clean with:
yarn cache clean
----
-[float]
+[discrete]
=== Configure environmental settings
[[increase-nodejs-heap-size]]
-[float]
+[discrete]
==== Increase node.js heap size
{kib} is a big project and for some commands it can happen that the
@@ -81,10 +81,10 @@ by setting the `--max_old_space_size` option on the command line. To set
the limit for all commands, simply add the following line to your shell
config: `export NODE_OPTIONS="--max_old_space_size=2048"`.
-[float]
-=== Run Elasticsearch
+[discrete]
+=== Run {es}
-Run the latest Elasticsearch snapshot. Specify an optional license with the `--license` flag.
+Run the latest {es} snapshot. Specify an optional license with the `--license` flag.
[source,bash]
----
@@ -96,7 +96,7 @@ yarn es snapshot --license trial
Read about more options for <>, like connecting to a remote host, running from source,
preserving data in between runs, running a remote cluster, etc.
-[float]
+[discrete]
=== Run {kib}
In another terminal window, start up {kib}. Include developer examples by adding an optional `--run-examples` flag.
@@ -110,13 +110,13 @@ View all available options by running `yarn start --help`
Read about more advanced options for <>.
-[float]
+[discrete]
=== Code away!
You are now ready to start developing. Changes to your files should be picked up automatically. Server side changes will
cause the {kib} server to reboot.
-[float]
+[discrete]
=== More information
* <>
@@ -129,12 +129,12 @@ cause the {kib} server to reboot.
* <>
-include::running-kibana-advanced.asciidoc[]
+include::running-kibana-advanced.asciidoc[leveloffset=+1]
-include::sample-data.asciidoc[]
+include::sample-data.asciidoc[leveloffset=+1]
-include::debugging.asciidoc[]
+include::debugging.asciidoc[leveloffset=+1]
-include::building-kibana.asciidoc[]
+include::building-kibana.asciidoc[leveloffset=+1]
-include::development-plugin-resources.asciidoc[]
\ No newline at end of file
+include::development-plugin-resources.asciidoc[leveloffset=+1]
\ No newline at end of file
diff --git a/docs/developer/getting-started/running-kibana-advanced.asciidoc b/docs/developer/getting-started/running-kibana-advanced.asciidoc
index e36f38de1b366..c3b7847b0f8ba 100644
--- a/docs/developer/getting-started/running-kibana-advanced.asciidoc
+++ b/docs/developer/getting-started/running-kibana-advanced.asciidoc
@@ -1,5 +1,5 @@
[[running-kibana-advanced]]
-=== Running {kib}
+== Running {kib}
Change to your local {kib} directory. Start the development server.
@@ -23,8 +23,8 @@ By default, you can log in with username `elastic` and password
`changeme`. See the `--help` options on `yarn es ` if
you’d like to configure a different password.
-[float]
-==== Running {kib} in Open-Source mode
+[discrete]
+=== Running {kib} in Open-Source mode
If you’re looking to only work with the open-source software, supply the
license type to `yarn es`:
@@ -41,8 +41,8 @@ And start {kib} with only open-source code:
yarn start --oss
----
-[float]
-==== Unsupported URL Type
+[discrete]
+=== Unsupported URL Type
If you’re installing dependencies and seeing an error that looks
something like
@@ -56,9 +56,9 @@ need to run `yarn kbn bootstrap`. For more info, see
link:#setting-up-your-development-environment[Setting Up Your
Development Environment] above.
-[float]
+[discrete]
[[customize-kibana-yml]]
-==== Customizing `config/kibana.dev.yml`
+=== Customizing `config/kibana.dev.yml`
The `config/kibana.yml` file stores user configuration directives.
Since this file is checked into source control, however, developer
@@ -70,8 +70,8 @@ non-dev version and accepts any of the
https://www.elastic.co/guide/en/kibana/current/settings.html[standard
settings].
-[float]
-==== Potential Optimization Pitfalls
+[discrete]
+=== Potential Optimization Pitfalls
* Webpack is trying to include a file in the bundle that I deleted and
is now complaining that it is missing
@@ -79,9 +79,9 @@ is now complaining about it is missing
directory, but webpack isn’t adapting
* (if you discover other scenarios, please send a PR!)
-[float]
-==== Setting Up SSL
+[discrete]
+=== Setting Up SSL
{kib} includes self-signed certificates that can be used for
development purposes in the browser and for communicating with
-Elasticsearch: `yarn start --ssl` & `yarn es snapshot --ssl`.
\ No newline at end of file
+{es}: `yarn start --ssl` & `yarn es snapshot --ssl`.
\ No newline at end of file
diff --git a/docs/developer/getting-started/sample-data.asciidoc b/docs/developer/getting-started/sample-data.asciidoc
index 376211ceb2634..0d313cbabe64e 100644
--- a/docs/developer/getting-started/sample-data.asciidoc
+++ b/docs/developer/getting-started/sample-data.asciidoc
@@ -1,17 +1,17 @@
[[sample-data]]
-=== Installing sample data
+== Installing sample data
-There are a couple ways to easily get data ingested into Elasticsearch.
+There are a couple of ways to easily get data ingested into {es}.
-[float]
-==== Sample data packages available for one click installation
+[discrete]
+=== Sample data packages available for one click installation
The easiest way is to install one or more of our available sample data packages. If you have no data, you should be
prompted to install them when running {kib} for the first time. You can also access and install the sample data packages
by going to the home page and clicking "add sample data".
-[float]
-==== makelogs script
+[discrete]
+=== makelogs script
The provided `makelogs` script will generate sample data.
@@ -22,10 +22,10 @@ node scripts/makelogs --auth :
The default username and password combination is `elastic:changeme`.
-Make sure to execute `node scripts/makelogs` *after* elasticsearch is up and running!
+Make sure to execute `node scripts/makelogs` *after* {es} is up and running!
-[float]
-==== CSV upload
+[discrete]
+=== CSV upload
If running with a platinum or trial license, you can also use the CSV uploader provided inside the Machine learning app.
Navigate to the Data visualizer to upload your data from a file.
\ No newline at end of file
diff --git a/docs/developer/plugin/external-plugin-functional-tests.asciidoc b/docs/developer/plugin/external-plugin-functional-tests.asciidoc
index 44f636d627011..706bf6af8ed9b 100644
--- a/docs/developer/plugin/external-plugin-functional-tests.asciidoc
+++ b/docs/developer/plugin/external-plugin-functional-tests.asciidoc
@@ -1,10 +1,10 @@
[[external-plugin-functional-tests]]
-=== Functional Tests for Plugins outside the {kib} repo
+== Functional Tests for Plugins outside the {kib} repo
Plugins use the `FunctionalTestRunner` by running it out of the {kib} repo. Ensure that your {kib} Development Environment is set up properly before continuing.
-[float]
-==== Writing your own configuration
+[discrete]
+=== Writing your own configuration
Every project or plugin should have its own `FunctionalTestRunner` config file. Just like {kib}'s, this config file will define all of the test files to load, providers for Services and PageObjects, as well as configuration options for certain services.
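As a rough sketch only (the file path, test file, and plugin names below are illustrative, not taken from a real plugin), such a config commonly reuses {kib}'s own services and servers:

["source","js"]
-----------
// test/functional/config.js — an illustrative FunctionalTestRunner config
export default async function ({ readConfigFile }) {
  // reuse services, page objects, and server settings from Kibana's own functional config
  const kibanaConfig = await readConfigFile(require.resolve('../../../kibana/test/functional/config'));

  return {
    testFiles: [require.resolve('./apps/my_plugin')],
    services: kibanaConfig.get('services'),
    pageObjects: kibanaConfig.get('pageObjects'),
    servers: kibanaConfig.get('servers'),
  };
}
-----------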
@@ -82,8 +82,8 @@ From the root of your repo you should now be able to run the `FunctionalTestRunn
node ../../kibana/scripts/functional_test_runner
-----------
-[float]
-==== Using esArchiver
+[discrete]
+=== Using esArchiver
We're working on documentation for this, but for now the best place to look is the original {kib-repo}/issues/10359[pull request].
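In the meantime, here is a hedged sketch of how the `esArchiver` service is typically consumed from a functional test (the archive name and test suite below are hypothetical):

["source","js"]
-----------
// an illustrative functional test that loads and unloads an archive around a suite
export default function ({ getService }) {
  const esArchiver = getService('esArchiver');

  describe('my plugin', () => {
    before(async () => await esArchiver.load('my_plugin/base_data'));
    after(async () => await esArchiver.unload('my_plugin/base_data'));

    it('shows the expected documents', async () => {
      // assertions go here
    });
  });
}
-----------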
diff --git a/docs/developer/plugin/external-plugin-localization.asciidoc b/docs/developer/plugin/external-plugin-localization.asciidoc
index c151832ab53fa..d30dec1a8f46b 100644
--- a/docs/developer/plugin/external-plugin-localization.asciidoc
+++ b/docs/developer/plugin/external-plugin-localization.asciidoc
@@ -1,10 +1,10 @@
[[external-plugin-localization]]
-=== Localization for plugins outside the {kib} repo
+== Localization for plugins outside the {kib} repo
To introduce localization for your plugin, use our i18n tool to create IDs and default messages. You can then extract these IDs, along with their default messages, into localization JSON files for {kib} to use when running your plugin.
-[float]
-==== Adding localization to your plugin
+[discrete]
+=== Adding localization to your plugin
You must add a `translations` directory at the root of your plugin. This directory will contain the translation files that {kib} uses.
@@ -19,8 +19,8 @@ You must add a `translations` directory at the root of your plugin. This directo
-----------
-[float]
-==== Using {kib} i18n tooling
+[discrete]
+=== Using {kib} i18n tooling
To simplify the localization process, {kib} provides tools for the following functions:
* Verify all translations have translatable strings and extract default messages from templates
@@ -51,8 +51,8 @@ An example {kib} `.i18nrc.json` is {blob}.i18nrc.json[here].
Full documentation about i18n tooling is {blob}src/dev/i18n/README.md[here].
-[float]
-==== Extracting default messages
+[discrete]
+=== Extracting default messages
To extract the default messages from your plugin, run the following command:
["source","shell"]
@@ -62,8 +62,8 @@ node scripts/i18n_extract --output-dir ./translations --include-config ../kibana
This outputs an `en.json` file inside the `translations` directory. To localize other languages, clone the file and translate each string.
-[float]
-==== Checking i18n messages
+[discrete]
+=== Checking i18n messages
Checking i18n does the following:
@@ -80,8 +80,8 @@ node scripts/i18n_check --fix --include-config ../kibana-extra/myPlugin/.i18nrc.
-----------
-[float]
-==== Implementing i18n in the UI
+[discrete]
+=== Implementing i18n in the UI
{kib} relies on several UI frameworks (ReactJS and AngularJS) and
requires localization in different environments (browser and NodeJS).
@@ -97,8 +97,8 @@ so both React and AngularJS frameworks use the same engine and the same
message syntax.
-[float]
-===== i18n for vanilla JavaScript
+[discrete]
+==== i18n for vanilla JavaScript
["source","js"]
-----------
@@ -111,8 +111,8 @@ export const HELLO_WORLD = i18n.translate('hello.wonderful.world', {
Full details are {kib-repo}tree/master/packages/kbn-i18n#vanilla-js[here].
-[float]
-===== i18n for React
+[discrete]
+==== i18n for React
To localize strings in React, use either `FormattedMessage` or `i18n.translate`.
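For example, a minimal sketch using `FormattedMessage` (the component, message id, and default message below are placeholders):

["source","js"]
-----------
import React from 'react';
import { FormattedMessage } from '@kbn/i18n/react';

// an illustrative component; replace the id and defaultMessage with your own
export const WelcomeTitle = () => (
  <FormattedMessage
    id="myPlugin.welcome.title"
    defaultMessage="Welcome to {name}"
    values={{ name: 'my plugin' }}
  />
);
-----------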
@@ -137,8 +137,8 @@ Full details are {kib-repo}tree/master/packages/kbn-i18n#react[here].
-[float]
-===== i18n for Angular
+[discrete]
+==== i18n for Angular
You are encouraged to use `i18n.translate()` by statically importing `i18n` from `@kbn/i18n` wherever possible in your Angular code. Angular wrappers use the translation `service` with the i18n engine under the hood.
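For example (the message id and default message below are placeholders):

["source","js"]
-----------
import { i18n } from '@kbn/i18n';

// a statically imported translation, usable from Angular controllers and templates alike
const panelTitle = i18n.translate('myPlugin.panel.title', {
  defaultMessage: 'My panel',
});
-----------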
@@ -156,8 +156,8 @@ The translation directive has the following syntax:
Full details are {kib-repo}tree/master/packages/kbn-i18n#angularjs[here].
-[float]
-==== Resources
+[discrete]
+=== Resources
To learn more about i18n tooling, see {blob}src/dev/i18n/README.md[i18n dev tooling].
diff --git a/docs/developer/plugin/index.asciidoc b/docs/developer/plugin/index.asciidoc
index 73f1d2c908fa7..dd83cf234dea4 100644
--- a/docs/developer/plugin/index.asciidoc
+++ b/docs/developer/plugin/index.asciidoc
@@ -9,9 +9,9 @@ The {kib} plugin interfaces are in a state of constant development. We cannot p
Most developers who contribute code directly to the {kib} repo are writing code inside plugins, so our <> docs are the best place to
start. However, there are a few differences when developing plugins outside the {kib} repo. These differences are covered here.
-[float]
+[discrete]
[[automatic-plugin-generator]]
-==== Automatic plugin generator
+=== Automatic plugin generator
We recommend that you kick-start your plugin by generating it with the {kib-repo}tree/{branch}/packages/kbn-plugin-generator[Kibana Plugin Generator]. Run the following in the {kib} repo, and you will be asked a couple of questions, see some progress bars, and have a freshly generated plugin ready for you to play with in {kib}'s `plugins` folder.
@@ -20,7 +20,7 @@ We recommend that you kick-start your plugin by generating it with the {kib-repo
node scripts/generate_plugin my_plugin_name # replace "my_plugin_name" with your desired plugin name
-----------
-[float]
+[discrete]
=== Plugin location
The {kib} directory must be named `kibana`, and your plugin directory should be located in the root of `kibana` in a `plugins` directory, for example:
@@ -37,6 +37,6 @@ The {kib} directory must be named `kibana`, and your plugin directory should be
* <>
* <>
-include::external-plugin-functional-tests.asciidoc[]
+include::external-plugin-functional-tests.asciidoc[leveloffset=+1]
-include::external-plugin-localization.asciidoc[]
+include::external-plugin-localization.asciidoc[leveloffset=+1]
diff --git a/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpostauth.md b/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpostauth.md
index eff53b7b75fa5..41b82f428948a 100644
--- a/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpostauth.md
+++ b/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpostauth.md
@@ -14,5 +14,5 @@ registerOnPostAuth: (handler: OnPostAuthHandler) => void;
## Remarks
-The auth state is available at stage via http.auth.get(..) Can register any number of registerOnPreRouting, which are called in sequence (from the first registered to the last). See [OnPostAuthHandler](./kibana-plugin-core-server.onpostauthhandler.md).
+The auth state is available at this stage via http.auth.get(..). You can register any number of registerOnPostAuth handlers; they are called in sequence (from the first registered to the last). See [OnPostAuthHandler](./kibana-plugin-core-server.onpostauthhandler.md).
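A minimal sketch of registering a post-auth interceptor (assuming an `http` setup contract is in scope; the handler below simply continues the lifecycle):

```typescript
http.registerOnPostAuth((request, response, toolkit) => {
  // runs after authentication; handlers execute in registration order
  return toolkit.next();
});
```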
diff --git a/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpreauth.md b/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpreauth.md
index ce4cacb1c8749..57b1833df5e03 100644
--- a/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpreauth.md
+++ b/docs/development/core/server/kibana-plugin-core-server.httpservicesetup.registeronpreauth.md
@@ -14,5 +14,5 @@ registerOnPreAuth: (handler: OnPreAuthHandler) => void;
## Remarks
-Can register any number of registerOnPostAuth, which are called in sequence (from the first registered to the last). See [OnPreRoutingHandler](./kibana-plugin-core-server.onpreroutinghandler.md).
+You can register any number of registerOnPreAuth handlers; they are called in sequence (from the first registered to the last). See [OnPreAuthHandler](./kibana-plugin-core-server.onpreauthhandler.md).
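A minimal sketch (assuming an `http` setup contract is in scope) that rejects requests to a hypothetical path before authentication runs:

```typescript
http.registerOnPreAuth((request, response, toolkit) => {
  // illustrative only: block a hypothetical internal path, otherwise continue the lifecycle
  if (request.url.pathname && request.url.pathname.startsWith('/internal/blocked')) {
    return response.forbidden();
  }
  return toolkit.next();
});
```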
diff --git a/docs/development/core/server/kibana-plugin-core-server.md b/docs/development/core/server/kibana-plugin-core-server.md
index a665327454c1a..61ffc532f0de5 100644
--- a/docs/development/core/server/kibana-plugin-core-server.md
+++ b/docs/development/core/server/kibana-plugin-core-server.md
@@ -122,7 +122,7 @@ The plugin integrates with the core system via lifecycle events: `setup`
| [OnPreAuthToolkit](./kibana-plugin-core-server.onpreauthtoolkit.md) | A tool set defining an outcome of OnPreAuth interceptor for incoming request. |
| [OnPreResponseExtensions](./kibana-plugin-core-server.onpreresponseextensions.md) | Additional data to extend a response. |
| [OnPreResponseInfo](./kibana-plugin-core-server.onpreresponseinfo.md) | Response status code. |
-| [OnPreResponseToolkit](./kibana-plugin-core-server.onpreresponsetoolkit.md) | A tool set defining an outcome of OnPreRouting interceptor for incoming request. |
+| [OnPreResponseToolkit](./kibana-plugin-core-server.onpreresponsetoolkit.md) | A tool set defining an outcome of OnPreResponse interceptor for incoming request. |
| [OnPreRoutingToolkit](./kibana-plugin-core-server.onpreroutingtoolkit.md) | A tool set defining an outcome of OnPreRouting interceptor for incoming request. |
| [OpsMetrics](./kibana-plugin-core-server.opsmetrics.md) | Regroups metrics gathered by all the collectors. This contains metrics about the os/runtime, the kibana process and the http server. |
| [OpsOsMetrics](./kibana-plugin-core-server.opsosmetrics.md) | OS related metrics |
diff --git a/docs/development/core/server/kibana-plugin-core-server.onpreresponsetoolkit.md b/docs/development/core/server/kibana-plugin-core-server.onpreresponsetoolkit.md
index 306c375ba4a3c..44da09d0cc68e 100644
--- a/docs/development/core/server/kibana-plugin-core-server.onpreresponsetoolkit.md
+++ b/docs/development/core/server/kibana-plugin-core-server.onpreresponsetoolkit.md
@@ -4,7 +4,7 @@
## OnPreResponseToolkit interface
-A tool set defining an outcome of OnPreRouting interceptor for incoming request.
+A tool set defining an outcome of OnPreResponse interceptor for incoming request.
Signature:
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.embeddable.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.embeddable.md
new file mode 100644
index 0000000000000..027ae4209b77f
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.embeddable.md
@@ -0,0 +1,11 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [ApplyGlobalFilterActionContext](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md) > [embeddable](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.embeddable.md)
+
+## ApplyGlobalFilterActionContext.embeddable property
+
+Signature:
+
+```typescript
+embeddable?: IEmbeddable;
+```
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.filters.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.filters.md
new file mode 100644
index 0000000000000..6d1d20580fb19
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.filters.md
@@ -0,0 +1,11 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [ApplyGlobalFilterActionContext](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md) > [filters](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.filters.md)
+
+## ApplyGlobalFilterActionContext.filters property
+
+Signature:
+
+```typescript
+filters: Filter[];
+```
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md
new file mode 100644
index 0000000000000..62817cd0a1e33
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md
@@ -0,0 +1,20 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [ApplyGlobalFilterActionContext](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md)
+
+## ApplyGlobalFilterActionContext interface
+
+Signature:
+
+```typescript
+export interface ApplyGlobalFilterActionContext
+```
+
+## Properties
+
+| Property | Type | Description |
+| --- | --- | --- |
+| [embeddable](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.embeddable.md) | IEmbeddable | |
+| [filters](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.filters.md) | Filter[] | |
+| [timeFieldName](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.timefieldname.md) | string | |
+
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.timefieldname.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.timefieldname.md
new file mode 100644
index 0000000000000..a5cf58018ec65
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.timefieldname.md
@@ -0,0 +1,11 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [ApplyGlobalFilterActionContext](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md) > [timeFieldName](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.timefieldname.md)
+
+## ApplyGlobalFilterActionContext.timeFieldName property
+
+Signature:
+
+```typescript
+timeFieldName?: string;
+```
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.esaggsexpressionfunctiondefinition.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.esaggsexpressionfunctiondefinition.md
new file mode 100644
index 0000000000000..6cf05dde27627
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.esaggsexpressionfunctiondefinition.md
@@ -0,0 +1,11 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [EsaggsExpressionFunctionDefinition](./kibana-plugin-plugins-data-public.esaggsexpressionfunctiondefinition.md)
+
+## EsaggsExpressionFunctionDefinition type
+
+Signature:
+
+```typescript
+export declare type EsaggsExpressionFunctionDefinition = ExpressionFunctionDefinition<'esaggs', Input, Arguments, Output>;
+```
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md
index 7cb6ef64431bf..db41936f35cca 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.md
@@ -48,6 +48,7 @@
| Interface | Description |
| --- | --- |
| [AggParamOption](./kibana-plugin-plugins-data-public.aggparamoption.md) | |
+| [ApplyGlobalFilterActionContext](./kibana-plugin-plugins-data-public.applyglobalfilteractioncontext.md) | |
| [DataPublicPluginSetup](./kibana-plugin-plugins-data-public.datapublicpluginsetup.md) | |
| [DataPublicPluginStart](./kibana-plugin-plugins-data-public.datapublicpluginstart.md) | |
| [EsQueryConfig](./kibana-plugin-plugins-data-public.esqueryconfig.md) | |
@@ -125,6 +126,7 @@
| [AggGroupName](./kibana-plugin-plugins-data-public.agggroupname.md) | |
| [AggParam](./kibana-plugin-plugins-data-public.aggparam.md) | |
| [CustomFilter](./kibana-plugin-plugins-data-public.customfilter.md) | |
+| [EsaggsExpressionFunctionDefinition](./kibana-plugin-plugins-data-public.esaggsexpressionfunctiondefinition.md) | |
| [EsQuerySortValue](./kibana-plugin-plugins-data-public.esquerysortvalue.md) | |
| [ExistsFilter](./kibana-plugin-plugins-data-public.existsfilter.md) | |
| [FieldFormatId](./kibana-plugin-plugins-data-public.fieldformatid.md) | id type is needed for creating custom converters. |
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.plugin.setup.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.plugin.setup.md
index 51bc46bbdccc8..a0c9b38792825 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.plugin.setup.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.plugin.setup.md
@@ -7,15 +7,15 @@
Signature:
```typescript
-setup(core: CoreSetup, { expressions, uiActions }: DataSetupDependencies): DataPublicPluginSetup;
+setup(core: CoreSetup, { expressions, uiActions, usageCollection }: DataSetupDependencies): DataPublicPluginSetup;
```
## Parameters
| Parameter | Type | Description |
| --- | --- | --- |
-| core | CoreSetup | |
-| { expressions, uiActions } | DataSetupDependencies | |
+| core | CoreSetup<DataStartDependencies, DataPublicPluginStart> | |
+| { expressions, uiActions, usageCollection } | DataSetupDependencies | |
Returns:
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.md
index abd57f3a9568b..1291af5359887 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.md
@@ -18,4 +18,5 @@ export interface SearchInterceptorDeps
| [http](./kibana-plugin-plugins-data-public.searchinterceptordeps.http.md) | CoreStart['http'] | |
| [toasts](./kibana-plugin-plugins-data-public.searchinterceptordeps.toasts.md) | ToastsStart | |
| [uiSettings](./kibana-plugin-plugins-data-public.searchinterceptordeps.uisettings.md) | CoreStart['uiSettings'] | |
+| [usageCollector](./kibana-plugin-plugins-data-public.searchinterceptordeps.usagecollector.md) | SearchUsageCollector | |
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.usagecollector.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.usagecollector.md
new file mode 100644
index 0000000000000..21afce1927676
--- /dev/null
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.searchinterceptordeps.usagecollector.md
@@ -0,0 +1,11 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [SearchInterceptorDeps](./kibana-plugin-plugins-data-public.searchinterceptordeps.md) > [usageCollector](./kibana-plugin-plugins-data-public.searchinterceptordeps.usagecollector.md)
+
+## SearchInterceptorDeps.usageCollector property
+
+Signature:
+
+```typescript
+usageCollector?: SearchUsageCollector;
+```
diff --git a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.esaggsexpressionfunctiondefinition.md b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.esaggsexpressionfunctiondefinition.md
new file mode 100644
index 0000000000000..572c4e0c1eb2f
--- /dev/null
+++ b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.esaggsexpressionfunctiondefinition.md
@@ -0,0 +1,11 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-server](./kibana-plugin-plugins-data-server.md) > [EsaggsExpressionFunctionDefinition](./kibana-plugin-plugins-data-server.esaggsexpressionfunctiondefinition.md)
+
+## EsaggsExpressionFunctionDefinition type
+
+Signature:
+
+```typescript
+export declare type EsaggsExpressionFunctionDefinition = ExpressionFunctionDefinition<'esaggs', Input, Arguments, Output>;
+```
diff --git a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.md b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.md
index ca8ad8fdc06ea..3afba80064f08 100644
--- a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.md
+++ b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.md
@@ -14,5 +14,6 @@ export interface ISearchSetup
| Property | Type | Description |
| --- | --- | --- |
-| [registerSearchStrategy](./kibana-plugin-plugins-data-server.isearchsetup.registersearchstrategy.md) | (name: string, strategy: ISearchStrategy) => void | Extension point exposed for other plugins to register their own search strategies. |
+| [registerSearchStrategy](./kibana-plugin-plugins-data-server.isearchsetup.registersearchstrategy.md) | TRegisterSearchStrategy | Extension point exposed for other plugins to register their own search strategies. |
+| [usage](./kibana-plugin-plugins-data-server.isearchsetup.usage.md) | SearchUsage | Used internally for telemetry |
diff --git a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.usage.md b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.usage.md
new file mode 100644
index 0000000000000..85abd9d9dba98
--- /dev/null
+++ b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.isearchsetup.usage.md
@@ -0,0 +1,13 @@
+
+
+[Home](./index.md) > [kibana-plugin-plugins-data-server](./kibana-plugin-plugins-data-server.md) > [ISearchSetup](./kibana-plugin-plugins-data-server.isearchsetup.md) > [usage](./kibana-plugin-plugins-data-server.isearchsetup.usage.md)
+
+## ISearchSetup.usage property
+
+Used internally for telemetry
+
+Signature:
+
+```typescript
+usage: SearchUsage;
+```
diff --git a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.md b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.md
index 9adefda718338..6bf481841f334 100644
--- a/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.md
+++ b/docs/development/plugins/data/server/kibana-plugin-plugins-data-server.md
@@ -69,6 +69,7 @@
| Type Alias | Description |
| --- | --- |
+| [EsaggsExpressionFunctionDefinition](./kibana-plugin-plugins-data-server.esaggsexpressionfunctiondefinition.md) | |
| [FieldFormatsGetConfigFn](./kibana-plugin-plugins-data-server.fieldformatsgetconfigfn.md) | |
| [IFieldFormatsRegistry](./kibana-plugin-plugins-data-server.ifieldformatsregistry.md) | |
| [ParsedInterval](./kibana-plugin-plugins-data-server.parsedinterval.md) | |
diff --git a/docs/drilldowns/explore-underlying-data.asciidoc b/docs/drilldowns/explore-underlying-data.asciidoc
new file mode 100644
index 0000000000000..e0f940f73e96e
--- /dev/null
+++ b/docs/drilldowns/explore-underlying-data.asciidoc
@@ -0,0 +1,41 @@
+[[explore-underlying-data]]
+== Explore the underlying data for a visualization
+
+++++
+Explore the underlying data
+++++
+
+Dashboard panels have an *Explore underlying data* action that navigates you to *Discover*,
+where you can narrow your documents to the ones you'll most likely use in a visualization.
+This action is available for visualizations backed by a single index pattern.
+
+You can access *Explore underlying data* in two ways: from the panel context
+menu or from the menu that appears when you interact with the chart.
+
+[float]
+[[explore-data-from-panel-context-menu]]
+=== Explore data from panel context menu
+
+The *Explore underlying data* action in the panel menu navigates you to Discover,
+carrying over the index pattern, filters, query, and time range for the visualization.
+
+[role="screenshot"]
+image::images/explore_data_context_menu.png[Explore underlying data from panel context menu]
+
+[float]
+[[explore-data-from-chart]]
+=== Explore data from chart action
+
+Initiating *Explore underlying data* from the chart also navigates to Discover,
+carrying over the current context for the visualization. In addition, this action
+applies the filters and time range created by the events that triggered the action.
+
+[role="screenshot"]
+image::images/explore_data_in_chart.png[Explore underlying data from chart]
+
+You can disable this action by adding the following line to your `kibana.yml` config.
+
+["source","yml"]
+-----------
+xpack.discoverEnhanced.actions.exploreDataInChart.enabled: false
+-----------
diff --git a/docs/drilldowns/images/explore_data_context_menu.png b/docs/drilldowns/images/explore_data_context_menu.png
new file mode 100644
index 0000000000000..5742991030c89
Binary files /dev/null and b/docs/drilldowns/images/explore_data_context_menu.png differ
diff --git a/docs/drilldowns/images/explore_data_in_chart.png b/docs/drilldowns/images/explore_data_in_chart.png
new file mode 100644
index 0000000000000..05d4f5fac9b2f
Binary files /dev/null and b/docs/drilldowns/images/explore_data_in_chart.png differ
diff --git a/docs/management/advanced-options.asciidoc b/docs/management/advanced-options.asciidoc
index 561919738786e..7dc360fd721f4 100644
--- a/docs/management/advanced-options.asciidoc
+++ b/docs/management/advanced-options.asciidoc
@@ -228,7 +228,7 @@ might increase the search time. This setting is off by default. Users must opt-i
`siem:defaultAnomalyScore`:: The threshold above which Machine Learning job anomalies are displayed in the SIEM app.
`siem:defaultIndex`:: A comma-delimited list of Elasticsearch indices from which the SIEM app collects events.
`siem:ipReputationLinks`:: A JSON array containing links for verifying the reputation of an IP address. The links are displayed on
-{siem-guide}/siem-ui-overview.html#network-ui[IP detail] pages.
+{security-guide}/siem-ui-overview.html#network-ui[IP detail] pages.
`siem:enableNewsFeed`:: Enables the security news feed on the SIEM *Overview*
page.
`siem:newsFeedUrl`:: The URL from which the security news feed content is
@@ -247,7 +247,7 @@ retrieved.
`timelion:es.timefield`:: The default field containing a timestamp when using the `.es()` query.
`timelion:graphite.url`:: [experimental] Used with graphite queries, this is the URL of your graphite host
in the form https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite. This URL can be
-selected from a whitelist configured in the `kibana.yml` under `timelion.graphiteUrls`.
+selected from an allow-list configured in the `kibana.yml` under `timelion.graphiteUrls`.
`timelion:max_buckets`:: The maximum number of buckets a single data source can return.
This value is used for calculating automatic intervals in visualizations.
`timelion:min_interval`:: The smallest interval to calculate when using "auto".
diff --git a/docs/maps/heatmap-layer.asciidoc b/docs/maps/heatmap-layer.asciidoc
index 7149bc5623169..9dc2781db44a3 100644
--- a/docs/maps/heatmap-layer.asciidoc
+++ b/docs/maps/heatmap-layer.asciidoc
@@ -7,8 +7,8 @@ Heat map layers cluster point data to show locations with higher densities.
[role="screenshot"]
image::maps/images/heatmap_layer.png[]
-To add a heat map layer to your map, click *Add layer*, then select the *Heat map* layer.
-The index must contain at least one field mapped as {ref}/geo-point.html[geo_point].
+To add a heat map layer to your map, click *Add layer*, then select *Heat map*.
+The index must contain at least one field mapped as {ref}/geo-point.html[geo_point] or {ref}/geo-shape.html[geo_shape].
NOTE: Only count, sum, and unique count metric aggregations are available with the grid aggregation source and heat map layers.
Average, min, and max are turned off because the heat map will blend nearby values.
diff --git a/docs/maps/maps-aggregations.asciidoc b/docs/maps/maps-aggregations.asciidoc
index 872ed1cdedb7e..6b2dc8077bc30 100644
--- a/docs/maps/maps-aggregations.asciidoc
+++ b/docs/maps/maps-aggregations.asciidoc
@@ -47,7 +47,7 @@ Grid aggregation layers use {ref}/search-aggregations-bucket-geotilegrid-aggrega
Symbolize grid aggregation metrics as:
*Clusters*:: Creates a <> with a cluster symbol for each gridded cell.
-The cluster location is the weighted centroid for all geo-points in the gridded cell.
+The cluster location is the weighted centroid for all documents in the gridded cell.
*Grid rectangles*:: Creates a <> with a bounding box polygon for each gridded cell.
@@ -60,7 +60,7 @@ To enable a grid aggregation layer:
To enable a blended layer that dynamically shows clusters or documents:
. Click *Add layer*, then select the *Documents* layer.
-. Configure *Index pattern* and the *Geospatial field*. To enable clustering, the *Geospatial field* must be set to a field mapped as {ref}/geo-point.html[geo_point].
+. Configure *Index pattern* and the *Geospatial field*.
. In *Scaling*, select *Show clusters when results exceed 10000*.
diff --git a/docs/maps/maps-getting-started.asciidoc b/docs/maps/maps-getting-started.asciidoc
index 09a4dc61cae28..e0d43a571a331 100644
--- a/docs/maps/maps-getting-started.asciidoc
+++ b/docs/maps/maps-getting-started.asciidoc
@@ -68,40 +68,17 @@ The first layer you'll add is a choropleth layer to shade world countries
by web log traffic. Darker shades symbolize countries with more web log traffic,
and lighter shades symbolize countries with less traffic.
-==== Add a vector layer to display world country boundaries
-
. Click *Add layer*.
-. Select the *EMS Boundaries* layer.
+. Select *Choropleth*.
. From the *Layer* dropdown menu, select *World Countries*.
+. Under *Statistics source*, set *Index pattern* to *kibana_sample_data_logs*.
+. Set *Join field* to *geo.src*.
. Click the *Add layer* button.
. Set *Name* to `Total Requests by Country`.
. Set *Opacity* to 50%.
. Click *Add* under *Tooltip fields*.
. In the popover, select *ISO 3166-1 alpha-2 code* and *name* and click *Add*.
-
-===== Join the vector layer with the sample web log index
-
-You now have a vector layer containing the world countries.
-To symbolize countries by web traffic, you'll need to augment the world country features with the count of Elasticsearch weblog documents originating from each country.
-To do this, you'll create a <> to link the vector source *World Countries* to
-the {es} index `kibana_sample_data_logs` on the shared key iso2 = geo.src.
-
-. Click plus image:maps/images/gs_plus_icon.png[] next to the *Term Joins* label.
-. Click *Join --select--*
-. Set *Left field* to *ISO 3166-1 alpha-2 code*.
-. Set *Right source* to *kibana_sample_data_logs*.
-. Set *Right field* to *geo.src*.
-. Click *and use metric count*.
-. Set *Custom label* to *web logs count*.
-
-===== Set the layer style
-
-All of the world countries are still a single color because the layer is using <>.
-To shade the world countries based on which country is sending the most requests, you'll need to use <>.
-
-. Under *Fill color*, change the selected value from *Solid* to *By value*.
-. In the field select input, select *web logs count*.
-. Select the grey color ramp.
+. Under *Fill color*, select the grey color ramp.
. Under *Border color*, change the selected color to *white*.
. Click *Save & close*.
+
@@ -127,7 +104,7 @@ This layer displays web log documents as points.
The layer is only visible when users zoom in the map past zoom level 9.
. Click *Add layer*.
-. Click the *Documents* layer.
+. Select *Documents*.
. Set *Index pattern* to *kibana_sample_data_logs*.
. Click the *Add layer* button.
. Set *Name* to `Actual Requests`.
@@ -161,7 +138,7 @@ image::maps/images/grid_metrics_both.png[]
===== Add the layer
. Click *Add layer*.
-. Click the *Clusters and grids* layer.
+. Select *Clusters and grids*.
. Set *Index pattern* to *kibana_sample_data_logs*.
. Click the *Add layer* button.
. Set *Name* to `Total Requests and Bytes`.
diff --git a/docs/maps/tile-layer.asciidoc b/docs/maps/tile-layer.asciidoc
index 6da8dbad0a66d..2a60504c3c790 100644
--- a/docs/maps/tile-layer.asciidoc
+++ b/docs/maps/tile-layer.asciidoc
@@ -7,7 +7,7 @@ Tile layers display image tiles served from a tile server.
[role="screenshot"]
image::maps/images/tile_layer.png[]
-To add a tile layer to your map, click *Add layer*, then select one of the following layers:
+To add a tile layer to your map, click *Add layer*, then select one of the following:
*Configured Tile Map Service*:: Tile map service configured in kibana.yml.
See map.tilemap.url in <> for details.
@@ -16,4 +16,6 @@ See map.tilemap.url in <> for details.
*Tile Map Service*:: Tile map service configured in interface.
+*Vector tiles*:: Data service implementing the Mapbox vector tile specification.
+
*Web Map Service*:: Maps from OGC Standard WMS.
diff --git a/docs/maps/vector-layer.asciidoc b/docs/maps/vector-layer.asciidoc
index d6a5931659a40..494bd915b7f56 100644
--- a/docs/maps/vector-layer.asciidoc
+++ b/docs/maps/vector-layer.asciidoc
@@ -7,15 +7,14 @@ Vector layers display points, lines, and polygons.
[role="screenshot"]
image::maps/images/vector_layer.png[]
-To add a vector layer to your map, click *Add layer*, then select one of the following layers:
+To add a vector layer to your map, click *Add layer*, then select one of the following:
-*Clusters and grids*:: Geospatial data grouped in grids with metrics for each gridded cell.
-The index must contain at least one field mapped as {ref}/geo-point.html[geo_point].
+*Choropleth*:: Shaded areas to compare statistics across boundaries.
-*Configured GeoJSON*:: Vector data from hosted GeoJSON configured in kibana.yml.
-See map.regionmap.* in <> for details.
+*Clusters and grids*:: Geospatial data grouped in grids with metrics for each gridded cell.
+The index must contain at least one field mapped as {ref}/geo-point.html[geo_point] or {ref}/geo-shape.html[geo_shape].
-*Documents*:: Vector data from a Kibana index pattern.
+*Documents*:: Points, lines, and polygons from Elasticsearch.
The index must contain at least one field mapped as {ref}/geo-point.html[geo_point] or {ref}/geo-shape.html[geo_shape].
NOTE: Document results are limited to the `index.max_result_window` index setting, which defaults to 10000.
diff --git a/docs/settings/apm-settings.asciidoc b/docs/settings/apm-settings.asciidoc
index f78b0642f7fa3..b396c40aa21f9 100644
--- a/docs/settings/apm-settings.asciidoc
+++ b/docs/settings/apm-settings.asciidoc
@@ -47,7 +47,7 @@ Changing these settings may disable features of the APM App.
| Set to `false` to hide the APM app from the menu. Defaults to `true`.
| `xpack.apm.ui.transactionGroupBucketSize`
- | Number of top transaction groups displayed in the APM app. Defaults to `100`.
+ | Number of top transaction groups displayed in the APM app. Defaults to `1000`.
| `xpack.apm.ui.maxTraceItems` {ess-icon}
| Maximum number of child items displayed when viewing trace details. Defaults to `1000`.
diff --git a/docs/settings/ingest-manager-settings.asciidoc b/docs/settings/ingest-manager-settings.asciidoc
index 604471edc4d59..30e11f726c26b 100644
--- a/docs/settings/ingest-manager-settings.asciidoc
+++ b/docs/settings/ingest-manager-settings.asciidoc
@@ -8,8 +8,7 @@
experimental[]
You can configure `xpack.ingestManager` settings in your `kibana.yml`.
-By default, {ingest-manager} is not enabled. You need to
-enable it. To use {fleet}, you also need to configure {kib} and {es} hosts.
+By default, {ingest-manager} is enabled. To use {fleet}, you also need to configure {kib} and {es} hosts.
See the {ingest-guide}/index.html[Ingest Management] docs for more information.
@@ -19,7 +18,7 @@ See the {ingest-guide}/index.html[Ingest Management] docs for more information.
[cols="2*<"]
|===
| `xpack.ingestManager.enabled` {ess-icon}
- | Set to `true` to enable {ingest-manager}.
+ | Set to `true` (default) to enable {ingest-manager}.
| `xpack.ingestManager.fleet.enabled` {ess-icon}
| Set to `true` (default) to enable {fleet}.
|===
diff --git a/docs/settings/monitoring-settings.asciidoc b/docs/settings/monitoring-settings.asciidoc
index 48b5b5eb5d0c0..5b8fa0725d96b 100644
--- a/docs/settings/monitoring-settings.asciidoc
+++ b/docs/settings/monitoring-settings.asciidoc
@@ -7,7 +7,7 @@
By default, the Monitoring application is enabled, but data collection
is disabled. When you first start {kib} monitoring, you are prompted to
-enable data collection. If you are using {security}, you must be
+enable data collection. If you are using {stack-security-features}, you must be
signed in as a user with the `cluster:manage` privilege to enable
data collection. The built-in `superuser` role has this privilege and the
built-in `elastic` user has this role.
diff --git a/docs/setup/install.asciidoc b/docs/setup/install.asciidoc
index 73036da8f1390..cb47210cb3f08 100644
--- a/docs/setup/install.asciidoc
+++ b/docs/setup/install.asciidoc
@@ -53,8 +53,8 @@ Formulae are available from the Elastic Homebrew tap for installing {kib} on mac
<>
IMPORTANT: If your Elasticsearch installation is protected by
-{ref}/elasticsearch-security.html[{security}] see
-{kibana-ref}/using-kibana-with-security.html[Configuring security in Kibana] for
+{ref}/elasticsearch-security.html[{stack-security-features}], see
+{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}] for
additional setup instructions.
include::install/targz.asciidoc[]
diff --git a/docs/siem/index.asciidoc b/docs/siem/index.asciidoc
index 9d17b5209304f..ceb4ac2bf1f34 100644
--- a/docs/siem/index.asciidoc
+++ b/docs/siem/index.asciidoc
@@ -19,7 +19,7 @@ image::siem/images/overview-ui.png[SIEM Overview in Kibana]
== Add data
Kibana provides step-by-step instructions to help you add data. The
-{siem-guide}[SIEM Guide] is a good source for more
+{security-guide}[Security Guide] is a good source for more
detailed information and instructions.
[float]
diff --git a/docs/siem/siem-ui.asciidoc b/docs/siem/siem-ui.asciidoc
index 21a2ed55fdfdc..1caa13dc6c903 100644
--- a/docs/siem/siem-ui.asciidoc
+++ b/docs/siem/siem-ui.asciidoc
@@ -43,7 +43,7 @@ for creating signals. The SIEM app comes with prebuilt rules that search for
suspicious activity on your network and hosts. Additionally, you can
create your own rules.
-See {siem-guide}/detection-engine-overview.html[Detections] in the SIEM
+See {security-guide}/detection-engine-overview.html[Detections] in the SIEM
Guide for information on managing detection rules and signals via the UI
or the Detections API.
@@ -61,7 +61,7 @@ saved Timelines. Additionally, you can send cases to external systems from
within SIEM (currently ServiceNow and Jira).
For information about opening, updating, and closing cases, see
-{siem-guide}/cases-overview.html[Cases] in the SIEM Guide.
+{security-guide}/cases-overview.html[Cases] in the SIEM Guide.
[role="screenshot"]
image::siem/images/cases-ui.png[]
@@ -82,7 +82,7 @@ Hosts and Network pages, or even from within Timeline itself.
A timeline is responsive and persists as you move through the SIEM app
collecting data.
-See the {siem-guide}[SIEM Guide] for more details on data sources and an
+See the {security-guide}[Security Guide] for more details on data sources and an
overview of UI elements and capabilities.
[float]
diff --git a/docs/user/alerting/action-types.asciidoc b/docs/user/alerting/action-types.asciidoc
index e8dcf689df8e4..1743edb10f92b 100644
--- a/docs/user/alerting/action-types.asciidoc
+++ b/docs/user/alerting/action-types.asciidoc
@@ -23,6 +23,10 @@ a| <>
| Add a message to a Kibana log.
+a| <>
+
+| Push or update data to a new incident in ServiceNow.
+
a| <>
| Send a message to a Slack channel or user.
@@ -55,3 +59,4 @@ include::action-types/server-log.asciidoc[]
include::action-types/slack.asciidoc[]
include::action-types/webhook.asciidoc[]
include::action-types/pre-configured-connectors.asciidoc[]
+include::action-types/servicenow.asciidoc[]
diff --git a/docs/user/alerting/action-types/servicenow.asciidoc b/docs/user/alerting/action-types/servicenow.asciidoc
new file mode 100644
index 0000000000000..32f828aea2357
--- /dev/null
+++ b/docs/user/alerting/action-types/servicenow.asciidoc
@@ -0,0 +1,72 @@
+[role="xpack"]
+[[servicenow-action-type]]
+=== ServiceNow action
+
+The ServiceNow action type uses the https://developer.servicenow.com/app.do#!/rest_api_doc?v=orlando&id=c_TableAPI[V2 Table API] to create ServiceNow incidents.
+
+[float]
+[[servicenow-connector-configuration]]
+==== Connector configuration
+
+ServiceNow connectors have the following configuration properties:
+
+Name:: The name of the connector. The name is used to identify a connector in the management UI connector listing, or in the connector list when configuring an action.
+URL:: ServiceNow instance URL.
+Username:: Username for HTTP Basic authentication.
+Password:: Password for HTTP Basic authentication.
+
+[float]
+[[Preconfigured-servicenow-configuration]]
+==== Preconfigured action type
+
+[source,text]
+--
+ my-servicenow:
+ name: preconfigured-servicenow-action-type
+ actionTypeId: .servicenow
+ config:
+ apiUrl: https://dev94428.service-now.com/
+ secrets:
+ username: testuser
+ password: passwordkeystorevalue
+--
+
+`config` defines the configuration specific to the action type and contains the following properties:
+
+[cols="2*<"]
+|===
+
+| `apiUrl`
+| An address that corresponds to *URL*.
+
+|===
+
+`secrets` defines sensitive information for the action type:
+
+[cols="2*<"]
+|===
+
+| `username`
+| A string that corresponds to *Username*.
+
+| `password`
+| A string that corresponds to *Password*. Should be stored in the <>.
+
+|===
+
+[[servicenow-action-configuration]]
+==== Action configuration
+
+ServiceNow actions have the following configuration properties:
+
+Urgency:: The extent to which the incident resolution can be delayed.
+Severity:: The severity of the incident.
+Impact:: The effect an incident has on business. Can be measured by the number of affected users or by how critical it is to the business in question.
+Short description:: A short description of the incident, used for searching the contents of the knowledge base.
+Description:: The details about the incident.
+Additional comments:: Additional information for the client, such as how to troubleshoot the issue.
+
+[[configuring-servicenow]]
+==== Configuring and testing ServiceNow
+
+ServiceNow offers free https://developer.servicenow.com/dev.do#!/guides/madrid/now-platform/pdi-guide/obtaining-a-pdi[Personal Developer Instances], which you can use to test incidents.
diff --git a/docs/user/dashboard.asciidoc b/docs/user/dashboard.asciidoc
index a812d4e3bdd2d..b812af7e981bf 100644
--- a/docs/user/dashboard.asciidoc
+++ b/docs/user/dashboard.asciidoc
@@ -160,6 +160,7 @@ When you're finished adding and arranging the panels, save the dashboard.
. Enter the dashboard *Title* and optional *Description*, then *Save* the dashboard.
include::{kib-repo-dir}/drilldowns/drilldowns.asciidoc[]
+include::{kib-repo-dir}/drilldowns/explore-underlying-data.asciidoc[]
[[sharing-dashboards]]
== Share the dashboard
diff --git a/docs/user/ml/images/ml-annotations-list.jpg b/docs/user/ml/images/ml-annotations-list.jpg
deleted file mode 100644
index 8b1194dd20c0f..0000000000000
Binary files a/docs/user/ml/images/ml-annotations-list.jpg and /dev/null differ
diff --git a/docs/user/ml/images/ml-annotations-list.png b/docs/user/ml/images/ml-annotations-list.png
new file mode 100644
index 0000000000000..f1a0b66241126
Binary files /dev/null and b/docs/user/ml/images/ml-annotations-list.png differ
diff --git a/docs/user/ml/images/ml-job-management.jpg b/docs/user/ml/images/ml-job-management.jpg
deleted file mode 100644
index efdf7923c0faa..0000000000000
Binary files a/docs/user/ml/images/ml-job-management.jpg and /dev/null differ
diff --git a/docs/user/ml/images/ml-job-management.png b/docs/user/ml/images/ml-job-management.png
new file mode 100644
index 0000000000000..4589c7093a7cf
Binary files /dev/null and b/docs/user/ml/images/ml-job-management.png differ
diff --git a/docs/user/ml/images/ml-settings.jpg b/docs/user/ml/images/ml-settings.jpg
deleted file mode 100644
index 3713be005924d..0000000000000
Binary files a/docs/user/ml/images/ml-settings.jpg and /dev/null differ
diff --git a/docs/user/ml/images/ml-settings.png b/docs/user/ml/images/ml-settings.png
new file mode 100644
index 0000000000000..f5c9fca647389
Binary files /dev/null and b/docs/user/ml/images/ml-settings.png differ
diff --git a/docs/user/ml/images/ml-single-metric-viewer.jpg b/docs/user/ml/images/ml-single-metric-viewer.jpg
deleted file mode 100644
index 2fbb9387d1e29..0000000000000
Binary files a/docs/user/ml/images/ml-single-metric-viewer.jpg and /dev/null differ
diff --git a/docs/user/ml/images/ml-single-metric-viewer.png b/docs/user/ml/images/ml-single-metric-viewer.png
new file mode 100644
index 0000000000000..04c21d9bc533a
Binary files /dev/null and b/docs/user/ml/images/ml-single-metric-viewer.png differ
diff --git a/docs/user/ml/images/outliers.png b/docs/user/ml/images/outliers.png
index 3f4c5f6c6bbf0..874ebbc79201c 100644
Binary files a/docs/user/ml/images/outliers.png and b/docs/user/ml/images/outliers.png differ
diff --git a/docs/user/ml/index.asciidoc b/docs/user/ml/index.asciidoc
index 1bc74ce87de08..214dae2b96e04 100644
--- a/docs/user/ml/index.asciidoc
+++ b/docs/user/ml/index.asciidoc
@@ -47,20 +47,20 @@ create {anomaly-jobs} and manage jobs and {dfeeds} from the *Job Management*
pane:
[role="screenshot"]
-image::user/ml/images/ml-job-management.jpg[Job Management]
+image::user/ml/images/ml-job-management.png[Job Management]
You can use the *Settings* pane to create and edit
{ml-docs}/ml-calendars.html[calendars] and the filters that are used in
{ml-docs}/ml-rules.html[custom rules]:
[role="screenshot"]
-image::user/ml/images/ml-settings.jpg[Calendar Management]
+image::user/ml/images/ml-settings.png[Calendar Management]
The *Anomaly Explorer* and *Single Metric Viewer* display the results of your
{anomaly-jobs}. For example:
[role="screenshot"]
-image::user/ml/images/ml-single-metric-viewer.jpg[Single Metric Viewer]
+image::user/ml/images/ml-single-metric-viewer.png[Single Metric Viewer]
You can optionally add annotations by drag-selecting a period of time in
the *Single Metric Viewer* and adding a description. For example, you can add an
@@ -68,7 +68,7 @@ explanation for anomalies in that time period or provide notes about what is
occurring in your operational environment at that time:
[role="screenshot"]
-image::user/ml/images/ml-annotations-list.jpg[Single Metric Viewer with annotations]
+image::user/ml/images/ml-annotations-list.png[Single Metric Viewer with annotations]
In some circumstances, annotations are also added automatically. For example, if
the {anomaly-job} detects that there is missing data, it annotates the affected
@@ -94,8 +94,8 @@ The Elastic {ml} {dfanalytics} feature enables you to analyze your data using
indices that contain the results alongside your source data.
If you have a license that includes the {ml-features}, you can create
-{dfanalytics-jobs} and view their results on the *Analytics* page
-in {kib}. For example:
+{dfanalytics-jobs} and view their results on the *Data Frame Analytics* page in
+{kib}. For example:
[role="screenshot"]
image::user/ml/images/outliers.png[{oldetection-cap} results in {kib}]
diff --git a/docs/user/monitoring/monitoring-kibana.asciidoc b/docs/user/monitoring/monitoring-kibana.asciidoc
index b9ec3982eb3c5..bb8b3e5d42851 100644
--- a/docs/user/monitoring/monitoring-kibana.asciidoc
+++ b/docs/user/monitoring/monitoring-kibana.asciidoc
@@ -20,9 +20,10 @@ node in the production cluster. By default, it is is disabled (`false`).
+
--
NOTE: You can specify this setting in either the `elasticsearch.yml` on each
-node or across the cluster as a dynamic cluster setting. If {es}
-{security-features} are enabled, you must have `monitor` cluster privileges to
-view the cluster settings and `manage` cluster privileges to change them.
+node or across the cluster as a dynamic cluster setting. If
+{stack-security-features} are enabled, you must have `monitor` cluster
+privileges to view the cluster settings and `manage` cluster privileges to
+change them.
--
@@ -33,7 +34,7 @@ view the cluster settings and `manage` cluster privileges to change them.
--
By default, if you are running {kib} locally, go to `http://localhost:5601/`.
-If {es} {security-features} are enabled, log in.
+If {security-features} are enabled, log in.
--
... Open the menu, then go to *Stack Monitoring*. If data collection is
@@ -80,13 +81,13 @@ monitoring cluster prevents production cluster outages from impacting your
ability to access your monitoring data. It also prevents monitoring activities
from impacting the performance of your production cluster.
-If {security} is enabled on the production cluster, use an HTTPS URL such
-as `https://:9200` in this setting.
+If {security-features} are enabled on the production cluster, use an HTTPS
+URL such as `https://:9200` in this setting.
===============================
--
-. If the Elastic {security-features} are enabled on the production cluster:
+. If {security-features} are enabled on the production cluster:
.. Verify that there is a
valid user ID and password in the `elasticsearch.username` and
diff --git a/docs/user/reporting/chromium-sandbox.asciidoc b/docs/user/reporting/chromium-sandbox.asciidoc
index bfef5b8b86c6b..dcb421261c067 100644
--- a/docs/user/reporting/chromium-sandbox.asciidoc
+++ b/docs/user/reporting/chromium-sandbox.asciidoc
@@ -2,14 +2,16 @@
[[reporting-chromium-sandbox]]
=== Chromium sandbox
-When {reporting} uses the Chromium browser for generating PDF reports, it's recommended to use the sandbox for
-an additional layer of security. The Chromium sandbox uses operating system provided mechanisms to ensure that
-code execution cannot make persistent changes to the computer or access confidential information. The specific
-sandboxing techniques differ for each operating system.
+When the {report-features} use the Chromium browser to generate PDF reports,
+it's recommended to use the sandbox for an additional layer of security. The
+Chromium sandbox uses operating system provided mechanisms to ensure that
+code execution cannot make persistent changes to the computer or access
+confidential information. The specific sandboxing techniques differ for each
+operating system.
==== Linux sandbox
The Linux sandbox depends on user namespaces, which were introduced with the 3.8 Linux kernel. However, many
-distributions don't have user namespaces enabled by default, or they require the CAP_SYS_ADMIN capability. {reporting}
+distributions don't have user namespaces enabled by default, or they require the CAP_SYS_ADMIN capability. The {report-features}
will automatically disable the sandbox when it is running on Debian and CentOS as additional steps are required to enable
unprivileged usernamespaces. In these situations, you'll see the following message in your {kib} startup logs:
`Chromium sandbox provides an additional layer of protection, but is not supported for your OS.
diff --git a/docs/user/reporting/configuring-reporting.asciidoc b/docs/user/reporting/configuring-reporting.asciidoc
index 7489e2cf51f61..ca2d79bb2dec0 100644
--- a/docs/user/reporting/configuring-reporting.asciidoc
+++ b/docs/user/reporting/configuring-reporting.asciidoc
@@ -2,8 +2,8 @@
[[configuring-reporting]]
== Reporting configuration
-You can configure settings in `kibana.yml` to control how {reporting}
-communicates with the {kib} server, manages background jobs, and captures
+You can configure settings in `kibana.yml` to control how the {report-features}
+communicate with the {kib} server, manage background jobs, and capture
screenshots. See <> for the complete
list of settings.
@@ -11,9 +11,9 @@ list of settings.
[[encryption-keys]]
=== Encryption keys for multiple {kib} instances
-By default, a new encryption key is generated for {reporting} each time
-you start {kib}. This means if a static encryption key is not persisted in the
-{kib} configuration, any pending reports will fail when you restart {kib}.
+By default, a new encryption key is generated for the {report-features} each
+time you start {kib}. This means if a static encryption key is not persisted in
+the {kib} configuration, any pending reports will fail when you restart {kib}.
If you are load balancing across multiple {kib} instances, they need to have
the same reporting encryption key. Otherwise, report generation will fail if a
diff --git a/docs/user/reporting/development/index.asciidoc b/docs/user/reporting/development/index.asciidoc
index a64e540da0c70..4e86c803bd82d 100644
--- a/docs/user/reporting/development/index.asciidoc
+++ b/docs/user/reporting/development/index.asciidoc
@@ -1,9 +1,11 @@
[role="xpack"]
[[reporting-integration]]
== Reporting integration
-Integrating a {kib} application with {reporting} requires a minimum amount of code, and the goal is to not have to
-modify the Reporting code as we add additional applications. Instead, applications abide by a contract that Reporting
-uses to determine the information that is required to export CSVs and PDFs.
+Integrating a {kib} application with the {report-features} requires a minimum
+amount of code, and the goal is to not have to modify the reporting code as we
+add additional applications. Instead, applications abide by a contract that
+{report-features} use to determine the information that is required to export
+CSVs and PDFs.
[IMPORTANT]
==============================================
@@ -18,7 +20,7 @@ X-Pack uses the `share` plugin of the Kibana platform to register actions in the
[float]
=== Generate job URL
-To generate a new {reporting} job, different export types require different `jobParams` that are Rison encoded into a URL
+To generate a new reporting job, different export types require different `jobParams` that are Rison encoded into a URL
that abide by the following convention: `/api/reporting/generate?jobParams=${rison.encode(jobParams)}`. If you use the
aforementioned <> then this detail will be abstracted away, but if you
provide a custom UI for generating the report, you will have to generate the URL and create a POST request to the URL.
diff --git a/docs/user/reporting/gs-index.asciidoc b/docs/user/reporting/gs-index.asciidoc
index 87918ee76340e..46c1fd38b7d69 100644
--- a/docs/user/reporting/gs-index.asciidoc
+++ b/docs/user/reporting/gs-index.asciidoc
@@ -21,7 +21,7 @@ You can also <>.
IMPORTANT: Reports are stored in the `.reporting-*` indices. Any user with
access to these indices has access to every report generated by all users.
-To use {reporting} in a production environment,
+To use {report-features} in a production environment,
<>.
--
diff --git a/docs/user/reporting/index.asciidoc b/docs/user/reporting/index.asciidoc
index 6acdbbe3f0a99..e4e4b461ac2bd 100644
--- a/docs/user/reporting/index.asciidoc
+++ b/docs/user/reporting/index.asciidoc
@@ -19,7 +19,7 @@ image::user/reporting/images/share-button.png["Share"]
[float]
== Setup
-{reporting} is automatically enabled in {kib}. It runs a custom build of the Chromium web browser, which
+The {report-features} are automatically enabled in {kib}. Reporting runs a custom build of the Chromium web browser, which
runs on the server in headless mode to load {kib} and capture the rendered {kib} charts as images.
Chromium is an open-source project not related to Elastic, but the Chromium binary for {kib} has been custom-built by Elastic to ensure it
diff --git a/docs/user/reporting/script-example.asciidoc b/docs/user/reporting/script-example.asciidoc
index 88f48ad1d3182..94301fc6fb448 100644
--- a/docs/user/reporting/script-example.asciidoc
+++ b/docs/user/reporting/script-example.asciidoc
@@ -19,7 +19,8 @@ curl \
// CONSOLE
<1> `POST` method is required.
-<2> Provide user credentials for a user with permission to access Kibana and X-Pack reporting.
+<2> Provide user credentials for a user with permission to access Kibana and
+the {report-features}.
<3> The `kbn-version` header is required for all `POST` requests to Kibana.
**The value must match the dotted-numeral version of the Kibana instance.**
<4> The POST URL. You can copy and paste the URL for any report from the Kibana UI.
diff --git a/docs/user/reporting/watch-example.asciidoc b/docs/user/reporting/watch-example.asciidoc
index 627e31017230c..253722fefecc0 100644
--- a/docs/user/reporting/watch-example.asciidoc
+++ b/docs/user/reporting/watch-example.asciidoc
@@ -52,7 +52,7 @@ report from the Kibana UI.
<3> Optional, default is 40
<4> Optional, default is 15s
<5> Provide user credentials for a user with permission to access Kibana and
-{reporting}.
+the {report-features}.
//For more information, see <>.
//<>.
diff --git a/docs/user/security/reporting.asciidoc b/docs/user/security/reporting.asciidoc
index 30340e1db989a..4e02759ce99cb 100644
--- a/docs/user/security/reporting.asciidoc
+++ b/docs/user/security/reporting.asciidoc
@@ -5,8 +5,8 @@
Reporting operates by creating and updating documents in {es} in response to
user actions in {kib}.
-To use {reporting} with {security} enabled, you need to
-<>.
+To use {report-features} with {security-features} enabled, you need to
+<>.
If you are automatically generating reports with
{ref}/xpack-alerting.html[{watcher}], you also need to configure {watcher}
to trust the {kib} server's certificate.
@@ -118,10 +118,10 @@ reporting_user:
=== Secure the reporting endpoints
In a production environment, you should restrict access to
-the {reporting} endpoints to authorized users. This requires that you:
+the reporting endpoints to authorized users. This requires that you:
-. Enable {security} on your {es} cluster. For more information,
-see {ref}/security-getting-started.html[Getting Started with Security].
+. Enable {stack-security-features} on your {es} cluster. For more information,
+see {ref}/security-getting-started.html[Getting started with security].
. Configure TLS/SSL encryption for the {kib} server. For more information, see
<>.
. Specify the {kib} server's CA certificate chain in `elasticsearch.yml`:
@@ -150,13 +150,13 @@ For more information, see {ref}/notification-settings.html#ssl-notification-sett
--
. Add one or more users who have the permissions
-necessary to use {kib} and {reporting}. For more information, see
+necessary to use {kib} and {report-features}. For more information, see
<>.
-Once you've enabled SSL for {kib}, all requests to the {reporting} endpoints
+Once you've enabled SSL for {kib}, all requests to the reporting endpoints
must include valid credentials. For example, see the following page which
includes a watch that submits requests as the built-in `elastic` user:
<>.
For more information about configuring watches, see
-{ref}/how-watcher-works.html[How Watcher works].
+{ref}/how-watcher-works.html[How {watcher} works].
diff --git a/docs/user/security/securing-kibana.asciidoc b/docs/user/security/securing-kibana.asciidoc
index b30acd0ed2e53..0177ac94bd402 100644
--- a/docs/user/security/securing-kibana.asciidoc
+++ b/docs/user/security/securing-kibana.asciidoc
@@ -5,21 +5,21 @@
Configure security
++++
-{kib} users have to log in when {security} is enabled on your cluster. You
-configure {security} roles for your {kib} users to control what data those users
-can access.
+{kib} users have to log in when {stack-security-features} are enabled on your
+cluster. You configure roles for your {kib} users to control what data those
+users can access.
Most requests made through {kib} to {es} are authenticated by using the
credentials of the logged-in user. There are, however, a few internal requests
that the {kib} server needs to make to the {es} cluster. For this reason, you
must configure credentials for the {kib} server to use for those requests.
-With {security} enabled, if you load a {kib} dashboard that accesses data in an
-index that you are not authorized to view, you get an error that indicates the
-index does not exist. {security} do not currently provide a way to control which
-users can load which dashboards.
+With {security-features} enabled, if you load a {kib} dashboard that accesses
+data in an index that you are not authorized to view, you get an error that
+indicates the index does not exist. The {security-features} do not currently
+provide a way to control which users can load which dashboards.
-To use {kib} with {security}:
+To use {kib} with {security-features}:
. {ref}/configuring-security.html[Configure security in {es}].
@@ -38,8 +38,8 @@ elasticsearch.password: "kibanapassword"
The {kib} server submits requests as this user to access the cluster monitoring
APIs and the `.kibana` index. The server does _not_ need access to user indices.
-The password for the built-in `kibana_system` user is typically set as part of the
-{security} configuration process on {es}. For more information, see
+The password for the built-in `kibana_system` user is typically set as part of
+the security configuration process on {es}. For more information, see
{ref}/built-in-users.html[Built-in users].
--
@@ -53,7 +53,7 @@ as the encryption key.
xpack.security.encryptionKey: "something_at_least_32_characters"
--------------------------------------------------------------------------------
-For more information, see <>.
+For more information, see <>.
--
. Optional: Set a timeout to expire idle sessions. By default, a session stays
diff --git a/docs/visualize/aggregations.asciidoc b/docs/visualize/aggregations.asciidoc
index 868e66d0f4e36..ef38f716f2303 100644
--- a/docs/visualize/aggregations.asciidoc
+++ b/docs/visualize/aggregations.asciidoc
@@ -85,9 +85,9 @@ Bucket aggregations sort documents into buckets, depending on the contents of th
{ref}/search-aggregations-bucket-filter-aggregation.html[Filter]:: Each filter creates a bucket of documents. You can specify a filter as a
<> or <> query string.
-{ref}/search-aggregations-bucket-geohashgrid-aggregation.html[Geohash]:: Displays points based on a geohash. Supported by the tile map and data table visualizations.
+{ref}/search-aggregations-bucket-geohashgrid-aggregation.html[Geohash]:: Displays points based on a geohash. Supported by data table visualizations and <>.
-{ref}/search-aggregations-bucket-geotilegrid-aggregation.html[Geotile]:: Groups points based on web map tiling. Supported by the tile map and data table visualizations.
+{ref}/search-aggregations-bucket-geotilegrid-aggregation.html[Geotile]:: Groups points based on web map tiling. Supported by data table visualizations and <>.
{ref}/search-aggregations-bucket-histogram-aggregation.html[Histogram]:: Builds from a numeric field.
diff --git a/docs/visualize/images/vega_lite_tutorial_1.png b/docs/visualize/images/vega_lite_tutorial_1.png
new file mode 100644
index 0000000000000..4e8d0aba3635b
Binary files /dev/null and b/docs/visualize/images/vega_lite_tutorial_1.png differ
diff --git a/docs/visualize/images/vega_lite_tutorial_2.png b/docs/visualize/images/vega_lite_tutorial_2.png
new file mode 100644
index 0000000000000..523ae91514a11
Binary files /dev/null and b/docs/visualize/images/vega_lite_tutorial_2.png differ
diff --git a/docs/visualize/images/vega_tutorial_3.png b/docs/visualize/images/vega_tutorial_3.png
new file mode 100644
index 0000000000000..e025ecc585807
Binary files /dev/null and b/docs/visualize/images/vega_tutorial_3.png differ
diff --git a/docs/visualize/images/vega_tutorial_4.png b/docs/visualize/images/vega_tutorial_4.png
new file mode 100644
index 0000000000000..c8ee311e9bf5e
Binary files /dev/null and b/docs/visualize/images/vega_tutorial_4.png differ
diff --git a/docs/visualize/vega.asciidoc b/docs/visualize/vega.asciidoc
index 24bd3a44bebba..3a1c57da93f07 100644
--- a/docs/visualize/vega.asciidoc
+++ b/docs/visualize/vega.asciidoc
@@ -3,71 +3,1287 @@
experimental[]
-Build custom visualizations from multiple data sources using Vega
-and Vega-Lite.
+Build custom visualizations using Vega and Vega-Lite, backed by one or more
+data sources including {es}, Elastic Map Service, URL,
+or static data. Use the {kib} extensions to Vega to embed Vega into
+your dashboard, and to add interactivity to the visualizations.
-* *Vega* — A declarative format to create visualizations using JSON.
- Generate interactive displays using D3.
+Vega and Vega-Lite are both declarative formats for creating visualizations
+using JSON. Each uses a different syntax for declaring visualizations,
+and the two are not fully interchangeable.
-* *Vega-Lite* — An easier format to use than Vega that enables more rapid
- data analysis. Compiles into Vega.
+[float]
+[[when-to-vega]]
+=== When to use Vega
-For more information about Vega and Vega-Lite, refer to
-<>.
+Vega and Vega-Lite are capable of building most of the visualizations
+that {kib} provides, but with higher complexity. The most common reason
+to use Vega in {kib} is that {kib} is missing support for the query or
+visualization, for example:
-[float]
-[[create-vega-viz]]
-=== Create Vega visualizations
+* Aggregations using the `nested` or `parent/child` mapping
+* Aggregations without a {kib} index pattern
+* Queries using custom time filters
+* Complex calculations
+* Extracting data from `_source` instead of aggregations
+* Scatter charts
+* Sankey charts
+* Custom maps
+* Using a visual theme that {kib} does not provide
+
+[[vega-lite-tutorial]]
+=== Tutorial: First visualization in Vega-Lite
-You create Vega visualizations by using the text editor, which is
-preconfigured with the options you need.
+In this tutorial, you will learn how to edit Vega-Lite in {kib} to create
+a stacked area chart from an {es} search query. It will give you a starting point
+for a more comprehensive
+https://vega.github.io/vega-lite/tutorials/getting_started.html[introduction to Vega-Lite],
+while only covering the basics.
+
+You will build the stacked area chart from one of the {kib} sample data
+sets.
[role="screenshot"]
-image::images/vega_lite_default.png[]
+image::visualize/images/vega_lite_tutorial_1.png[]
-[float]
-[[vega-schema]]
-==== Change the Vega version
+Before beginning this tutorial, install the <>
+set.
+
+When you first open the Vega editor in {kib}, you will see a pre-populated
+line chart which shows the total number of documents across all your indices
+within the time range.
-The default visualization uses Vega-Lite version 2. To use Vega version 4, edit
-the `schema`.
+[role="screenshot"]
+image::visualize/images/vega_lite_default.png[]
+
+The text editor contains a Vega-Lite spec written in https://hjson.github.io/[HJSON],
+which is similar to JSON but optimized for human editing. As shown in the short example below the list, HJSON supports:
-Go to `$schema`, enter `https://vega.github.io/schema/vega/v5.json`, then click
-*Update*.
+* Comments using // or /* syntax
+* Object keys without quotes
+* String values without quotes
+* Optional commas
+* Double or single quotes
+* Multiline strings
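+
+For example, here is a small illustrative snippet (not taken from the default spec) that
+combines several of these HJSON features:
+
+```yaml
+{
+  // Comments can use // or /* */ syntax
+  title: Event counts from ecommerce
+  mark: "line"  // quoted strings also work, and commas are optional
+  description:
+    '''
+    Multiline strings are wrapped in triple quotes.
+    '''
+}
+```
+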
[float]
-[[vega-type]]
-==== Change the visualization type
+==== Small steps
-The default visualization is a line chart. To change the visualization type,
-change the `mark` value. The supported visualization types are listed in the
-text editor.
+Always work on Vega in the smallest steps possible, and save your work frequently.
+Even small changes can cause unexpected results. Click the "Save" button now.
-Go to `mark`, change the value to a different visualization type, then click
-*Update*.
+The first step is to change the index to one of the <>
+sets. Change
+
+```yaml
+index: _all
+```
+
+to:
+
+```yaml
+index: kibana_sample_data_ecommerce
+```
+
+Click "Update". The result is probably not what you expect. You should see a flat
+line with 0 results.
+
+You've only changed the index, so the difference must be that the query is returning
+no results. You can try the <>,
+but intuition may be faster for this particular problem.
+
+In this case, the problem is that you are querying the field `@timestamp`,
+which does not exist in the `kibana_sample_data_ecommerce` data. Find and replace
+`@timestamp` with `order_date`. This fixes the problem, leaving you with this spec:
+
+.Expand Vega-Lite spec
+[%collapsible%closed]
+====
+[source,yaml]
+----
+{
+ $schema: https://vega.github.io/schema/vega-lite/v4.json
+ title: Event counts from ecommerce
+ data: {
+ url: {
+ %context%: true
+ %timefield%: order_date
+ index: kibana_sample_data_ecommerce
+ body: {
+ aggs: {
+ time_buckets: {
+ date_histogram: {
+ field: order_date
+ interval: {%autointerval%: true}
+ extended_bounds: {
+ min: {%timefilter%: "min"}
+ max: {%timefilter%: "max"}
+ }
+ min_doc_count: 0
+ }
+ }
+ }
+ size: 0
+ }
+ }
+ format: {property: "aggregations.time_buckets.buckets" }
+ }
+
+ mark: line
+
+ encoding: {
+ x: {
+ field: key
+ type: temporal
+ axis: { title: null }
+ }
+ y: {
+ field: doc_count
+ type: quantitative
+ axis: { title: "Document count" }
+ }
+ }
+}
+----
+
+====
+
+Now, let's make the visualization more interesting by adding another aggregation
+to create a stacked area chart. To verify that you have constructed the right
+query, it is easiest to use the {kib} Dev Tools in a separate tab from the
+Vega editor. Open the Dev Tools from the Management section of the navigation.
+
+This query is roughly equivalent to the one that is used in the default
+Vega-Lite spec. Copy it into the Dev Tools:
+
+```js
+POST kibana_sample_data_ecommerce/_search
+{
+ "query": {
+ "range": {
+ "order_date": {
+ "gte": "now-7d"
+ }
+ }
+ },
+ "aggs": {
+ "time_buckets": {
+ "date_histogram": {
+ "field": "order_date",
+ "fixed_interval": "1d",
+ "extended_bounds": {
+ "min": "now-7d"
+ },
+ "min_doc_count": 0
+ }
+ }
+ },
+ "size": 0
+}
+```
+
+There isn't enough detail in the original query to create a stacked chart, so we
+will add a new
+{ref}/search-aggregations-bucket-terms-aggregation.html[terms aggregation]:
+
+```js
+POST kibana_sample_data_ecommerce/_search
+{
+ "query": {
+ "range": {
+ "order_date": {
+ "gte": "now-7d"
+ }
+ }
+ },
+ "aggs": {
+ "categories": {
+ "terms": { "field": "category.keyword" },
+ "aggs": {
+ "time_buckets": {
+ "date_histogram": {
+ "field": "order_date",
+ "fixed_interval": "1d",
+ "extended_bounds": {
+ "min": "now-7d"
+ },
+ "min_doc_count": 0
+ }
+ }
+ }
+ }
+ },
+ "size": 0
+}
+```
+
+You'll see that the response format looks different from the previous query:
+
+```json
+{
+ "aggregations" : {
+ "categories" : {
+ "doc_count_error_upper_bound" : 0,
+ "sum_other_doc_count" : 0,
+ "buckets" : [{
+ "key" : "Men's Clothing",
+ "doc_count" : 1661,
+ "time_buckets" : {
+ "buckets" : [{
+ "key_as_string" : "2020-06-30T00:00:00.000Z",
+ "key" : 1593475200000,
+ "doc_count" : 19
+ }, {
+ "key_as_string" : "2020-07-01T00:00:00.000Z",
+ "key" : 1593561600000,
+ "doc_count" : 71
+ }]
+ }
+ }]
+ }
+ }
+}
+```
+
+Now that we have data that we're happy with, it's time to convert from an
+isolated {es} query into a query with {kib} integration. Looking at the
+<>, you will
+see the full list of special tokens that are used in this query, such
+as `%context%: true`. This query has also replaced `"fixed_interval": "1d"`
+with `interval: {%autointerval%: true}`. Copy the final query into
+your spec:
+
+```yaml
+ data: {
+ url: {
+ %context%: true
+ %timefield%: order_date
+ index: kibana_sample_data_ecommerce
+ body: {
+ aggs: {
+ categories: {
+ terms: { field: "category.keyword" }
+ aggs: {
+ time_buckets: {
+ date_histogram: {
+ field: order_date
+ interval: {%autointerval%: true}
+ extended_bounds: {
+ min: {%timefilter%: "min"}
+ max: {%timefilter%: "max"}
+ }
+ min_doc_count: 0
+ }
+ }
+ }
+ }
+ }
+ size: 0
+ }
+ }
+ format: {property: "aggregations.categories.buckets" }
+ }
+```
+
+If you copy and paste that into your Vega-Lite spec, and click "Update",
+you will see a warning saying `Infinite extent for field "key": [Infinity, -Infinity]`.
+Let's use our <> to understand why.
+
+Vega-Lite generates data using the names `source_0` and `data_0`. `source_0` contains
+the results from the {es} query, and `data_0` contains the visually encoded results
+which are shown in the chart. To debug this problem, you need to compare both.
+
+To look at the source, open the browser dev tools console and type
+`VEGA_DEBUG.view.data('source_0')`. You will see:
+
+```js
+[{
+ doc_count: 454
+ key: "Men's Clothing"
+ time_buckets: {buckets: Array(57)}
+ Symbol(vega_id): 12822
+}, ...]
+```
+
+To compare to the visually encoded data, open the browser dev tools console and type
+`VEGA_DEBUG.view.data('data_0')`. You will see:
+
+```js
+[{
+ doc_count: 454
+ key: NaN
+ time_buckets: {buckets: Array(57)}
+ Symbol(vega_id): 13879
+}]
+```
+
+The issue seems to be that the `key` property is not being converted the right way,
+which makes sense because the `key` is now `Men's Clothing` instead of a timestamp.
+
+To fix this, try updating the `encoding` of your Vega-Lite spec to:
+
+```yaml
+ encoding: {
+ x: {
+ field: time_buckets.buckets.key
+ type: temporal
+ axis: { title: null }
+ }
+ y: {
+ field: time_buckets.buckets.doc_count
+ type: quantitative
+ axis: { title: "Document count" }
+ }
+ }
+```
+
+This will show more errors, and you can inspect `VEGA_DEBUG.view.data('data_0')` to
+understand why. This now shows:
+
+```js
+[{
+ doc_count: 454
+ key: "Men's Clothing"
+ time_buckets: {buckets: Array(57)}
+ time_buckets.buckets.doc_count: undefined
+ time_buckets.buckets.key: null
+ Symbol(vega_id): 14094
+}]
+```
+
+It looks like the problem is that the `time_buckets` inner array is not being
+extracted by Vega. The solution is to use a Vega-Lite
+https://vega.github.io/vega-lite/docs/flatten.html[flatten transformation], available in {kib} 7.9 and later.
+If you are using an older version of {kib}, the flatten transformation is available in Vega
+but not Vega-Lite.
+
+Add this section in between the `data` and `encoding` section:
+
+```yaml
+ transform: [{
+ flatten: ["time_buckets.buckets"]
+ }]
+```
+
+This does not yet produce the results you expect. Inspect the transformed data
+by typing `VEGA_DEBUG.view.data('data_0')` into the console again:
+
+```js
+[{
+ doc_count: 453
+ key: "Men's Clothing"
+ time_bucket.buckets.doc_count: undefined
+ time_buckets: {buckets: Array(57)}
+ time_buckets.buckets: {
+ key_as_string: "2020-06-30T15:00:00.000Z",
+ key: 1593529200000,
+ doc_count: 2
+ }
+ time_buckets.buckets.key: null
+ Symbol(vega_id): 21564
+}]
+```
+
+The debug view shows `undefined` values where you would expect to see numbers, and
+the cause is that there are duplicate field names, which confuse Vega-Lite. This can
+be fixed by making this change to the `transform` and `encoding` blocks:
+
+```yaml
+ transform: [{
+ flatten: ["time_buckets.buckets"],
+ as: ["buckets"]
+ }]
+
+ mark: area
+
+ encoding: {
+ x: {
+ field: buckets.key
+ type: temporal
+ axis: { title: null }
+ }
+ y: {
+ field: buckets.doc_count
+ type: quantitative
+ axis: { title: "Document count" }
+ }
+ color: {
+ field: key
+ type: nominal
+ }
+ }
+```
+
+At this point, you have a stacked area chart that shows the top categories,
+but the chart is still missing some common features that we expect from a {kib}
+visualization. Let's add hover states and tooltips next.
+
+Hover states are handled differently in Vega-Lite and Vega. In Vega-Lite this is
+done using a concept called `selection`, which has many permutations that are not
+covered in this tutorial. We will be adding a simple tooltip and hover state.
+
+Because {kib} has enabled the https://vega.github.io/vega-lite/docs/tooltip.html[Vega tooltip plugin],
+tooltips can be defined in several ways:
+
+* Automatic tooltip based on the data, via `{ content: "data" }` (see the sketch after this list)
+* Array of fields, like `[{ field: "key", type: "nominal" }]`
+* Defining a custom JavaScript object using the `calculate` transform
+
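+The automatic option is set directly on the mark. A minimal sketch (not part of this
+tutorial's final spec) looks like this:
+
+```yaml
+  mark: {
+    type: area
+    tooltip: { content: "data" }
+  }
+```
+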
+For the simple tooltip, add this to your encoding:
+
+```yaml
+ encoding: {
+ tooltip: [{
+ field: buckets.key
+ type: temporal
+ title: "Date"
+ }, {
+ field: key
+ type: nominal
+ title: "Category"
+ }, {
+ field: buckets.doc_count
+ type: quantitative
+ title: "Count"
+ }]
+ }
+```
+
+As you hover over the area series in your chart, a multi-line tooltip will
+appear, but it won't indicate which point it refers to. To indicate the
+nearest point, we need to add a second layer.
+
+The first step is to remove the `mark: area` from your visualization.
+Once you've removed the previous mark, add a composite mark at the end of
+the Vega-Lite spec:
+
+```yaml
+ layer: [{
+ mark: area
+ }, {
+ mark: point
+ }]
+```
+
+You'll see that the points don't appear to line up with the area chart,
+and the reason is that the points are not being stacked. Change your Y encoding
+to this:
+
+```yaml
+ y: {
+ field: buckets.doc_count
+ type: quantitative
+ axis: { title: "Document count" }
+ stack: true
+ }
+```
+
+Now, we will add a `selection` block inside the point mark:
+
+```yaml
+ layer: [{
+ mark: area
+ }, {
+ mark: point
+
+ selection: {
+ pointhover: {
+ type: single
+ on: mouseover
+ clear: mouseout
+ empty: none
+ fields: ["buckets.key", "key"]
+ nearest: true
+ }
+ }
+
+ encoding: {
+ size: {
+ condition: {
+ selection: pointhover
+ value: 100
+ }
+ value: 5
+ }
+ fill: {
+ condition: {
+ selection: pointhover
+ value: white
+ }
+ }
+ }
+ }]
+```
+
+Now that you've enabled a selection, try moving the mouse around the visualization
+and watch the points respond to the nearest position:
+
+[role="screenshot"]
+image::visualize/images/vega_lite_tutorial_2.png[]
+
+The final result of this tutorial is this spec:
+
+.Expand final Vega-Lite spec
+[%collapsible%closed]
+====
+[source,yaml]
+----
+{
+ $schema: https://vega.github.io/schema/vega-lite/v4.json
+ title: Event counts from ecommerce
+ data: {
+ url: {
+ %context%: true
+ %timefield%: order_date
+ index: kibana_sample_data_ecommerce
+ body: {
+ aggs: {
+ categories: {
+ terms: { field: "category.keyword" }
+ aggs: {
+ time_buckets: {
+ date_histogram: {
+ field: order_date
+ interval: {%autointerval%: true}
+ extended_bounds: {
+ min: {%timefilter%: "min"}
+ max: {%timefilter%: "max"}
+ }
+ min_doc_count: 0
+ }
+ }
+ }
+ }
+ }
+ size: 0
+ }
+ }
+ format: {property: "aggregations.categories.buckets" }
+ }
+
+ transform: [{
+ flatten: ["time_buckets.buckets"]
+ as: ["buckets"]
+ }]
+
+ encoding: {
+ x: {
+ field: buckets.key
+ type: temporal
+ axis: { title: null }
+ }
+ y: {
+ field: buckets.doc_count
+ type: quantitative
+ axis: { title: "Document count" }
+ stack: true
+ }
+ color: {
+ field: key
+ type: nominal
+ title: "Category"
+ }
+ tooltip: [{
+ field: buckets.key
+ type: temporal
+ title: "Date"
+ }, {
+ field: key
+ type: nominal
+ title: "Category"
+ }, {
+ field: buckets.doc_count
+ type: quantitative
+ title: "Count"
+ }]
+ }
+
+ layer: [{
+ mark: area
+ }, {
+ mark: point
+
+ selection: {
+ pointhover: {
+ type: single
+ on: mouseover
+ clear: mouseout
+ empty: none
+ fields: ["buckets.key", "key"]
+ nearest: true
+ }
+ }
+
+ encoding: {
+ size: {
+ condition: {
+ selection: pointhover
+ value: 100
+ }
+ value: 5
+ }
+ fill: {
+ condition: {
+ selection: pointhover
+ value: white
+ }
+ }
+ }
+ }]
+}
+----
+
+====
+
+[[vega-tutorial]]
+=== Tutorial: Updating {kib} filters from Vega
+
+In this tutorial you will build an area chart in Vega using an {es} search query,
+and add a click handler and drag handler to update {kib} filters.
+This tutorial is not a full https://vega.github.io/vega/tutorials/[Vega tutorial],
+but will cover the basics of creating Vega visualizations in {kib}.
+
+First, create an almost-blank Vega chart by pasting this into the editor:
+
+```yaml
+{
+ $schema: "https://vega.github.io/schema/vega/v5.json"
+ data: [{
+ name: source_0
+ }]
+
+ scales: [{
+ name: x
+ type: time
+ range: width
+ }, {
+ name: y
+ type: linear
+ range: height
+ }]
+
+ axes: [{
+ orient: bottom
+ scale: x
+ }, {
+ orient: left
+ scale: y
+ }]
+
+ marks: [
+ {
+ type: area
+ from: {
+ data: source_0
+ }
+ encode: {
+ update: {
+ }
+ }
+ }
+ ]
+}
+```
+
+Despite being almost blank, this Vega spec still contains the minimum requirements:
+
+* Data
+* Scales
+* Marks
+* (optional) Axes
+
+Next, add a valid {es} search query in the `data` block:
+
+```yaml
+ data: [
+ {
+ name: source_0
+ url: {
+ %context%: true
+ %timefield%: order_date
+ index: kibana_sample_data_ecommerce
+ body: {
+ aggs: {
+ time_buckets: {
+ date_histogram: {
+ field: order_date
+ fixed_interval: "3h"
+ extended_bounds: {
+ min: {%timefilter%: "min"}
+ max: {%timefilter%: "max"}
+ }
+ min_doc_count: 0
+ }
+ }
+ }
+ size: 0
+ }
+ }
+ format: { property: "aggregations.time_buckets.buckets" }
+ }
+ ]
+```
+
+Click "Update", and nothing will change in the visualization. The first step
+is to change the X and Y scales based on the data:
+
+```yaml
+ scales: [{
+ name: x
+ type: time
+ range: width
+ domain: {
+ data: source_0
+ field: key
+ }
+ }, {
+ name: y
+ type: linear
+ range: height
+ domain: {
+ data: source_0
+ field: doc_count
+ }
+ }]
+```
+
+Click "Update", and you will see that the X and Y axes are now showing labels based
+on the real data.
+
+Next, encode the fields `key` and `doc_count` as the X and Y values:
+
+```yaml
+ marks: [
+ {
+ type: area
+ from: {
+ data: source_0
+ }
+ encode: {
+ update: {
+ x: {
+ scale: x
+ field: key
+ }
+ y: {
+ scale: y
+ value: 0
+ }
+ y2: {
+ scale: y
+ field: doc_count
+ }
+ }
+ }
+ }
+ ]
+```
+
+Click "Update" and you will get a basic area chart:
+
+[role="screenshot"]
+image::visualize/images/vega_tutorial_3.png[]
+
+Next, add a new block to the `marks` section. This adds clickable points that can be used to filter for a specific
+date:
+
+```yaml
+ {
+ name: point
+ type: symbol
+ style: ["point"]
+ from: {
+ data: source_0
+ }
+ encode: {
+ update: {
+ x: {
+ scale: x
+ field: key
+ }
+ y: {
+ scale: y
+ field: doc_count
+ }
+ size: {
+ value: 100
+ }
+ fill: {
+ value: black
+ }
+ }
+ }
+ }
+```
+
+Next, we will create a Vega signal to make the points clickable. You can access
+the clicked `datum` in the expression used to update. In this case, you want
+clicks on points to add a time filter with the 3-hour interval defined above.
+
+```yaml
+ signals: [
+ {
+ name: point_click
+ on: [{
+ events: {
+ source: scope
+ type: click
+ markname: point
+ }
+ update: '''kibanaSetTimeFilter(datum.key, datum.key + 3 * 60 * 60 * 1000)'''
+ }]
+ }
+ ]
+```
+
+This event uses the {kib} custom function `kibanaSetTimeFilter` to generate a filter that
+gets applied to the entire dashboard on click.
+
+The mouse cursor does not currently indicate that the chart is interactive. Find the `marks` section,
+and update the mark named `point` by adding `cursor: { value: "pointer" }` to
+its `encode` block like this:
+
+```yaml
+ {
+ name: point
+ type: symbol
+ style: ["point"]
+ from: {
+ data: source_0
+ }
+ encode: {
+ update: {
+ ...
+ cursor: { value: "pointer" }
+ }
+ }
+ }
+```
+
+Next, we will add a drag interaction which will allow the user to narrow down to
+a specific time range in the visualization. This will require adding more signals, and
+adding a rectangle overlay:
+
+[role="screenshot"]
+image::visualize/images/vega_tutorial_4.png[]
+
+The first step is to add a new `signal` to track the X position of the cursor:
+
+```yaml
+ {
+ name: currentX
+ value: -1
+ on: [{
+ events: {
+ type: mousemove
+ source: view
+ },
+ update: "clamp(x(), 0, width)"
+ }, {
+ events: {
+ type: mouseout
+ source: view
+ }
+ update: "-1"
+ }]
+ }
+```
+
+Now add a new `mark` to indicate the current cursor position:
+
+```yaml
+ {
+ type: rule
+ interactive: false
+ encode: {
+ update: {
+ y: {value: 0}
+ y2: {signal: "height"}
+ stroke: {value: "gray"}
+ strokeDash: {
+ value: [2, 1]
+ }
+ x: {signal: "max(currentX,0)"}
+ defined: {signal: "currentX > 0"}
+ }
+ }
+ }
+```
+
+Next, add a signal to track the currently selected range, which will update
+until the user releases the mouse button or uses the escape key:
+
+
+```yaml
+ {
+ name: selected
+ value: [0, 0]
+ on: [{
+ events: {
+ type: mousedown
+ source: view
+ }
+ update: "[clamp(x(), 0, width), clamp(x(), 0, width)]"
+ }, {
+ events: {
+ type: mousemove
+ source: window
+ consume: true
+ between: [{
+ type: mousedown
+ source: view
+ }, {
+ merge: [{
+ type: mouseup
+ source: window
+ }, {
+ type: keydown
+ source: window
+ filter: "event.key === 'Escape'"
+ }]
+ }]
+ }
+ update: "[selected[0], clamp(x(), 0, width)]"
+ }, {
+ events: {
+ type: keydown
+ source: window
+ filter: "event.key === 'Escape'"
+ }
+ update: "[0, 0]"
+ }]
+ }
+```
+
+Now that there is a signal which tracks the time range selected by the user, we need to indicate
+the range visually by adding a new mark which only appears conditionally:
+
+```yaml
+ {
+ type: rect
+ name: selectedRect
+ encode: {
+ update: {
+ height: {signal: "height"}
+ fill: {value: "#333"}
+ fillOpacity: {value: 0.2}
+ x: {signal: "selected[0]"}
+ x2: {signal: "selected[1]"}
+ defined: {signal: "selected[0] !== selected[1]"}
+ }
+ }
+ }
+```
+
+Finally, add a new signal which will update the {kib} time filter when the mouse is released while
+dragging:
+
+```yaml
+ {
+ name: applyTimeFilter
+ value: null
+ on: [{
+ events: {
+ type: mouseup
+ source: view
+ }
+ update: '''selected[0] !== selected[1] ? kibanaSetTimeFilter(
+ invert('x',selected[0]),
+ invert('x',selected[1])) : null'''
+ }]
+ }
+```
+
+Putting this all together, your visualization now supports the main features of
+standard visualizations in {kib}, but with the potential to add even more control.
+The final Vega spec for this tutorial is here:
+
+.Expand final Vega spec
+[%collapsible%closed]
+====
+[source,yaml]
+----
+{
+ $schema: "https://vega.github.io/schema/vega/v5.json"
+ data: [
+ {
+ name: source_0
+ url: {
+ %context%: true
+ %timefield%: order_date
+ index: kibana_sample_data_ecommerce
+ body: {
+ aggs: {
+ time_buckets: {
+ date_histogram: {
+ field: order_date
+ fixed_interval: "3h"
+ extended_bounds: {
+ min: {%timefilter%: "min"}
+ max: {%timefilter%: "max"}
+ }
+ min_doc_count: 0
+ }
+ }
+ }
+ size: 0
+ }
+ }
+ format: { property: "aggregations.time_buckets.buckets" }
+ }
+ ]
+
+ scales: [{
+ name: x
+ type: time
+ range: width
+ domain: {
+ data: source_0
+ field: key
+ }
+ }, {
+ name: y
+ type: linear
+ range: height
+ domain: {
+ data: source_0
+ field: doc_count
+ }
+ }]
+
+ axes: [{
+ orient: bottom
+ scale: x
+ }, {
+ orient: left
+ scale: y
+ }]
+
+ marks: [
+ {
+ type: area
+ from: {
+ data: source_0
+ }
+ encode: {
+ update: {
+ x: {
+ scale: x
+ field: key
+ }
+ y: {
+ scale: y
+ value: 0
+ }
+ y2: {
+ scale: y
+ field: doc_count
+ }
+ }
+ }
+ },
+ {
+ name: point
+ type: symbol
+ style: ["point"]
+ from: {
+ data: source_0
+ }
+ encode: {
+ update: {
+ x: {
+ scale: x
+ field: key
+ }
+ y: {
+ scale: y
+ field: doc_count
+ }
+ size: {
+ value: 100
+ }
+ fill: {
+ value: black
+ }
+ cursor: { value: "pointer" }
+ }
+ }
+ },
+ {
+ type: rule
+ interactive: false
+ encode: {
+ update: {
+ y: {value: 0}
+ y2: {signal: "height"}
+ stroke: {value: "gray"}
+ strokeDash: {
+ value: [2, 1]
+ }
+ x: {signal: "max(currentX,0)"}
+ defined: {signal: "currentX > 0"}
+ }
+ }
+ },
+ {
+ type: rect
+ name: selectedRect
+ encode: {
+ update: {
+ height: {signal: "height"}
+ fill: {value: "#333"}
+ fillOpacity: {value: 0.2}
+ x: {signal: "selected[0]"}
+ x2: {signal: "selected[1]"}
+ defined: {signal: "selected[0] !== selected[1]"}
+ }
+ }
+ }
+ ]
+
+ signals: [
+ {
+ name: point_click
+ on: [{
+ events: {
+ source: scope
+ type: click
+ markname: point
+ }
+ update: '''kibanaSetTimeFilter(datum.key, datum.key + 3 * 60 * 60 * 1000)'''
+ }]
+ }
+ {
+ name: currentX
+ value: -1
+ on: [{
+ events: {
+ type: mousemove
+ source: view
+ },
+ update: "clamp(x(), 0, width)"
+ }, {
+ events: {
+ type: mouseout
+ source: view
+ }
+ update: "-1"
+ }]
+ }
+ {
+ name: selected
+ value: [0, 0]
+ on: [{
+ events: {
+ type: mousedown
+ source: view
+ }
+ update: "[clamp(x(), 0, width), clamp(x(), 0, width)]"
+ }, {
+ events: {
+ type: mousemove
+ source: window
+ consume: true
+ between: [{
+ type: mousedown
+ source: view
+ }, {
+ merge: [{
+ type: mouseup
+ source: window
+ }, {
+ type: keydown
+ source: window
+ filter: "event.key === 'Escape'"
+ }]
+ }]
+ }
+ update: "[selected[0], clamp(x(), 0, width)]"
+ }, {
+ events: {
+ type: keydown
+ source: window
+ filter: "event.key === 'Escape'"
+ }
+ update: "[0, 0]"
+ }]
+ }
+ {
+ name: applyTimeFilter
+ value: null
+ on: [{
+ events: {
+ type: mouseup
+ source: view
+ }
+ update: '''selected[0] !== selected[1] ? kibanaSetTimeFilter(
+ invert('x',selected[0]),
+ invert('x',selected[1])) : null'''
+ }]
+ }
+ ]
+}
+
+----
+====
+
+[[vega-reference]]
+=== Reference for {kib} extensions
+
+{kib} extends Vega and Vega-Lite with features that support:
+
+* Default height and width
+* Default theme to match {kib}
+* Writing {es} queries using the time range and filters from dashboards
+* Using the Elastic Map Service in Vega maps
+* Additional tooltip styling
+* Advanced setting to enable URL loading from any domain
+* Limited debugging support using the browser dev tools
+* (Vega only) Expression functions which can update the time range and dashboard filters
-[float]
[[vega-sizing-and-positioning]]
-==== Change the layout
+==== Default height and width
By default, Vega visualizations use the `autosize = { type: 'fit', contains: 'padding' }` layout.
`fit` uses all available space, ignores `width` and `height` values,
and respects the padding values. To override this behavior, change the
`autosize` value.
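+
+For example, a minimal sketch that switches to a fixed-size layout (the pixel values are
+arbitrary):
+
+```yaml
+  autosize: none
+  width: 600
+  height: 300
+  padding: 10
+```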
-[[vega-querying-elasticsearch]]
-=== Query {es}
+[[vega-theme]]
+==== Default theme to match {kib}
+
+{kib} registers a default https://vega.github.io/vega/docs/schemes/[Vega color scheme]
+with the id `elastic`, and sets a default color for each `mark` type.
+Override it by providing a different `stroke`, `fill`, or `color` (Vega-Lite) value.
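+
+For example, a minimal Vega-Lite sketch that overrides the default color of an area mark
+(the hex value is arbitrary):
+
+```yaml
+  mark: {
+    type: area
+    color: "#54B399"
+  }
+```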
+
+[[vega-queries]]
+==== Writing {es} queries in Vega
+
+experimental[] {kib} extends the Vega https://vega.github.io/vega/docs/data/[data] elements
+with support for direct {es} queries specified as a `url`.
-experimental[] Vega https://vega.github.io/vega/docs/data/[data] elements
-use embedded and external data with a `"url"` parameter. {kib} adds support for
-direct {es} queries by overloading
-the `"url"` value.
+Because of this, {kib} is **unable to support dynamically loaded data**,
+which would otherwise work in Vega. All data is fetched before it's passed to
+the Vega renderer.
-NOTE: With Vega, you dynamically load your data by setting signals as data URLs.
-Since {kib} is unable to support dynamically loaded data, all data is fetched
-before it's passed to the Vega renderer.
+To define an {es} query in Vega, set the `url` to an object. {kib} will parse
+the object looking for special tokens that allow your query to integrate with {kib}.
+These tokens are:
-For example, count the number of documents in all indices:
+* `%context%: true`: Set at the top level, and replaces the `query` section with filters from the dashboard
+* `%timefield%: `: Set at the top level, integrates the query with the dashboard time filter
+* `{%timefilter%: true}`: Replaced by an {es} range query with upper and lower bounds
+* `{%timefilter%: "min" | "max"}`: Replaced only by the upper or lower bounds
+* `{%timefilter%: true, shift: -1, unit: 'hour'}`: Generates a time range query one hour in the past
+* `{%autointerval%: true}`: Replaced by a string that contains the automatic {kib} time interval, such as `1h`
+* `{%autointerval%: 10}`: Replaced by a string that divides the time range into approximately 10 buckets, allowing
+ you to influence the automatic interval
+* `"%dashboard_context-must_clause%"`: String replaced by an object containing filters
+* `"%dashboard_context-filter_clause%"`: String replaced by an object containing filters
+* `"%dashboard_context-must_not_clause%"`: String replaced by an object containing filters
+
+Putting this together, here is an example query that counts the number of documents in
+a specific index:
[source,yaml]
----
@@ -80,8 +1296,8 @@ url: {
%context%: true
// Which indexes to search
- index: _all
- // The body element may contain "aggs" and "query" subfields
+ index: kibana_sample_data_logs
+ // The body element may contain "aggs" and "query" keys
body: {
aggs: {
time_buckets: {
@@ -183,7 +1399,7 @@ except that the time range is shifted back by 10 minutes:
}
----
-NOTE: When using `"%context%": true` or defining a value for "%timefield%"` the body cannot contain a query. To customize the query within the VEGA specification (e.g. add an additional filter, or shift the timefilter), define your query and use the placeholders as in the example above. The placeholders will be replaced by the actual context of the dashboard or visualization once parsed.
+NOTE: When using `"%context%": true` or defining a value for `"%timefield%"`, the body cannot contain a query. To customize the query within the Vega specification (for example, to add an additional filter or shift the time filter), define your query and use the placeholders as in the example above. The placeholders will be replaced by the actual context of the dashboard or visualization once parsed.
The `"%timefilter%"` can also be used to specify a single min or max
value. The date_histogram's `extended_bounds` can be set
@@ -194,6 +1410,7 @@ also supported. The `"interval"` can also be set dynamically, depending
on the currently picked range: `"interval": {"%autointerval%": 10}` will
try to get about 10-15 data points (buckets).
+[float]
[[vega-esmfiles]]
=== Access Elastic Map Service files
@@ -260,21 +1477,44 @@ Additionally, you can use `latitude`, `longitude`, and `zoom` signals.
These signals can be used in the graph, or can be updated to modify the
position of the map.
-Vega visualization ignore the `autosize`, `width`, `height`, and `padding`
-values, using `fit` model with zero padding.
+[float]
+[[vega-tooltip]]
+==== Additional tooltip styling
+
+{kib} has installed the https://vega.github.io/vega-lite/docs/tooltip.html[Vega tooltip plugin],
+so tooltips can be defined in the ways documented there. Beyond that, {kib} also supports
+a configuration option for changing the tooltip position and padding:
+
+```js
+{
+ config: {
+ kibana: {
+ tooltips: {
+ position: 'top',
+ padding: 15
+ }
+ }
+ }
+}
+```
+
+[[vega-url-loading]]
+==== Advanced setting to enable URL loading from any domain
-[[vega-debugging]]
-=== Debugging Vega
+Vega can load data from any URL, but this is disabled by default in {kib}.
+To change this, set `vis_type_vega.enableExternalUrls: true` in `kibana.yml`,
+then restart {kib}.
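+
+A minimal sketch of the corresponding `kibana.yml` entry:
+
+```yaml
+# Allow Vega data elements to load data from external URLs
+vis_type_vega.enableExternalUrls: true
+```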
[[vega-browser-debugging-console]]
==== Browser debugging console
experimental[] Use browser debugging tools (for example, F12 or Ctrl+Shift+J in Chrome) to
inspect the `VEGA_DEBUG` variable:
-+
+
* `view` — Access to the Vega View object. See https://vega.github.io/vega/docs/api/debugging/[Vega Debugging Guide]
-on how to inspect data and signals at runtime. For Vega-Lite, `VEGA_DEBUG.view.data('source_0')` gets the main data set.
-For Vega, it uses the data name as defined in your Vega spec.
+on how to inspect data and signals at runtime. For Vega-Lite,
+`VEGA_DEBUG.view.data('source_0')` gets the pre-transformed data, and `VEGA_DEBUG.view.data('data_0')`
+gets the encoded data. For Vega, it uses the data name as defined in your Vega spec.
* `vega_spec` — Vega JSON graph specification after some modifications by {kib}. In case
of Vega-Lite, this is the output of the Vega-Lite compiler.
@@ -283,7 +1523,7 @@ of Vega-Lite, this is the output of the Vega-Lite compiler.
Vega-Lite compilation.
[[vega-data]]
-==== Data
+==== Debugging data
experimental[] If you are using an {es} query, make sure your resulting data is
what you expected. The easiest way to view it is by using the "networking"
@@ -294,45 +1534,52 @@ https://www.elastic.co/guide/en/kibana/current/console-kibana.html[Dev Tools]. P
`GET /_search`, then add your query as the following lines
(just the value of the `"query"` field).
-If you need to share your graph with someone, copy the
-raw data response to https://gist.github.com/[gist.github.com], possibly
-with a `.json` extension, use the `[raw]` button, and use that url
-directly in your graph.
+[[vega-getting-help]]
+==== Asking for help with a Vega spec
-To restrict Vega from using non-ES data sources, add `vega.enableExternalUrls: false`
-to your kibana.yml file.
+Because of the dynamic nature of the data in {es}, it is hard to help you with
+Vega specs unless you can share a dataset. To do this, use the browser developer
+tools and type:
-[[vega-notes]]
-[[vega-useful-links]]
-=== Resources and examples
+`JSON.stringify(VEGA_DEBUG.vegalite_spec, null, 2)`
-experimental[] To learn more about Vega and Vega-List, refer to the resources and examples.
+Copy the response to https://gist.github.com/[gist.github.com], possibly
+with a `.json` extension, use the `[raw]` button, and share that when
+asking for help.
-==== Vega editor
-The https://vega.github.io/editor/[Vega Editor] includes examples for Vega & Vega-Lite, but does not support any
-{kib}-specific features like {es} requests and interactive base maps.
+[float]
+[[vega-expression-functions]]
+==== (Vega only) Expression functions which can update the time range and dashboard filters
-==== Vega-Lite resources
-* https://vega.github.io/vega-lite/tutorials/getting_started.html[Tutorials]
-* https://vega.github.io/vega-lite/docs/[Docs]
-* https://vega.github.io/vega-lite/examples/[Examples]
+{kib} has extended the Vega expression language with these functions:
-==== Vega resources
-* https://vega.github.io/vega/tutorials/[Tutorials]
-* https://vega.github.io/vega/docs/[Docs]
-* https://vega.github.io/vega/examples/[Examples]
+```js
+/**
+ * @param {object} query Elastic Query DSL snippet, as used in the query DSL editor
+ * @param {string} [index] as defined in Kibana, or default if missing
+ */
+kibanaAddFilter(query, index)
-TIP: When you use the examples, you may
-need to modify the "data" section to use absolute URL. For example,
-replace `"url": "data/world-110m.json"` with
-`"url": "https://vega.github.io/editor/data/world-110m.json"`.
+/**
+ * @param {object} query Elastic Query DSL snippet, as used in the query DSL editor
+ * @param {string} [index] as defined in Kibana, or default if missing
+ */
+kibanaRemoveFilter(query, index)
+
+kibanaRemoveAllFilters()
+/**
+ * Update dashboard time filter to the new values
+ * @param {number|string|Date} start
+ * @param {number|string|Date} end
+ */
+kibanaSetTimeFilter(start, end)
+```
+
+[float]
[[vega-additional-configuration-options]]
==== Additional configuration options
-These options are specific to the {kib}. link:#vega-with-a-map[Map support] has
-additional configuration options.
-
[source,yaml]
----
{
@@ -343,12 +1590,37 @@ additional configuration options.
controlsLocation: top
// Can be `vertical` or `horizontal` (default).
controlsDirection: vertical
- // If true, hides most of Vega and VegaLite warnings
+ // If true, hides most of Vega and Vega-Lite warnings
hideWarnings: true
// Vega renderer to use: `svg` or `canvas` (default)
renderer: canvas
}
}
- /* the rest of Vega code */
}
----
+
+
+[[vega-notes]]
+[[vega-useful-links]]
+=== Resources and examples
+
+experimental[] To learn more about Vega and Vega-Lite, refer to the resources and examples.
+
+==== Vega editor
+The https://vega.github.io/editor/[Vega Editor] includes examples for Vega & Vega-Lite, but does not support any
+{kib}-specific features like {es} requests and interactive base maps.
+
+==== Vega-Lite resources
+* https://vega.github.io/vega-lite/tutorials/getting_started.html[Tutorials]
+* https://vega.github.io/vega-lite/docs/[Docs]
+* https://vega.github.io/vega-lite/examples/[Examples]
+
+==== Vega resources
+* https://vega.github.io/vega/tutorials/[Tutorials]
+* https://vega.github.io/vega/docs/[Docs]
+* https://vega.github.io/vega/examples/[Examples]
+
+TIP: When you use the examples in {kib}, you may
+need to modify the "data" section to use an absolute URL. For example,
+replace `"url": "data/world-110m.json"` with
+`"url": "https://vega.github.io/editor/data/world-110m.json"`.
diff --git a/examples/README.md b/examples/README.asciidoc
similarity index 68%
rename from examples/README.md
rename to examples/README.asciidoc
index 2b214a8d1eb52..d33c5e825ce12 100644
--- a/examples/README.md
+++ b/examples/README.asciidoc
@@ -1,7 +1,9 @@
-## Example plugins
+[[example-plugins]]
+== Example plugins
This folder contains example plugins. To run the plugins in this folder, use the `--run-examples` flag, via
-```
+[source,bash]
+----
yarn start --run-examples
-```
+----
diff --git a/examples/routing_example/README.md b/examples/routing_example/README.md
new file mode 100644
index 0000000000000..0a88707bf70bb
--- /dev/null
+++ b/examples/routing_example/README.md
@@ -0,0 +1,9 @@
+Team owner: Platform
+
+A working example of a plugin that registers and uses multiple custom routes.
+
+Read more:
+
+- [IRouter API Docs](../../docs/development/core/server/kibana-plugin-core-server.irouter.md)
+- [HttpHandler (core.http.fetch) API Docs](../../docs/development/core/public/kibana-plugin-core-public.httphandler.md)
+- [Routing Conventions](../../STYLEGUIDE.md#api-endpoints)
\ No newline at end of file
diff --git a/src/legacy/core_plugins/timelion/public/directives/timelion_options_sheet.js b/examples/routing_example/common/index.ts
similarity index 69%
rename from src/legacy/core_plugins/timelion/public/directives/timelion_options_sheet.js
rename to examples/routing_example/common/index.ts
index 067c831f09de5..5aa47b1f69cdf 100644
--- a/src/legacy/core_plugins/timelion/public/directives/timelion_options_sheet.js
+++ b/examples/routing_example/common/index.ts
@@ -17,14 +17,11 @@
* under the License.
*/
-import { uiModules } from 'ui/modules';
-import template from 'plugins/timelion/partials/sheet_options.html';
-const app = uiModules.get('apps/timelion', []);
+export const RANDOM_NUMBER_ROUTE_PATH = '/api/random_number';
-app.directive('timelionOptions', function () {
- return {
- replace: true,
- restrict: 'E',
- template,
- };
-});
+export const RANDOM_NUMBER_BETWEEN_ROUTE_PATH = '/api/random_number_between';
+
+export const POST_MESSAGE_ROUTE_PATH = '/api/post_message';
+
+// Internal APIs should use the `internal` prefix, instead of the `api` prefix.
+export const INTERNAL_GET_MESSAGE_BY_ID_ROUTE = '/internal/get_message';
diff --git a/examples/routing_example/kibana.json b/examples/routing_example/kibana.json
new file mode 100644
index 0000000000000..37851a0da5a85
--- /dev/null
+++ b/examples/routing_example/kibana.json
@@ -0,0 +1,9 @@
+{
+ "id": "routingExample",
+ "version": "0.0.1",
+ "kibanaVersion": "kibana",
+ "server": true,
+ "ui": true,
+ "requiredPlugins": ["developerExamples"],
+ "optionalPlugins": []
+}
diff --git a/examples/routing_example/public/app.tsx b/examples/routing_example/public/app.tsx
new file mode 100644
index 0000000000000..3b33cb33ccb01
--- /dev/null
+++ b/examples/routing_example/public/app.tsx
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import React from 'react';
+import ReactDOM from 'react-dom';
+import { AppMountParameters } from 'kibana/public';
+import {
+ EuiPage,
+ EuiPageBody,
+ EuiPageContent,
+ EuiText,
+ EuiHorizontalRule,
+ EuiPageContentHeader,
+ EuiListGroup,
+} from '@elastic/eui';
+import { RandomNumberRouteExample } from './random_number_example';
+import { RandomNumberBetweenRouteExample } from './random_number_between_example';
+import { Services } from './services';
+import { PostMessageRouteExample } from './post_message_example';
+import { GetMessageRouteExample } from './get_message_example';
+
+type Props = Services;
+
+function RoutingExplorer({
+ fetchRandomNumber,
+ fetchRandomNumberBetween,
+ addSuccessToast,
+ postMessage,
+ getMessageById,
+}: Props) {
+  return (
+    <EuiPage>
+      <EuiPageBody>
+        <EuiPageContent>
+          <EuiPageContentHeader>
+            <EuiText>
+              <h1>Routing examples</h1>
+            </EuiText>
+          </EuiPageContentHeader>
+          <EuiListGroup>
+            <RandomNumberRouteExample fetchRandomNumber={fetchRandomNumber} />
+            <EuiHorizontalRule />
+            <RandomNumberBetweenRouteExample fetchRandomNumberBetween={fetchRandomNumberBetween} />
+            <EuiHorizontalRule />
+            <PostMessageRouteExample postMessage={postMessage} addSuccessToast={addSuccessToast} />
+            <EuiHorizontalRule />
+            <GetMessageRouteExample getMessageById={getMessageById} />
+          </EuiListGroup>
+        </EuiPageContent>
+      </EuiPageBody>
+    </EuiPage>
+  );
+}
+
+export const renderApp = (props: Props, element: AppMountParameters['element']) => {
+  ReactDOM.render(<RoutingExplorer {...props} />, element);
+
+ return () => ReactDOM.unmountComponentAtNode(element);
+};
diff --git a/examples/routing_example/public/get_message_example.tsx b/examples/routing_example/public/get_message_example.tsx
new file mode 100644
index 0000000000000..3c34326564d2b
--- /dev/null
+++ b/examples/routing_example/public/get_message_example.tsx
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import React, { useCallback } from 'react';
+import { useState } from 'react';
+import {
+ EuiText,
+ EuiButton,
+ EuiLoadingSpinner,
+ EuiFieldText,
+ EuiCallOut,
+ EuiFormRow,
+} from '@elastic/eui';
+import { HttpFetchError } from '../../../src/core/public';
+import { isError } from './is_error';
+import { Services } from './services';
+
+interface Props {
+ getMessageById: Services['getMessageById'];
+}
+
+export function GetMessageRouteExample({ getMessageById }: Props) {
+  const [error, setError] = useState<HttpFetchError | undefined>();
+ const [isFetching, setIsFetching] = useState(false);
+ const [message, setMessage] = useState('');
+ const [id, setId] = useState('');
+
+ const doFetch = useCallback(async () => {
+ if (isFetching) return;
+ setIsFetching(true);
+ const response = await getMessageById(id);
+
+ if (isError(response)) {
+ setError(response);
+ setMessage('');
+ } else {
+ setError(undefined);
+ setMessage(response);
+ }
+
+ setIsFetching(false);
+ }, [isFetching, getMessageById, setMessage, id]);
+
+  return (
+    <EuiText>
+      <h3>GET example with param</h3>
+      <p>This example uses a simple GET route that takes an id as a param in the route path.</p>
+      <EuiFormRow label="Message id">
+        <EuiFieldText value={id} onChange={(e) => setId(e.target.value)} />
+      </EuiFormRow>
+      <EuiFormRow hasEmptyLabelSpace={true}>
+        <EuiButton disabled={isFetching || id === ''} onClick={() => doFetch()}>
+          {isFetching ? <EuiLoadingSpinner /> : 'Get message'}
+        </EuiButton>
+      </EuiFormRow>
+      {error !== undefined ? (
+        <EuiCallOut color="danger">{error.message}</EuiCallOut>
+      ) : null}
+      {message !== '' ? <p>Message is: {message}</p> : null}
+    </EuiText>
+  );
+}
diff --git a/examples/routing_example/public/index.ts b/examples/routing_example/public/index.ts
new file mode 100644
index 0000000000000..2bb703e71cbef
--- /dev/null
+++ b/examples/routing_example/public/index.ts
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { PluginInitializer } from 'kibana/public';
+import { RoutingExamplePlugin } from './plugin';
+
+export const plugin: PluginInitializer<{}, {}> = () => new RoutingExamplePlugin();
diff --git a/examples/routing_example/public/is_error.ts b/examples/routing_example/public/is_error.ts
new file mode 100644
index 0000000000000..528cca5b50d5d
--- /dev/null
+++ b/examples/routing_example/public/is_error.ts
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import { HttpFetchError } from '../../../src/core/public';
+
+export function isError<T>(error: T | HttpFetchError): error is HttpFetchError {
+ return error instanceof HttpFetchError;
+}
diff --git a/examples/routing_example/public/plugin.tsx b/examples/routing_example/public/plugin.tsx
new file mode 100644
index 0000000000000..eabdd2ade05b2
--- /dev/null
+++ b/examples/routing_example/public/plugin.tsx
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import {
+ CoreStart,
+ Plugin,
+ CoreSetup,
+ AppMountParameters,
+ AppNavLinkStatus,
+} from '../../../src/core/public';
+import { DeveloperExamplesSetup } from '../../developer_examples/public';
+import { getServices } from './services';
+
+interface SetupDeps {
+ developerExamples: DeveloperExamplesSetup;
+}
+
+export class RoutingExamplePlugin implements Plugin<{}, {}, SetupDeps, {}> {
+ public setup(core: CoreSetup, { developerExamples }: SetupDeps) {
+ core.application.register({
+ id: 'routingExample',
+ title: 'Routing',
+ navLinkStatus: AppNavLinkStatus.hidden,
+ async mount(params: AppMountParameters) {
+ const [coreStart] = await core.getStartServices();
+ const startServices = getServices(coreStart);
+ const { renderApp } = await import('./app');
+ return renderApp(startServices, params.element);
+ },
+ });
+
+ developerExamples.register({
+ appId: 'routingExample',
+ title: 'Routing',
+ description: `Examples show how to use core routing and fetch services to register and query your own custom routes.`,
+ links: [
+ {
+ label: 'IRouter',
+ href:
+ 'https://github.com/elastic/kibana/blob/master/docs/development/core/server/kibana-plugin-core-server.irouter.md',
+ iconType: 'logoGithub',
+ target: '_blank',
+ size: 's',
+ },
+ {
+ label: 'HttpHandler (core.http.fetch)',
+ href:
+ 'https://github.com/elastic/kibana/blob/master/docs/development/core/public/kibana-plugin-core-public.httphandler.md',
+ iconType: 'logoGithub',
+ target: '_blank',
+ size: 's',
+ },
+ ],
+ });
+ return {};
+ }
+
+ public start(core: CoreStart) {
+ return {};
+ }
+
+ public stop() {}
+}
diff --git a/examples/routing_example/public/post_message_example.tsx b/examples/routing_example/public/post_message_example.tsx
new file mode 100644
index 0000000000000..3004d66c4aa97
--- /dev/null
+++ b/examples/routing_example/public/post_message_example.tsx
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import React, { useCallback } from 'react';
+import { useState } from 'react';
+import {
+ EuiText,
+ EuiButton,
+ EuiLoadingSpinner,
+ EuiFieldText,
+ EuiCallOut,
+ EuiFormRow,
+ EuiTextArea,
+} from '@elastic/eui';
+import { HttpFetchError } from '../../../src/core/public';
+import { isError } from './is_error';
+import { Services } from './services';
+
+interface Props {
+ postMessage: Services['postMessage'];
+ addSuccessToast: Services['addSuccessToast'];
+}
+
+export function PostMessageRouteExample({ postMessage, addSuccessToast }: Props) {
+  const [error, setError] = useState<HttpFetchError | undefined>();
+ const [isPosting, setIsPosting] = useState(false);
+ const [message, setMessage] = useState('');
+ const [id, setId] = useState('');
+
+ const doFetch = useCallback(async () => {
+ if (isPosting) return;
+ setIsPosting(true);
+ const response = await postMessage(message, id);
+
+ if (response && isError(response)) {
+ setError(response);
+ } else {
+ setError(undefined);
+ addSuccessToast('Message was added!');
+ setMessage('');
+ setId('');
+ }
+
+ setIsPosting(false);
+ }, [isPosting, postMessage, addSuccessToast, setMessage, message, id]);
+
+  return (
+    <EuiText>
+      <h3>POST example with body</h3>
+      <p>
+        This example uses a simple POST route that takes a body parameter and an id as a param in
+        the route path.
+      </p>
+      <EuiFormRow label="Message id">
+        <EuiFieldText
+          value={id}
+          onChange={(e) => setId(e.target.value)}
+          data-test-subj="routingExampleSetMessageId"
+        />
+      </EuiFormRow>
+      <EuiFormRow label="Message">
+        <EuiTextArea
+          value={message}
+          onChange={(e) => setMessage(e.target.value)}
+        />
+      </EuiFormRow>
+      <EuiFormRow hasEmptyLabelSpace={true}>
+        <EuiButton
+          disabled={isPosting || message === '' || id === ''}
+          onClick={() => doFetch()}
+        >
+          {isPosting ? <EuiLoadingSpinner /> : 'Post message'}
+        </EuiButton>
+      </EuiFormRow>
+      {error !== undefined ? (
+        <EuiCallOut color="danger">
+          {error.message}
+        </EuiCallOut>
+      ) : null}
+    </EuiText>
+  );
+}
diff --git a/examples/routing_example/public/random_number_between_example.tsx b/examples/routing_example/public/random_number_between_example.tsx
new file mode 100644
index 0000000000000..9f75060193114
--- /dev/null
+++ b/examples/routing_example/public/random_number_between_example.tsx
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import React, { useCallback } from 'react';
+import { useState } from 'react';
+import {
+ EuiText,
+ EuiButton,
+ EuiLoadingSpinner,
+ EuiFieldText,
+ EuiCallOut,
+ EuiFormRow,
+} from '@elastic/eui';
+import { HttpFetchError } from '../../../src/core/public';
+import { isError } from './is_error';
+import { Services } from './services';
+
+interface Props {
+ fetchRandomNumberBetween: Services['fetchRandomNumberBetween'];
+}
+
+export function RandomNumberBetweenRouteExample({ fetchRandomNumberBetween }: Props) {
+  const [error, setError] = useState<HttpFetchError | undefined>();
+ const [randomNumber, setRandomNumber] = useState(0);
+ const [isFetching, setIsFetching] = useState(false);
+ const [maxInput, setMaxInput] = useState('10');
+
+ const doFetch = useCallback(async () => {
+ if (isFetching) return;
+ setIsFetching(true);
+ const response = await fetchRandomNumberBetween(Number.parseInt(maxInput, 10));
+
+ if (isError(response)) {
+ setError(response);
+ } else {
+ setRandomNumber(response);
+ }
+
+ setIsFetching(false);
+ }, [isFetching, maxInput, fetchRandomNumberBetween]);
+
+  return (
+    <EuiText>
+      <h3>GET example with query</h3>
+      <p>
+        This example uses a simple GET route that takes a query parameter in the request and
+        returns a single number.
+      </p>
+      <EuiFormRow label="Generate a random number between 0 and">
+        <EuiFieldText value={maxInput} onChange={(e) => setMaxInput(e.target.value)} />
+      </EuiFormRow>
+      <EuiFormRow hasEmptyLabelSpace={true}>
+        <EuiButton disabled={isFetching} onClick={() => doFetch()}>
+          {isFetching ? <EuiLoadingSpinner /> : 'Generate random number'}
+        </EuiButton>
+      </EuiFormRow>
+      {error !== undefined ? (
+        <EuiCallOut color="danger">{error.message}</EuiCallOut>
+      ) : null}
+      <p>Random number is: {randomNumber}</p>
+    </EuiText>
+  );
+}
diff --git a/examples/routing_example/public/random_number_example.tsx b/examples/routing_example/public/random_number_example.tsx
new file mode 100644
index 0000000000000..6b073826c854f
--- /dev/null
+++ b/examples/routing_example/public/random_number_example.tsx
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+import React, { useCallback } from 'react';
+import { useState } from 'react';
+import { EuiText, EuiButton, EuiLoadingSpinner, EuiCallOut } from '@elastic/eui';
+import { HttpFetchError } from '../../../src/core/public';
+import { Services } from './services';
+import { isError } from './is_error';
+
+interface Props {
+ fetchRandomNumber: Services['fetchRandomNumber'];
+}
+
+export function RandomNumberRouteExample({ fetchRandomNumber }: Props) {
+  const [error, setError] = useState<HttpFetchError | undefined>(undefined);
+ const [randomNumber, setRandomNumber] = useState(0);
+ const [isFetching, setIsFetching] = useState(false);
+
+ const doFetch = useCallback(async () => {
+ if (isFetching) return;
+ setIsFetching(true);
+ const response = await fetchRandomNumber();
+
+ if (isError(response)) {
+ setError(response);
+ } else {
+ setRandomNumber(response);
+ }
+
+ setIsFetching(false);
+ }, [isFetching, fetchRandomNumber]);
+
+  return (
+    <EuiText>
+      <h3>GET example</h3>
+      <p>
+        This example uses a simple GET route that takes no parameters or body in the request and
+        returns a single number.
+      </p>