diff --git a/.eslintignore b/.eslintignore index 7e14ed328c7ee..8e2ee7fbfd9d3 100644 --- a/.eslintignore +++ b/.eslintignore @@ -6,3 +6,4 @@ /src/core_plugins/console/public/tests/webpackShims /src/ui/public/utils/decode_geo_hash.js /src/core_plugins/timelion/public/webpackShims/jquery.flot.* +/ui_framework/doc_site/build diff --git a/.eslintrc b/.eslintrc index 7a623df06a6ca..334f46159f232 100644 --- a/.eslintrc +++ b/.eslintrc @@ -1,2 +1,10 @@ --- -extends: '@elastic/kibana' +extends: + - '@elastic/eslint-config-kibana' + - '@elastic/eslint-config-kibana/jest' + +settings: + import/resolver: + '@elastic/eslint-import-resolver-kibana': + rootPackageName: 'kibana' + kibanaPath: . diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 5ff8531861a06..e7ca77ccc22f0 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -4,10 +4,10 @@ are a few simple things to check before submitting your pull request that can help with the review process. You should delete these items from your submission, but they are here to help bring them to your attention. ---> - Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)? - Have you followed the [contributor guidelines](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md)? - If submitting code, have you included unit tests that cover the changes? - If submitting code, have you tested and built your code locally prior to submission with `npm test && npm run build`? - If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed. +--> diff --git a/.gitignore b/.gitignore index ea4c416671298..fbd0bc3f495b5 100644 --- a/.gitignore +++ b/.gitignore @@ -5,19 +5,17 @@ .node_binaries node_modules trash -/optimize/bundles -/optimize/testdev -/optimize/testUiServer +/optimize target /build .jruby .idea *.iml *.log -/test/screenshots/diff -/test/screenshots/failure -/test/screenshots/session -/test/screenshots/visual_regression_gallery.html +/test/*/screenshots/diff +/test/*/screenshots/failure +/test/*/screenshots/session +/test/*/screenshots/visual_regression_gallery.html /html_docs /esvm .htpasswd @@ -35,6 +33,7 @@ selenium *.swp *.swo *.out -ui_framework/doc_site/build/*.js* -ui_framework/jest/report +ui_framework/doc_site/build +!ui_framework/doc_site/build/index.html yarn.lock +.yo-rc.json diff --git a/.node-version b/.node-version index e1e5d1369adba..fac714a3220d7 100644 --- a/.node-version +++ b/.node-version @@ -1 +1 @@ -6.9.5 +6.11.1 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 40949008f0d35..0000000000000 --- a/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: node_js -node_js: 4 -env: - - CXX=g++-4.8 -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - g++-4.8 - -install: - - npm install - - npm run setup_kibana - -cache: - directories: - - node_modules - - ../kibana - -script: npm test diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b0ef2231b4b59..053f294ca0c43 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -37,16 +37,6 @@ A high level overview of our contributing guidelines. Don't fret, it's not as daunting as the table of contents makes it out to be! -## Effective issue reporting in Kibana - -At any given time the Kibana team at Elastic is working on dozens of features and enhancements, both for Kibana itself and for a few other projects at Elastic. 
When you file an issue, we'll take the time to digest it, consider solutions, and weigh its applicability to both the Kibana user base at large and the long-term vision for the project. Once we've completed that process, we will assign the issue a priority. - -- **P1**: A high-priority issue that affects virtually all Kibana users. Bugs that would cause incorrect results, security issues and features that would vastly improve the user experience for everyone. Work arounds for P1s generally don't exist without a code change. -- **P2**: A broadly applicable, high visibility, issue that enhances the usability of Kibana for a majority users. -- **P3**: Nice-to-have bug fixes or functionality. Work arounds for P3 items generally exist. -- **P4**: Niche and special interest issues that may not fit our core goals. We would take a high quality pull for this if implemented in such a way that it does not meaningfully impact other functionality or existing code. Issues may also be labeled P4 if they would be better implemented in Elasticsearch. -- **P5**: Highly niche or in opposition to our core goals. Should usually be closed. This doesn't mean we wouldn't take a pull for it, but if someone really wanted this they would be better off working on a plugin. The Kibana team will usually not work on P5 issues but may be willing to assist plugin developers on IRC. - ### Voicing the importance of an issue We seriously appreciate thoughtful comments. If an issue is important to you, add a comment with a solid write up of your use case and explain why it's so important. Please avoid posting comments comprised solely of a thumbs up emoji 👍. @@ -71,6 +61,12 @@ We enjoy working with contributors to get their code accepted. There are many ap ## How We Use Git and GitHub +### Forking + +We follow the [GitHub forking model](https://help.github.com/articles/fork-a-repo/) for collaborating +on Kibana code. This model assumes that you have a remote called `upstream` which points to the +official Kibana repo, which we'll refer to in later code snippets. + ### Branching * All work on the next major release goes into master. @@ -84,9 +80,57 @@ We enjoy working with contributors to get their code accepted. There are many ap * Feel free to make as many commits as you want, while working on a branch. * When submitting a PR for review, please perform an interactive rebase to present a logical history that's easy for the reviewers to follow. * Please use your commit messages to include helpful information on your changes, e.g. changes to APIs, UX changes, bugs fixed, and an explanation of *why* you made the changes that you did. -* Resolve merge conflicts by rebasing the target branch over your feature branch, and force-pushing. +* Resolve merge conflicts by rebasing the target branch over your feature branch, and force-pushing (see below for instructions). * When merging, we'll squash your commits into a single commit. +#### Rebasing and fixing merge conflicts + +Rebasing can be tricky, and fixing merge conflicts can be even trickier because it involves force pushing. This is all compounded by the fact that attempting to push a rebased branch remotely will be rejected by git, and you'll be prompted to do a `pull`, which is not at all what you should do (this will really mess up your branch's history). + +Here's how you should rebase master onto your branch, and how to fix merge conflicts when they arise. + +First, make sure master is up-to-date. 
+
+```
+git checkout master
+git fetch upstream
+git rebase upstream/master
+```
+
+Then, check out your branch and rebase master on top of it, which will apply all of the new commits on master to your branch, and then apply all of your branch's new commits after that.
+
+```
+git checkout name-of-your-branch
+git rebase master
+```
+
+You want to make sure there are no merge conflicts. If there are merge conflicts, git will pause the rebase and allow you to fix the conflicts before continuing.
+
+You can use `git status` to see which files contain conflicts. They'll be the ones that aren't staged for commit. Open those files, and look for where git has marked the conflicts. Resolve the conflicts so that the changes you want to make to the code have been incorporated in a way that doesn't destroy work that's been done in master. Refer to master's commit history on GitHub if you need to gain a better understanding of how code is conflicting and how best to resolve it.
+
+Once you've resolved all of the merge conflicts, use `git add -A` to stage them to be committed, and then use `git rebase --continue` to tell git to continue the rebase.
+
+When the rebase has completed, you will need to force push your branch because the history is now completely different than what's on the remote. **This is potentially dangerous** because it will completely overwrite what you have on the remote, so you need to be sure that you haven't lost any work when resolving merge conflicts. (If there weren't any merge conflicts, then you can force push without having to worry about this.)
+
+```
+git push origin name-of-your-branch --force
+```
+
+This will overwrite the remote branch with what you have locally. You're done!
+
+**Note that you should not run `git pull`**, for example in response to a push rejection like this:
+
+```
+! [rejected] name-of-your-branch -> name-of-your-branch (non-fast-forward)
+error: failed to push some refs to 'https://github.com/YourGitHubHandle/kibana.git'
+hint: Updates were rejected because the tip of your current branch is behind
+hint: its remote counterpart. Integrate the remote changes (e.g.
+hint: 'git pull ...') before pushing again.
+hint: See the 'Note about fast-forwards' in 'git push --help' for details.
+```
+
+Assuming you've successfully rebased and you're happy with the code, you should force push instead.
+
### What Goes Into a Pull Request

* Please include an explanation of your changes in your PR description.
@@ -130,10 +174,10 @@ npm run elasticsearch

If you're just getting started with `elasticsearch`, you could use the following command to populate your instance with a few fake logs to hit the ground running.

```bash
-npm run makelogs
+node scripts/makelogs
```

-> Make sure to execute `npm run makelogs` *after* elasticsearch is up and running!
+> Make sure to execute `node scripts/makelogs` *after* elasticsearch is up and running!

Start the development server.
```bash
@@ -158,19 +202,7 @@ In development mode, Kibana runs a customized version of [Webpack](http://webpac

#### Setting Up SSL

-When Kibana runs in development mode it will automatically use bundled SSL certificates. These certificates won't be trusted by your OS by default which will likely cause your browser to complain about the certificate.
-
-If you run into this issue, visit the development server and configure your OS to trust the certificate.
-
-- OSX: https://www.accuweaver.com/2014/09/19/make-chrome-accept-a-self-signed-certificate-on-osx/
-- Windows: http://stackoverflow.com/a/1412118
-- Linux: http://unix.stackexchange.com/a/90607
-
-There are a handful of other options, although we enthusiastically recommend that you trust our development certificate.
-
-- Click through the warning and accept future warnings
-- Supply your own certificate using the `config/kibana.dev.yml` file
-- Disable SSL in Kibana by starting the application with `npm start -- --no-ssl`
+Kibana includes a self-signed certificate that can be used for development purposes: `npm start -- --ssl`.

### Linting
@@ -218,13 +250,13 @@ npm run test:server

When you'd like to execute individual server-side test files, you can use the command below. Note that this command takes care of configuring Mocha with Babel compilation for you, and you'll be better off avoiding a globally installed `mocha` package. This command is great for development and for quickly identifying bugs.

```bash
-npm run mocha
+node scripts/mocha
```

-You could also add the `:debug` target so that `node` is run using the `--debug-brk` flag. You'll need to connect a remote debugger such as [`node-inspector`](https://github.com/node-inspector/node-inspector) to proceed in this mode.
+You could also add the `--debug` option so that `node` is run using the `--debug-brk` flag. You'll need to connect a remote debugger such as [`node-inspector`](https://github.com/node-inspector/node-inspector) to proceed in this mode.

```bash
-npm run mocha:debug
+node scripts/mocha --debug
```

With `npm run test:browser`, you can run only the browser tests. Coverage reports are available for browser tests by running `npm run test:coverage`. You can find the results under the `coverage/` directory that will be created upon completion.
@@ -245,10 +277,11 @@ npm run test:dev

This should work super if you're using the [Kibana plugin generator](https://github.com/elastic/generator-kibana-plugin). If you're not using the generator, well, you're on your own. We suggest you look at how the generator works.

-To run the tests for just your particular plugin, assuming you plugin lives outside of the `plugins directory`, use the following command.
+To run the tests for just your particular plugin, run the following commands from your plugin's directory:

```bash
-npm run test:dev -- --kbnServer.testsBundle.pluginId=some_special_plugin --kbnServer.plugin-path=../some_special_plugin
+npm run test:server
+npm run test:browser -- --dev # remove the --dev flag to run them once and close
```

### Cross-browser Compatibility
@@ -262,21 +295,19 @@ npm run test:dev -- --kbnServer.testsBundle.pluginId=some_special_plugin --kbnSe
* Open VMWare and go to Window > Virtual Machine Library. Unzip the virtual machine and drag the .vmx file into your Virtual Machine Library.
* Right-click on the virtual machine you just added to your library and select "Snapshots...", and then click the "Take" button in the modal that opens. You can roll back to this snapshot when the VM expires in 90 days.
* In System Preferences > Sharing, change your computer name to be something simple, e.g. "computer".
-* Run Kibana with `npm start -- --no-ssl --host=computer.local` (substituting your computer name).
+* Run Kibana with `npm start -- --host=computer.local` (substituting your computer name).
* Now you can run your VM, open the browser, and navigate to `http://computer.local:5601` to test Kibana.
#### Running Browser Automation Tests The following will start Kibana, Elasticsearch and the chromedriver for you. To run the functional UI tests use the following commands -If you want to run the functional UI tests one time and exit, use the following command. This is used by the CI systems and is great for quickly checking that things pass. It is essentially a combination of the next two tasks. This supports options `--grep=foo` for only running tests that match a regular expression, and `--appSuites=management` for running tests for a specific application. - ```bash npm run test:ui ``` -In order to start the server required for the `test:ui:runner` tasks, use the following command. Once the server is started `test:ui:runner` can be run multiple times without waiting for the server to start. +In order to start the server required for the `node scripts/functional_test_runner` tasks, use the following command. Once the server is started `node scripts/functional_test_runner` can be run multiple times without waiting for the server to start. ```bash npm run test:ui:server @@ -285,9 +316,11 @@ npm run test:ui:server To execute the front-end browser tests, enter the following. This requires the server started by the `test:ui:server` task. ```bash -npm run test:ui:runner +node scripts/functional_test_runner ``` +To filter these tests, use `--grep=foo` for only running tests that match a regular expression. + To run these browser tests against against some other Elasticsearch and Kibana instance you can set these environment variables and then run the test runner. Here's an example to run against an Elastic Cloud instance (note that you should run the same branch of tests as the version of Kibana you're testing); @@ -303,17 +336,12 @@ export TEST_ES_HOSTNAME=aaa5d22032d76805fcce724ed9d9f5a2.us-east-1.aws.found.io export TEST_ES_PORT=9200 export TEST_ES_USER=elastic export TEST_ES_PASS= -npm run test:ui:runner +node scripts/functional_test_runner ``` ##### Browser Automation Notes -- Using Page Objects pattern (https://theintern.github.io/intern/#writing-functional-test) -- At least the initial tests for the Settings, Discover, and Visualize tabs all depend on a very specific set of logstash-type data (generated with makelogs). Since that is a static set of data, all the Discover and Visualize tests use a specific Absolute time range. This guarantees the same results each run. -- These tests have been developed and tested with Chrome and Firefox browser. In theory, they should work on all browsers (that's the benefit of Intern using Leadfoot). -- These tests should also work with an external testing service like https://saucelabs.com/ or https://www.browserstack.com/ but that has not been tested. -- https://theintern.github.io/ -- https://theintern.github.io/leadfoot/module-leadfoot_Element.html +[Read about the `FunctionalTestRunner`](https://www.elastic.co/guide/en/kibana/current/development-functional-tests.html) to learn more about how you can run and develop functional tests for Kibana core and plugins. ### Building OS packages @@ -374,4 +402,4 @@ Remember, someone is blocked by a pull awaiting review, make it count. Be thorou 1. **Hand it off** If you're the first reviewer and everything looks good but the changes are more than a few lines, hand the pull to someone else to take a second look. Again, try to find the right person to assign it to. 1. **Merge the code** When everything looks good, put in a `LGTM` (looks good to me) comment. Merge into the target branch. 
Check the labels on the pull to see if backporting is required, and perform the backport if so. -Thank you so much for reading our guidelines! :tada: \ No newline at end of file +Thank you so much for reading our guidelines! :tada: diff --git a/Gruntfile.js b/Gruntfile.js index 16157cf5e533c..d5903f9a36d14 100644 --- a/Gruntfile.js +++ b/Gruntfile.js @@ -12,7 +12,6 @@ module.exports = function (grunt) { plugins: __dirname + '/src/core_plugins', server: __dirname + '/src/server', target: __dirname + '/target', // location of the compressed build targets - testUtilsDir: __dirname + '/src/test_utils', configFile: __dirname + '/src/config/kibana.yml', karmaBrowser: (function () { @@ -57,7 +56,7 @@ module.exports = function (grunt) { init: true, config: config, loadGruntTasks: { - pattern: ['grunt-*', '@*/grunt-*', 'gruntify-*', '@*/gruntify-*', 'intern'] + pattern: ['grunt-*', '@*/grunt-*', 'gruntify-*', '@*/gruntify-*'] } }); diff --git a/README.md b/README.md index ee02f6b1dd0cd..a9524d7caee8d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Kibana 6.0.0-alpha1 +# Kibana 7.0.0-alpha1 Kibana is your window into the [Elastic Stack](https://www.elastic.co/products). Specifically, it's an open source ([Apache Licensed](LICENSE.md)), @@ -40,14 +40,13 @@ out an open PR: ### Snapshot Builds -For the daring, snapshot builds are available. These builds are created nightly and have undergone no formal QA, so they should never be run in production. +For the daring, snapshot builds are available. These builds are created nightly and have undergone no formal QA, so they should never be run in production. All builds are 64 bit. | platform | | | --- | --- | -| OSX | [tar](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-darwin-x86_64.tar.gz) | -| Linux x64 | [tar](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-linux-x86_64.tar.gz) [deb](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-amd64.deb) [rpm](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-x86_64.rpm) | -| Linux x86 | [tar](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-linux-x86.tar.gz) [deb](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-i386.deb) [rpm](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-i686.rpm) | -| Windows | [zip](https://snapshots.elastic.co/downloads/kibana/kibana-6.0.0-alpha1-SNAPSHOT-windows-x86.zip) | +| OSX | [tar](https://snapshots.elastic.co/downloads/kibana/kibana-7.0.0-alpha1-SNAPSHOT-darwin-x86_64.tar.gz) | +| Linux | [tar](https://snapshots.elastic.co/downloads/kibana/kibana-7.0.0-alpha1-SNAPSHOT-linux-x86_64.tar.gz) [deb](https://snapshots.elastic.co/downloads/kibana/kibana-7.0.0-alpha1-SNAPSHOT-amd64.deb) [rpm](https://snapshots.elastic.co/downloads/kibana/kibana-7.0.0-alpha1-SNAPSHOT-x86_64.rpm) | +| Windows | [zip](https://snapshots.elastic.co/downloads/kibana/kibana-7.0.0-alpha1-SNAPSHOT-windows-x86_64.zip) | ## Documentation diff --git a/STYLEGUIDE.md b/STYLEGUIDE.md index 514399a60a13f..5064899781a9f 100644 --- a/STYLEGUIDE.md +++ b/STYLEGUIDE.md @@ -5,10 +5,13 @@ recommended for the development of all Kibana plugins. 
- [JavaScript](style_guides/js_style_guide.md)
- [Angular](style_guides/angular_style_guide.md)
+- [React](style_guides/react_style_guide.md)
- [CSS](style_guides/css_style_guide.md)
+- [SCSS](style_guides/scss_style_guide.md)
- [HTML](style_guides/html_style_guide.md)
- [API](style_guides/api_style_guide.md)
-- [Architecture](style_guides/architecture.md)
+- [Architecture](style_guides/architecture_style_guide.md)
+- [Accessibility](style_guides/accessibility_guide.md)

## Filenames
diff --git a/bin/kibana b/bin/kibana
index 93854e3f8e5fb..b8f2dd72c9ed5 100755
--- a/bin/kibana
+++ b/bin/kibana
@@ -21,4 +21,4 @@ if [ ! -x "$NODE" ]; then
exit 1
fi

-exec "${NODE}" $NODE_OPTIONS --no-warnings "${DIR}/src/cli" ${@}
+NODE_ENV=production exec "${NODE}" $NODE_OPTIONS --no-warnings "${DIR}/src/cli" ${@}
diff --git a/bin/kibana-plugin b/bin/kibana-plugin
index 02fc4e0d6124e..275664e849f1c 100755
--- a/bin/kibana-plugin
+++ b/bin/kibana-plugin
@@ -21,4 +21,4 @@ if [ ! -x "$NODE" ]; then
exit 1
fi

-exec "${NODE}" $NODE_OPTIONS --no-warnings "${DIR}/src/cli_plugin" "$@"
+NODE_ENV=production exec "${NODE}" $NODE_OPTIONS --no-warnings "${DIR}/src/cli_plugin" "$@"
diff --git a/bin/kibana-plugin.bat b/bin/kibana-plugin.bat
index 3a1554f6f780f..e50832cd62c9b 100644
--- a/bin/kibana-plugin.bat
+++ b/bin/kibana-plugin.bat
@@ -7,6 +7,8 @@ for %%I in ("%SCRIPT_DIR%..") do set DIR=%%~dpfI

set NODE=%DIR%\node\node.exe

+set NODE_ENV="production"
+
WHERE /Q node
IF %ERRORLEVEL% EQU 0 (
for /f "delims=" %%i in ('WHERE node') do set SYS_NODE=%%i
diff --git a/bin/kibana.bat b/bin/kibana.bat
index d126bb9d1c92b..e6ad88f6ce3f8 100644
--- a/bin/kibana.bat
+++ b/bin/kibana.bat
@@ -7,6 +7,8 @@ for %%I in ("%SCRIPT_DIR%..") do set DIR=%%~dpfI

set NODE=%DIR%\node\node.exe

+set NODE_ENV="production"
+
WHERE /Q node
IF %ERRORLEVEL% EQU 0 (
for /f "delims=" %%i in ('WHERE node') do set SYS_NODE=%%i
diff --git a/docs/console.asciidoc b/docs/console.asciidoc
index cddfc4cfb3aa3..f2fd12644dec1 100644
--- a/docs/console.asciidoc
+++ b/docs/console.asciidoc
@@ -1,8 +1,6 @@
[[console-kibana]]
-= Console
+== Console

-[partintro]
---
The Console plugin provides a UI to interact with the REST API of Elasticsearch. Console has two main areas: the *editor*, where you compose requests to Elasticsearch, and the *response* pane, which displays the responses to the request.
Enter the address of your Elasticsearch server in the text box on the top of screen. The default value of this address
@@ -61,11 +59,11 @@ image::images/introduction_output.png[Screenshot]

[float]
[[console-ui]]
-== The Console UI
+=== The Console UI

In this section you will find a more detailed description of UI of Console. The basic aspects of the UI are explained in the <> section.
---
+

include::console/multi-requests.asciidoc[]
diff --git a/docs/console/auto-formatting.asciidoc b/docs/console/auto-formatting.asciidoc
index 6c78cf92f4efb..21c730e3fd020 100644
--- a/docs/console/auto-formatting.asciidoc
+++ b/docs/console/auto-formatting.asciidoc
@@ -1,5 +1,5 @@
[[auto-formatting]]
-== Auto Formatting
+=== Auto Formatting

Console allows you to auto format messy requests.
To do so, position the cursor on the request you would like to format and select Auto Indent from the action menu: diff --git a/docs/console/configuring-console.asciidoc b/docs/console/configuring-console.asciidoc index 0a02043c1de4c..9ff833fc58d85 100644 --- a/docs/console/configuring-console.asciidoc +++ b/docs/console/configuring-console.asciidoc @@ -1,5 +1,5 @@ [[configuring-console]] -== Configuring Console +=== Configuring Console You can add the following options in the `config/kibana.yml` file: diff --git a/docs/console/history.asciidoc b/docs/console/history.asciidoc index a5d7573db04a6..f16e5fd47d1f3 100644 --- a/docs/console/history.asciidoc +++ b/docs/console/history.asciidoc @@ -1,5 +1,5 @@ [[history]] -== History +=== History Console maintains a list of the last 500 requests that were successfully executed by Elasticsearch. The history is available by clicking the clock icon on the top right side of the window. The icons opens the history panel diff --git a/docs/console/keyboard-shortcuts.asciidoc b/docs/console/keyboard-shortcuts.asciidoc index 6f3216cfddf8f..8120400bf8d09 100644 --- a/docs/console/keyboard-shortcuts.asciidoc +++ b/docs/console/keyboard-shortcuts.asciidoc @@ -1,10 +1,10 @@ [[keyboard-shortcuts]] -== Keyboard shortcuts +=== Keyboard shortcuts Console comes with a set of nifty keyboard shortcuts making working with it even more efficient. Here is an overview: [float] -=== General editing +==== General editing Ctrl/Cmd + I:: Auto indent current request. Ctrl + Space:: Open Auto complete (even if not typing). @@ -14,7 +14,7 @@ Ctrl/Cmd + Alt + L:: Collapse/expand current scope. Ctrl/Cmd + Option + 0:: Collapse all scopes but the current one. Expand by adding a shift. [float] -=== When auto-complete is visible +==== When auto-complete is visible Down arrow:: Switch focus to auto-complete menu. Use arrows to further select a term. Enter/Tab:: Select the currently selected or the top most term in auto-complete menu. diff --git a/docs/console/multi-requests.asciidoc b/docs/console/multi-requests.asciidoc index d385f1ae39ec1..ad4f4a6f27897 100644 --- a/docs/console/multi-requests.asciidoc +++ b/docs/console/multi-requests.asciidoc @@ -1,5 +1,5 @@ [[multi-requests]] -== Multiple Requests Support +=== Multiple Requests Support The Console editor allows writing multiple requests below each other. As shown in the <> section, you can submit a request to Elasticsearch by positioning the cursor and using the <>. Similarly diff --git a/docs/console/settings.asciidoc b/docs/console/settings.asciidoc index 1f49f32d2a45c..27865a98617e6 100644 --- a/docs/console/settings.asciidoc +++ b/docs/console/settings.asciidoc @@ -1,5 +1,5 @@ [[console-settings]] -== Settings +=== Settings Console has multiple settings you can set. All of them are available in the Settings panel. To open the panel click on the cog icon on the top right. 
diff --git a/docs/development/core-development.asciidoc b/docs/development/core-development.asciidoc
index 2b7709f003117..45f52c16ee332 100644
--- a/docs/development/core-development.asciidoc
+++ b/docs/development/core-development.asciidoc
@@ -5,6 +5,7 @@
* <>
* <>
* <>
+* <<development-functional-tests>>

include::core/development-basepath.asciidoc[]

@@ -12,4 +13,6 @@ include::core/development-dependencies.asciidoc[]

include::core/development-modules.asciidoc[]

-include::plugin/development-elasticsearch.asciidoc[]
+include::core/development-elasticsearch.asciidoc[]
+
+include::core/development-functional-tests.asciidoc[]
diff --git a/docs/development/plugin/development-elasticsearch.asciidoc b/docs/development/core/development-elasticsearch.asciidoc
similarity index 82%
rename from docs/development/plugin/development-elasticsearch.asciidoc
rename to docs/development/core/development-elasticsearch.asciidoc
index c4faefe249860..751a507753b91 100644
--- a/docs/development/plugin/development-elasticsearch.asciidoc
+++ b/docs/development/core/development-elasticsearch.asciidoc
@@ -3,7 +3,7 @@
Kibana exposes two clients on the server and browser for communicating with elasticsearch. There is an 'admin' client which is used for managing Kibana's state, and a 'data' client for all
-other requests. The clients use the https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html[elasticsearch.js library].
+other requests. The clients use the {client-ref}/javascript-api/current/index.html[elasticsearch.js library].

[float]
[[client-server]]
@@ -31,9 +31,8 @@ Browser clients are exposed through AngularJS services.
[source,javascript]
----
uiModules.get('kibana')
-.run(function (esAdmin, es) {
+.run(function (es) {
  es.ping()
-  .then(() => esAdmin.ping())
  .catch(err => {
    console.log('error pinging servers');
  });
diff --git a/docs/development/core/development-functional-tests.asciidoc b/docs/development/core/development-functional-tests.asciidoc
new file mode 100644
index 0000000000000..73419178860ac
--- /dev/null
+++ b/docs/development/core/development-functional-tests.asciidoc
@@ -0,0 +1,365 @@
+[[development-functional-tests]]
+=== Functional Testing
+
+We use functional tests to make sure the Kibana UI works as expected. They replace hours of manual testing by automating user interaction. To have better control over our functional test environment, and to make it more accessible to plugin authors, Kibana uses a tool called the `FunctionalTestRunner`.
+
+[float]
+==== Running functional tests
+
+The `FunctionalTestRunner` is very bare bones and gets most of its functionality from its config file, located at {blob}test/functional/config.js[test/functional/config.js]. If you’re writing a plugin you will have your own config file. See <<development-plugin-functional-tests,Functional Tests for Plugins>> for more info.
+
+Execute the `FunctionalTestRunner`'s script with node.js to run the tests with Kibana's default configuration:
+
+["source","shell"]
+-----------
+node scripts/functional_test_runner
+-----------
+
+When run without any arguments the `FunctionalTestRunner` automatically loads the configuration in the standard location, but you can override that behavior with the `--config` flag. There are also command line flags for `--bail` and `--grep`, which behave just like their mocha counterparts. The logging can also be customized with `--quiet`, `--debug`, or `--verbose` flags.
+
+Use the `--help` flag for more options.
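+
+For example, a run that overrides the config path, filters the tests, stops at the first failure, and prints debug-level logs might look like this (the grep pattern here is illustrative, not a real suite name):
+
+["source","shell"]
+-----------
+# load an alternate config, run only tests whose names match "console app",
+# bail on the first failure, and show debug logging
+node scripts/functional_test_runner --config test/functional/config.js --grep "console app" --bail --debug
+-----------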
+
+[float]
+==== Writing functional tests
+
+[float]
+===== Environment
+
+The tests are written in https://mochajs.org[mocha] using https://github.com/Automattic/expect.js[expect] for assertions.
+
+We use https://sites.google.com/a/chromium.org/chromedriver/[chromedriver], https://theintern.github.io/leadfoot[leadfoot], and https://github.com/theintern/digdug[digdug] for automating Chrome. When the `FunctionalTestRunner` launches, digdug opens a `Tunnel` which starts chromedriver and a stripped-down instance of Chrome. It also creates an instance of https://theintern.github.io/leadfoot/module-leadfoot_Command.html[Leadfoot's `Command`] class, which is available via the `remote` service. The `remote` communicates with Chrome through the digdug `Tunnel`. See the https://theintern.github.io/leadfoot/module-leadfoot_Command.html[leadfoot/Command API] docs for all the commands you can use with `remote`.
+
+The `FunctionalTestRunner` automatically transpiles functional tests using babel, so that tests can use the same ECMAScript features that Kibana source code uses. See {blob}style_guides/js_style_guide.md[style_guides/js_style_guide.md].
+
+[float]
+===== Definitions
+
+**Provider:**
+
+Code run by the `FunctionalTestRunner` is wrapped in a function so it can be passed around via config files and be parameterized. Any of these Provider functions may be asynchronous and should return/resolve-to the value they are meant to _provide_. Provider functions will always be called with a single argument: a provider API (see the <<functional_test_runner_provider_api,Provider API>>).
+
+A config provider:
+
+["source","js"]
+-----------
+// config and test files use `export default`
+export default function (/* { providerAPI } */) {
+  return {
+    // ...
+  }
+}
+-----------
+
+**Services**:::
+Services are named singleton values produced by a Service Provider. Tests and other services can retrieve service instances by asking for them by name. All functionality except the mocha API is exposed via services.
+
+**Page objects**:::
+Page objects are a special type of service that encapsulate behaviors common to a particular page or plugin. When you write your own plugin, you’ll likely want to add a page object (or several) that describes the common interactions your tests need to execute.
+
+**Test Files**:::
+The `FunctionalTestRunner`'s primary purpose is to execute test files. These files export a Test Provider that is called with a Provider API but is not expected to return a value. Instead Test Providers define a suite using https://mochajs.org/#bdd[mocha's BDD interface].
+
+**Test Suite**:::
+A test suite is a collection of tests defined by calling `describe()`, and then populated with tests and setup/teardown hooks by calling `it()`, `before()`, `beforeEach()`, etc. Every test file must define only one top level test suite, and test suites can have as many nested test suites as they like.
+
+[float]
+===== Anatomy of a test file
+
+The annotated example file below shows the basic structure every test suite uses. It starts by importing https://github.com/Automattic/expect.js[`expect.js`] and defining its default export: an anonymous Test Provider. The test provider then destructures the Provider API for the `getService()` and `getPageObjects()` functions. It uses these functions to collect the dependencies of this suite. The rest of the test file will look pretty normal to mocha.js users. `describe()`, `it()`, `before()` and the lot are used to define suites that happen to automate a browser via services and objects of type `PageObject`.
+
+["source","js"]
+----
+import expect from 'expect.js';
+// test files must `export default` a function that defines a test suite
+export default function ({ getService, getPageObjects }) {
+
+  // most test files will start off by loading some services
+  const retry = getService('retry');
+  const testSubjects = getService('testSubjects');
+  const esArchiver = getService('esArchiver');
+
+  // for historical reasons, PageObjects are loaded in a single API call
+  // and returned on an object with a key/value for each requested PageObject
+  const PageObjects = getPageObjects(['common', 'visualize']);
+
+  // every file must define a top-level suite before defining hooks/tests
+  describe('My Test Suite', () => {
+
+    // most suites start with a before hook that navigates to a specific
+    // app/page and restores some archives into elasticsearch with esArchiver
+    before(async () => {
+      await Promise.all([
+        // start with an empty .kibana index
+        esArchiver.load('empty_kibana'),
+        // load some basic log data only if the index doesn't exist
+        esArchiver.loadIfNeeded('makelogs')
+      ]);
+      // go to the page described by `apps.visualize` in the config
+      await PageObjects.common.navigateTo('visualize');
+    });
+
+    // right after the before() hook definition, add the teardown steps
+    // that will tidy up elasticsearch for other test suites
+    after(async () => {
+      // we unload the empty_kibana archive but not the makelogs
+      // archive because we don't make any changes to it, and subsequent
+      // suites could use it if they call `.loadIfNeeded()`.
+      await esArchiver.unload('empty_kibana');
+    });
+
+    // This series of tests illustrate how tests generally verify
+    // one step of a larger process and then move on to the next in
+    // a new test, each step building on top of the previous
+    it('Vis Listing Page is empty');
+    it('Create a new vis');
+    it('Shows new vis in listing page');
+    it('Opens the saved vis');
+    it('Respects time filter changes');
+    it(...
+  });
+
+}
+----
+
+[float]
+[[functional_test_runner_provider_api]]
+==== Provider API
+
+The first and only argument to all providers is a Provider API Object. This object can be used to load service/page objects and config/test files.
+
+Within config files the API has the following properties:
+
+[horizontal]
+`log`::: An instance of the {blob}src/utils/tooling_log/tooling_log.js[`ToolingLog`] that is ready for use
+`readConfigFile(path)`::: Returns a promise that will resolve to a Config instance that provides the values from the config file at `path`
+
+Within service and PageObject Providers the API is:
+
+[horizontal]
+`getService(name)`::: Load and return the singleton instance of a service by name
+`getPageObjects(names)`::: Load the singleton instances of `PageObject`s and collect them on an object where each name is the key to the singleton instance of that PageObject
+
+Within a test Provider the API is exactly the same as the service providers' API but with an additional method:
+
+[horizontal]
+`loadTestFile(path)`::: Load the test file at path in place.
+Use this method to nest suites from other files into a higher-level suite.
+
+[float]
+==== Service Index
+
+[float]
+===== Built-in Services
+
+The `FunctionalTestRunner` comes with three built-in services:
+
+**config:**:::
+* Source: {blob}src/functional_test_runner/lib/config/config.js[src/functional_test_runner/lib/config/config.js]
+* Schema: {blob}src/functional_test_runner/lib/config/schema.js[src/functional_test_runner/lib/config/schema.js]
+* Use `config.get(path)` to read any value from the config file
+
+**log:**:::
+* Source: {blob}src/utils/tooling_log/tooling_log.js[src/utils/tooling_log/tooling_log.js]
+* `ToolingLog` instances are readable streams. The instance provided by this service is automatically piped to stdout by the `FunctionalTestRunner` CLI
+* `log.verbose()`, `log.debug()`, `log.info()`, `log.warning()` all work just like console.log but produce more organized output
+
+**lifecycle:**:::
+* Source: {blob}src/functional_test_runner/lib/lifecycle.js[src/functional_test_runner/lib/lifecycle.js]
+* Designed primarily for use in services
+* Exposes lifecycle events for basic coordination. Handlers can return a promise and resolve/fail asynchronously
+* Phases include: `beforeLoadTests`, `beforeTests`, `beforeEachTest`, `cleanup`, `phaseStart`, `phaseEnd`
+
+[float]
+===== Kibana Services
+
+The Kibana functional tests define the vast majority of the actual functionality used by tests.
+
+**retry:**:::
+* Source: {blob}test/functional/services/retry.js[test/functional/services/retry.js]
+* Helpers for retrying operations
+* Popular methods:
+** `retry.try(fn)` - execute `fn` in a loop until it succeeds or the default try timeout elapses
+** `retry.tryForTime(ms, fn)` - execute `fn` in a loop until it succeeds or `ms` milliseconds elapses
+
+**testSubjects:**:::
+* Source: {blob}test/functional/services/test_subjects.js[test/functional/services/test_subjects.js]
+* Test subjects are elements that are tagged specifically for selecting from tests
+* Use `testSubjects` over CSS selectors when possible
+* Usage:
+** Tag your test subject with a `data-test-subj` attribute:
++
+["source","html"]
+-----------
+<button data-test-subj="containerButton">Click me</button>
+-----------
++
+** Click this button using the `testSubjects` helper:
++
+["source","js"]
+-----------
+await testSubjects.click('containerButton');
+-----------
++
+* Popular methods:
+** `testSubjects.find(testSubjectSelector)` - Find a test subject in the page; throw if it can't be found after some time
+** `testSubjects.click(testSubjectSelector)` - Click a test subject in the page; throw if it can't be found after some time
+
+**find:**:::
+* Source: {blob}test/functional/services/find.js[test/functional/services/find.js]
+* Helpers for `remote.findBy*` methods that log and manage timeouts
+* Popular methods:
+** `find.byCssSelector()`
+** `find.allByCssSelector()`
+
+**kibanaServer:**:::
+* Source: {blob}test/functional/services/kibana_server/kibana_server.js[test/functional/services/kibana_server/kibana_server.js]
+* Helpers for interacting with Kibana's server
+* Commonly used methods:
+** `kibanaServer.uiSettings.update()`
+** `kibanaServer.version.get()`
+** `kibanaServer.status.getOverallState()`
+
+**esArchiver:**:::
+* Source: {blob}test/functional/services/es_archiver.js[test/functional/services/es_archiver.js]
+* Load/unload archives created with the `esArchiver`
+* Popular methods:
+** `esArchiver.load(name)`
+** `esArchiver.loadIfNeeded(name)`
+** `esArchiver.unload(name)`
+
+**docTable:**:::
+* Source: {blob}test/functional/services/doc_table.js[test/functional/services/doc_table.js]
+* Helpers for interacting with doc tables
+
+**pointSeriesVis:**:::
+* Source: {blob}test/functional/services/point_series_vis.js[test/functional/services/point_series_vis.js]
+* Helpers for interacting with point series visualizations
+
+**Low-level utilities:**:::
+* es
+** Source: {blob}test/functional/services/es.js[test/functional/services/es.js]
+** Elasticsearch client
+** Higher level options: `kibanaServer.uiSettings` or `esArchiver`
+* remote
+** Source: {blob}test/functional/services/remote/remote.js[test/functional/services/remote/remote.js]
+** Instance of https://theintern.github.io/leadfoot/module-leadfoot_Command.html[Leadfoot's `Command`] class
+** Responsible for all communication with the browser
+** Higher level options: `testSubjects`, `find`, and `PageObjects.common`
+** See the https://theintern.github.io/leadfoot/module-leadfoot_Command.html[leadfoot/Command API] for the full API
+
+[float]
+===== Custom Services
+
+Services are intentionally generic. They can be literally anything (even nothing). Some services have helpers for interacting with specific types of UI elements, like `pointSeriesVis`, and others are more foundational, like `log` or `config`. Whenever you want to provide some functionality in a reusable package, consider making a custom service.
+
+To create a custom service `somethingUseful`:
+
+* Create a `test/functional/services/something_useful.js` file that looks like this:
++
+["source","js"]
+-----------
+// Services are defined by Provider functions that receive the ServiceProviderAPI
+export function SomethingUsefulProvider({ getService }) {
+  const log = getService('log');
+
+  class SomethingUseful {
+    doSomething() {
+    }
+  }
+  return new SomethingUseful();
+}
+-----------
++
+* Re-export your provider from `services/index.js`
+* Import it into `test/functional/config.js` and add it to the services config:
++
+["source","js"]
+-----------
+import { SomethingUsefulProvider } from './services';
+
+export default function () {
+  return {
+    // … truncated ...
+    services: {
+      somethingUseful: SomethingUsefulProvider
+    }
+  }
+}
+-----------
+
+[float]
+==== PageObjects
+
+The purpose for each PageObject is pretty self-explanatory. The visualize PageObject provides helpers for interacting with the visualize app, dashboard is the same for the dashboard app, and so on.
+
+One exception is the "common" PageObject. A holdover from the intern implementation, the common PageObject is a collection of helpers useful across pages. Now that we have shareable services, and those services can be shared with other `FunctionalTestRunner` configurations, we will continue to move functionality out of the common PageObject and into services.
+
+Please add new methods to existing or new services rather than further expanding the CommonPage class.
+
+[float]
+==== Gotchas
+
+Remember that you can’t run an individual test in the file (`it` block) because the whole `describe` needs to be run in order. There should only be one top level `describe` in a file.
+
+[float]
+===== Functional Test Timing
+
+Another important gotcha is writing stable tests by being mindful of timing. All methods on `remote` run asynchronously. It’s better to write interactions that wait for changes on the UI to appear before moving onto the next step.
+
+For example, rather than writing an interaction that simply clicks a button, write an interaction with a higher-level purpose in mind:
+
+Bad example: `PageObjects.app.clickButton()`
+
+["source","js"]
+-----------
+class AppPage {
+  // what can people who call this method expect from the
+  // UI after the promise resolves? Since the reaction to most
+  // clicks is asynchronous the behavior is dependent on timing
+  // and likely to cause tests that fail unexpectedly
+  async clickButton () {
+    await testSubjects.click('menuButton');
+  }
+}
+-----------
+
+Good example: `PageObjects.app.openMenu()`
+
+["source","js"]
+-----------
+class AppPage {
+  // unlike `clickButton()`, callers of `openMenu()` know
+  // the state that the UI will be in before they move on to
+  // the next step
+  async openMenu () {
+    await testSubjects.click('menuButton');
+    await testSubjects.exists('menu');
+  }
+}
+-----------
+
+Writing in this way will ensure your test timings are not flaky or based on assumptions about UI updates after interactions.
+
+[float]
+==== Debugging
+
+From the command line run:
+
+["source","shell"]
+-----------
+node --debug-brk --inspect scripts/functional_test_runner
+-----------
+
+This prints out a URL that you can visit in Chrome and debug your functional tests in the browser.
+
+You can also see additional logs in the terminal by running the `FunctionalTestRunner` with the `--debug` or `--verbose` flag. Add more logs with statements in your tests like:
+
+["source","js"]
+-----------
+// load the log service
+const log = getService('log');
+
+// log.debug only writes when using the `--debug` or `--verbose` flag.
+log.debug('done clicking menu');
+-----------
\ No newline at end of file
diff --git a/docs/development/core/development-modules.asciidoc b/docs/development/core/development-modules.asciidoc
index 38ef870ca7e51..eb08c698cf5e8 100644
--- a/docs/development/core/development-modules.asciidoc
+++ b/docs/development/core/development-modules.asciidoc
@@ -38,9 +38,7 @@ Kibana uses Webpack to bundle Kibana's dependencies.

Here is how import/require statements are resolved to a file:

-NOTE: if you're familiar with the node.js algorithm, the changes are in *2.ii* and *3.i.f* to *3.i.g*
-
-. Pick an algorithm
+. Check the beginning of the module path
* if the path starts with a '.'
** append it the directory of the current file
** proceed to *3*
@@ -64,17 +62,11 @@ NOTE: if you're familiar with the node.js algorithm, the changes are in *2.ii* a
* the first of the following paths that resolves to a **file** is our match
** path + '.js'
** path + '.json'
- ** path + '.jsx'
- ** path + '.less'
** path
** path/${basename(path)} + '.js'
** path/${basename(path)} + '.json'
- ** path/${basename(path)} + '.jsx'
- ** path/${basename(path)} + '.less'
** path/${basename(path)}
** path/index + '.js'
** path/index + '.json'
- ** path/index + '.jsx'
- ** path/index + '.less'
** path/index
- * if none of the above paths matches then an error is thrown
+ * if none of the above paths matches then an error is thrown
\ No newline at end of file
diff --git a/docs/development/plugin-development.asciidoc b/docs/development/plugin-development.asciidoc
index d86479685fafe..c930e5fbc92c9 100644
--- a/docs/development/plugin-development.asciidoc
+++ b/docs/development/plugin-development.asciidoc
@@ -8,8 +8,11 @@ The Kibana plugin interfaces are in a state of constant development.  We cannot

* <>
* <>
+* <<development-plugin-functional-tests>>

include::plugin/development-plugin-resources.asciidoc[]

include::plugin/development-uiexports.asciidoc[]
+
+include::plugin/development-plugin-functional-tests.asciidoc[]
diff --git a/docs/development/plugin/development-internationalization.asciidoc b/docs/development/plugin/development-internationalization.asciidoc
index 2550187828838..621489e4a7461 100644
--- a/docs/development/plugin/development-internationalization.asciidoc
+++ b/docs/development/plugin/development-internationalization.asciidoc
@@ -32,8 +32,7 @@ Contributing Language Packs
For this example, we will demonstrate translation into Maltese (Language code `mt`).

Add-on functionality for Kibana is implemented with plug-in modules. Refer to
-https://www.elastic.co/guide/en/kibana/current/kibana-plugins.html[Kibana
-Plugins] for more details.
+<<kibana-plugins,Kibana Plugins>> for more details.

* Fork the `kibana` source, and ensure you have an up to date copy of the source.
@@ -170,8 +169,8 @@ New Plugin Authors

Add-on functionality for Kibana is implemented with plug-in
modules. Refer to
-https://www.elastic.co/guide/en/kibana/current/kibana-plugins.html[Kibana
-Plugins] for more details. It is recommended that when creating a plugin
+<<kibana-plugins,Kibana Plugins>> for more details. It is recommended that when creating a plugin
you enable translations (see link:#Enabling%20Translations%20on%20Existing%20Plugins[Enabling Translations on Existing Plugins], above).

[[enabling-translation-in-a-plugin]]
diff --git a/docs/development/plugin/development-plugin-functional-tests.asciidoc b/docs/development/plugin/development-plugin-functional-tests.asciidoc
new file mode 100644
index 0000000000000..aec24075f4368
--- /dev/null
+++ b/docs/development/plugin/development-plugin-functional-tests.asciidoc
@@ -0,0 +1,91 @@
+[[development-plugin-functional-tests]]
+=== Functional Tests for Plugins
+
+Plugins use the `FunctionalTestRunner` by running it out of the Kibana repo. Ensure that your Kibana Development Environment is set up properly before continuing.
+
+[float]
+==== Writing your own configuration
+
+Every project or plugin should have its own `FunctionalTestRunner` config file. Just like Kibana's, this config file will define all of the test files to load, providers for Services and PageObjects, as well as configuration options for certain services.
+
+To get started, copy and paste this example to `test/functional/config.js`:
+
+["source","js"]
+-----------
+import { resolve } from 'path';
+import { MyServiceProvider } from './services/my_service';
+import { MyAppPageProvider } from './services/my_app_page';
+
+// allow overriding the default kibana directory
+// using the KIBANA_DIR environment variable
+const KIBANA_CONFIG_PATH = resolve(process.env.KIBANA_DIR || '../kibana', 'test/functional/config.js');
+
+// the default export of config files must be a config provider
+// that returns an object with the project's config values
+export default async function ({ readConfigFile }) {
+
+  // read the Kibana config file so that we can utilize some of
+  // its services and PageObjects
+  const kibanaConfig = await readConfigFile(KIBANA_CONFIG_PATH);
+
+  return {
+    // list paths to the files that contain your plugin's tests
+    testFiles: [
+      resolve(__dirname, './my_test_file.js'),
+    ],
+
+    // define the name and providers for services that should be
+    // available to your tests. If you don't specify anything here
+    // only the built-in services will be available
+    services: {
+      ...kibanaConfig.get('services'),
+      myService: MyServiceProvider,
+    },
+
+    // just like services, PageObjects are defined as a map of
+    // names to Providers. Merge in Kibana's or pick specific ones
+    pageObjects: {
+      management: kibanaConfig.get('pageObjects.management'),
+      myApp: MyAppPageProvider,
+    },
+
+    // the apps section defines the urls that
+    // `PageObjects.common.navigateTo(appKey)` will use.
+    // Merge urls for your plugin with the urls defined in
+    // Kibana's config in order to use this helper
+    apps: {
+      ...kibanaConfig.get('apps'),
+      myApp: {
+        pathname: '/app/my_app',
+      }
+    },
+
+    // choose where esArchiver should load archives from
+    esArchiver: {
+      directory: resolve(__dirname, './es_archives'),
+    },
+
+    // choose where screenshots should be saved
+    screenshots: {
+      directory: resolve(__dirname, './tmp/screenshots'),
+    }
+
+    // more settings, like timeouts, mochaOpts, etc are
+    // defined in the config schema. See {blob}src/functional_test_runner/lib/config/schema.js[src/functional_test_runner/lib/config/schema.js]
+  };
+}
+
+-----------
+
+From the root of your repo you should now be able to run the `FunctionalTestRunner` script from your plugin project.
+
+["source","shell"]
+-----------
+node ../kibana/scripts/functional_test_runner
+-----------
+
+[float]
+==== Using esArchiver
+
+We're working on documentation for this, but for now the best place to look is the original {pull}10359[pull request].
+
diff --git a/docs/discover.asciidoc b/docs/discover.asciidoc
index bbecf6b8b9afc..32b4c4f9e042d 100644
--- a/docs/discover.asciidoc
+++ b/docs/discover.asciidoc
@@ -9,7 +9,7 @@ You can also see the number of documents that match the search query and get fie
configured for the selected index pattern, the distribution of documents over time is displayed in a histogram at the top of the page.
-image::images/Discover-Start-Annotated.jpg[Discover]
+image::images/Discover-Start-Annotated.png[Discover]

--
include::discover/set-time-filter.asciidoc[]
@@ -20,4 +20,6 @@ include::discover/field-filter.asciidoc[]

include::discover/document-data.asciidoc[]

+include::discover/context.asciidoc[]
+
include::discover/viewing-field-stats.asciidoc[]
diff --git a/docs/discover/context.asciidoc b/docs/discover/context.asciidoc
new file mode 100644
index 0000000000000..2492222ae132f
--- /dev/null
+++ b/docs/discover/context.asciidoc
@@ -0,0 +1,86 @@
+[[document-context]]
+== Viewing Document Context
+
+For certain applications it can be useful to inspect a window of documents
+surrounding a specific event. The context view enables you to do just that for
+index patterns that are configured to contain time-based events.
+
+To show the context surrounding an anchor document, click the *Expand* button
+image:images/ExpandButton.jpg[Expand Button] to the left of the document's
+table entry and then click the *View surrounding documents* link.
+
+image::images/Expanded-Document.png[Expanded Document]
+{nbsp}
+
+The context view displays a number of documents before and after the anchor
+document. The anchor document itself is highlighted in blue. The view is sorted
+by the time field specified in the index pattern configuration and uses the
+same set of columns as the Discover view the context was opened from. If there
+are multiple documents with the same time field value, the internal document
+order is used as a secondary sorting criterion by default.
+
+[NOTE]
+--
+The field used for tiebreaking in case of equal time field values can be
+configured using the advanced setting `context:tieBreakerFields` in
+<<advanced-options,*Management > Advanced Settings*>>, which defaults to the
+`_doc` field. The value of this setting can be a comma-separated list of field
+names, which will be checked in sequence for suitability when a context is
+about to be displayed. The first suitable field is then used as the tiebreaking
+field. A field is suitable if the field exists and is sortable in the index
+pattern the context is based on.
+
+While not required, it is recommended to only
+use fields which have {ref}/doc-values.html[doc values] enabled to achieve
+good performance and avoid unnecessary {ref}/modules-fielddata.html[field
+data] usage. Common examples for suitable fields include log line numbers,
+monotonically increasing counters and high-precision timestamps.
+--
+
+image::images/Discover-ContextView.png[Context View]
+
+NOTE: The number of documents displayed by default can be configured
+via the `context:defaultSize` setting in <<advanced-options,*Management >
+Advanced Settings*>>.
+
+=== Changing the Context Size
+
+You can change the number of documents displayed before and after the anchor
+document independently.
+
+To increase the number of displayed documents that are newer than the anchor
+document, click the *Load 5 more* button above the document list or enter the
+desired number into the input box to the right of the button.
+
+image::images/Discover-ContextView-SizePicker-Newer.png[]
+{nbsp}
+
+To increase the number of displayed documents that are older than the anchor
+document, click the *Load 5 more* button below the document list or enter the
+desired number into the input box to the right of the button.
+
+image::images/Discover-ContextView-SizePicker-Older.png[]
+{nbsp}
+
+NOTE: The default number of documents loaded with each button click can be
+configured via the `context:step` setting in <<advanced-options,*Management >
+Advanced Settings*>>.
+
+=== Filtering the Context
+
+Depending on how the documents are partitioned into index patterns, the context
+view might contain a large number of documents not related to the event under
+investigation. In order to adapt the focus of the context view to the task at
+hand, you can use filters to restrict the documents considered by Kibana for
+display in the context view.
+
+When switching from the discover view to the context view, the previously
+applied filters are carried over. Pinned filters remain active while normal
+filters are copied in a disabled state. You can selectively re-enable them to
+refine your context view.
+
+New filters can be added via the *Add a filter* link in the filter bar, by
+clicking the filter icons appearing when hovering over a field, or by expanding
+documents and clicking the filter icons in the table.
+
+image::images/Discover-ContextView-FilterMontage.png[]
diff --git a/docs/discover/document-data.asciidoc b/docs/discover/document-data.asciidoc
index a6b3fc3bade03..a85489a947cea 100644
--- a/docs/discover/document-data.asciidoc
+++ b/docs/discover/document-data.asciidoc
@@ -18,8 +18,9 @@
image::images/Expanded-Document.png[]

To view the original JSON document (pretty-printed), click the *JSON* tab.

-To view the document data as a separate page, click the document link. You can
-bookmark and share this link to provide direct access to a particular document.
+To view the document data as a separate page, click the *View single document*
+link. You can bookmark and share this link to provide direct access to a
+particular document.

To display or hide a field's column in the Documents table, click the
image:images/add-column-button.png[Add Column] *Toggle column in table* button.
diff --git a/docs/discover/field-filter.asciidoc b/docs/discover/field-filter.asciidoc
index d17499979ea74..e9b05f6a0fbe8 100644
--- a/docs/discover/field-filter.asciidoc
+++ b/docs/discover/field-filter.asciidoc
@@ -4,15 +4,15 @@
You can filter the search results to display only those documents that contain
a particular value in a field. You can also create negative filters that
exclude documents that contain the specified field value.

-You add field filters from the Fields list or the Documents table. In addition
-to creating positive and negative filters, the Documents table enables you to
-filter on whether or not a field is present. The applied
-filters are shown below the Query bar. Negative filters are shown in red.
+You add field filters from the Fields list, the Documents table, or by manually
+adding a filter. In addition to creating positive and negative filters, the
+Documents table enables you to filter on whether or not a field is present. The
+applied filters are shown below the Query bar. Negative filters are shown in red.

To add a filter from the Fields list:

. Click the name of the field you want to filter on. This displays the top
-five values for that field.
+five values for that field.
+
image::images/filter-field.jpg[]
. To add a positive filter, click the *Positive Filter* button
@@ -26,7 +26,7 @@ To add a filter from the Documents table:

. Expand a document in the Documents table by clicking the *Expand* button
image:images/ExpandButton.jpg[Expand Button] to the left of the document's
-table entry.
+table entry.
+
image::images/Expanded-Document.png[]
. To add a positive filter, click the *Positive Filter* button
@@ -40,6 +40,40 @@ field name. This excludes documents that contain that value in the field.
*Exists* button image:images/ExistsButton.jpg[Exists Button] to the right of the field name. This includes only those documents that contain the field.
+To manually add a filter:
+
+. Click *Add Filter*. A popup will be displayed for you to create the filter.
++
+image::images/add_filter.png[]
+. Choose a field to filter by. This list of fields will include fields from the
+index pattern you are currently querying against.
++
+image::images/add_filter_field.png[]
+. Choose an operation for your filter.
++
+image::images/add_filter_operator.png[]
+The following operators can be selected:
+[horizontal]
+`is`:: Filter where the value for the field matches the given value.
+`is not`:: Filter where the value for the field does not match the given value.
+`is one of`:: Filter where the value for the field matches one of the specified values.
+`is not one of`:: Filter where the value for the field does not match any of the specified values.
+`is between`:: Filter where the value for the field is in the given range.
+`is not between`:: Filter where the value for the field is not in the given range.
+`exists`:: Filter where any value is present for the field.
+`does not exist`:: Filter where no value is present for the field.
+. Choose the value(s) for your filter.
++
+image::images/add_filter_value.png[]
+. (Optional) Specify a label for the filter. If you specify a label, it will be
+displayed below the query bar instead of the filter definition.
+. Click *Save*. The filter will be applied to your search and be displayed below
+the query bar.
+
+NOTE: To make the filter editor more user-friendly, you can enable the `filterEditor:suggestValues` advanced setting.
+Enabling this will cause the editor to suggest values from your indices if you are filtering against an aggregatable
+field. However, this is not recommended for extremely large datasets, as it can result in long queries.
+
[float]
[[filter-pinning]]
=== Managing Filters
@@ -55,33 +89,38 @@ removing it. Click again to reenable the filter. Diagonal stripes indicate that a filter is disabled. image:images/filter-pin.png[] Pin Filter :: Pin the filter. Pinned filters persist when you switch contexts in Kibana. For example, you can pin a filter
-in Discover and it remains in place when you switch to Visualize.
+in Discover and it remains in place when you switch to Visualize.
Note that a filter is based on a particular index field--if the indices being
-searched don't contain the field in a pinned filter, it has no effect.
-image:images/filter-toggle.png[] Toggle Filter :: Switch from a positive
-filter to a negative filter and vice-versa.
+searched don't contain the field in a pinned filter, it has no effect.
+image:images/filter-toggle.png[] Invert Filter :: Switch from a positive
+filter to a negative filter and vice-versa.
image:images/filter-delete.png[] Remove Filter :: Remove the filter.
-image:images/filter-custom.png[] Edit Filter :: <<filter-edit,Edit the filter>> definition. Enables you to manually update the filter query and
+image:images/filter-custom.png[] Edit Filter :: <<filter-edit,Edit the filter>> definition. Enables you to manually update the filter and
specify a label for the filter.
-To apply a filter action to all of the applied filters,
+To apply a filter action to all of the applied filters,
click *Actions* and select the action.
[float]
[[filter-edit]]
=== Editing a Filter
-You can edit a filter to directly modify the filter query that is performed
-to filter your search results. This enables you to create more complex
-filters that are based on multiple fields.
- -image::images/filter-custom-json.png[] +You can edit a filter by changing the field, operator, or value associated +with the filter (see the Add Filter section above), or by directly modifying +the filter query that is performed to filter your search results. This +enables you to create more complex filters that are based on multiple fields. -  +. To edit the filter query, first click the edit button for the filter, then +click *Edit Query DSL*. ++ +image::images/edit_filter_query.png[] +. You can then edit the query for the filter. ++ +image::images/edit_filter_query_json.png[] -For example, you could use a {es-ref}/query-dsl-bool-query.html[bool query] -to create a filter for the sample log data that displays the hits that -originated from Canada or China that resulted in a 404 error: +For example, you could use a +{ref}/query-dsl-bool-query.html[bool query] to create a filter for the +sample log data that displays the hits that originated from Canada or China that resulted in a 404 error: ========== [source,json] @@ -108,4 +147,4 @@ originated from Canada or China that resulted in a 404 error: ] } } -========== \ No newline at end of file +========== diff --git a/docs/discover/kuery.asciidoc b/docs/discover/kuery.asciidoc new file mode 100644 index 0000000000000..af19622484485 --- /dev/null +++ b/docs/discover/kuery.asciidoc @@ -0,0 +1,115 @@ +[[kuery-query]] +=== Kuery + +experimental[This functionality is experimental and may be changed or removed completely in a future release.] + +Kuery is a new query language built specifically for Kibana. It aims to simplify the search experience in Kibana +and enable the creation of helpful features like auto-complete, seamless migration of saved searches, additional +query types, and more. Kuery is a basic experience today but we're hard at work building these additional features on +top of the foundation Kuery provides. + +Kueries are built with functions. Many functions take a field name as their first argument. Extremely common functions have shorthand notations. + +`is("response", 200)` will match documents where the response field matches the value 200. +`response:200` does the same thing. `:` is an alias for the `is` function. + +Multiple search terms are separated by whitespace. + +`response:200 extension:php` will match documents where response matches 200 and extension matches php. + +All terms must match by default. The language supports boolean logic with and/or operators. The above query is equivalent to `response:200 and extension:php`. + +We can make terms optional by using `or`. + +`response:200 or extension:php` will match documents where response matches 200, extension matches php, or both. + +By default, `and` has a higher precedence than `or`. + +`response:200 and extension:php or extension:css` will match documents where response is 200 and extension is php OR documents where extension is css and response is anything. + +We can override the default precedence with grouping. + +`response:200 and (extension:php or extension:css)` will match documents where response is 200 and extension is either php or css. + +Terms can be inverted by prefixing them with `!`. + +`!response:200` will match all documents where response is not 200. + +Entire groups can also be inverted. + +`response:200 and !(extension:php or extension:css)` + +Some query functions have named arguments. + +`range("bytes", gt=1000, lt=8000)` will match documents where the bytes field is greater than 1000 and less than 8000. 
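Conceptually, each Kuery function builds an Elasticsearch query of a similar shape. A rough DSL equivalent of the `range` example above, shown purely as an illustrative sketch (the `logstash-*` index name is an assumption, not part of the language), would be:

[source,js]
GET /logstash-*/_search
{
  "query": {
    "range": {
      "bytes": {
        "gt": 1000,
        "lt": 8000
      }
    }
  }
}

//CONSOLE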
+ +Quotes are generally optional if your terms don't have whitespace or special characters. `range(bytes, gt=1000, lt=8000)` +would also be a valid query. + +[NOTE] +============ +Terms without fields will be matched against all fields. For example, a query for `response:200` will search for the value 200 +in the response field, but a query for just `200` will search for 200 across all fields in your index. +============ + +==== Function Reference + +[horizontal] +Function Name:: Description + +and:: +Purpose::: Match all given sub-queries +Alias::: `and` as a binary operator +Examples::: +* `and(response:200, extension:php)` +* `response:200 and extension:php` + +or:: +Purpose::: Match one or more sub-queries +Alias::: `or` as a binary operator +Examples::: +* `or(extension:css, extension:php)` +* `extension:css or extension:php` + +not:: +Purpose::: Negates a sub-query +Alias::: `!` as a prefix operator +Examples::: +* `not(response:200)` +* `!response:200` + +is:: +Purpose::: Matches a field with a given term +Alias::: `:` +Examples::: +* `is("response", 200)` +* `response:200` + +range:: +Purpose::: Match a field against a range of values. +Alias::: `:[]` +Examples::: +* `range("bytes", gt=1000, lt=8000)` +* `bytes:[1000 to 8000]` +Named arguments::: +* `gt` - greater than +* `gte` - greater than or equal to +* `lt` - less than +* `lte` - less than or equal to + +exists:: +Purpose::: Match documents where a given field exists +Examples::: `exists("response")` + +geoBoundingBox:: +Purpose::: Creates a geo_bounding_box query +Examples::: +* `geoBoundingBox("coordinates", topLeft="40.73, -74.1", bottomRight="40.01, -71.12")` (whitespace between lat and lon is ignored) +Named arguments::: +* `topLeft` - the top left corner of the bounding box as a "lat, lon" string +* `bottomRight` - the bottom right corner of the bounding box as a "lat, lon" string + +geoPolygon:: +Purpose::: Creates a geo_polygon query given 3 or more points as "lat, lon" +Examples::: +* `geoPolygon("geo.coordinates", "40.97, -127.26", "24.20, -84.375", "40.44, -66.09")` \ No newline at end of file diff --git a/docs/discover/search.asciidoc b/docs/discover/search.asciidoc index 20a626681ccbd..f63680d15255c 100644 --- a/docs/discover/search.asciidoc +++ b/docs/discover/search.asciidoc @@ -1,10 +1,10 @@ [[search]] == Searching Your Data You can search the indices that match the current index pattern by entering -your search criteria in the Query bar. You can perform a simple text search, -use the Lucene https://lucene.apache.org/core/2_9_4/queryparsersyntax.html[ -query syntax], or use the full JSON-based {es-ref}query-dsl.html[Elasticsearch -Query DSL]. +your search criteria in the Query bar. You can use the Lucene +https://lucene.apache.org/core/2_9_4/queryparsersyntax.html[ +query syntax], the full JSON-based {ref}/query-dsl.html[Elasticsearch +Query DSL] or Kuery, an experimental new query language built specifically for Kibana. When you submit a search request, the histogram, Documents table, and Fields list are updated to reflect the search results. The total number of hits @@ -19,6 +19,17 @@ To search your data, enter your search criteria in the Query bar and press *Enter* or click *Search* image:images/search-button.jpg[] to submit the request to Elasticsearch. +[NOTE] +=========== +By default, Kibana will accept either the Lucene query syntax or the +Elasticsearch Query DSL in the Query bar. 
In order to use the new Kuery language, you must enable language switching in *Management > Advanced Settings* via the `search:queryLanguage:switcher:enable` option. You can also change the default language with the `search:queryLanguage` setting.
===========
+
+[[lucene-query]]
+=== Lucene Query Syntax
* To perform a free text search, simply enter a text string. For example, if you're searching web server logs, you could enter `safari` to search all fields for the term `safari`.
@@ -36,54 +47,56 @@ status codes, you could enter `status:[400 TO 499]`.
codes and have an extension of `php` or `html`, you could enter `status:[400 TO 499] AND (extension:php OR extension:html)`.
-NOTE: These examples use the Lucene query syntax. You can also submit queries
-using the Elasticsearch Query DSL. For examples, see
-{es-ref}query-dsl-query-string-query.html#query-string-syntax[query string syntax]
-in the Elasticsearch Reference.
+For more detailed information about the Lucene query syntax, see the
+{ref}/query-dsl-query-string-query.html#query-string-syntax[Query String Query]
+docs.
-[float]
-[[save-search]]
-=== Saving a Search
+NOTE: These examples use the Lucene query syntax. When Lucene is selected as your
+query language, you can also submit queries using the {ref}/query-dsl.html[Elasticsearch Query DSL].
+
+include::kuery.asciidoc[]
+
+[[save-open-search]]
+=== Saving and Opening Searches
Saving searches enables you to reload them into Discover and use them as the basis for <<visualize, visualizations>>. Saving a search saves both the search query string and the currently selected index pattern.
+==== Saving a Search
To save the current search:
. Click *Save* in the Kibana toolbar.
. Enter a name for the search and click *Save*.
-You can import, export and delete saved searches from *Management/Kibana/Saved Objects*.
+You can import, export, and delete saved searches from *Management/Kibana/Saved Objects*.
-[float]
-[[load-search]]
-=== Opening a Saved Search
+==== Opening a Saved Search
To load a saved search into Discover:
. Click *Open* in the Kibana toolbar.
. Select the search you want to open.
If the saved search is associated with a different index pattern than is currently
-selected, opening the saved search also changes the selected index pattern.
+selected, opening the saved search changes the selected index pattern. The query language
+used for the saved search will also be automatically selected.
+
-[float]
[[select-pattern]]
=== Changing Which Indices You're Searching
When you submit a search request, the indices that match the currently-selected index pattern are searched. The current index pattern is shown below the toolbar. To change which indices you are searching, click the index pattern and select a
-different index pattern.
+different index pattern. For more information about index patterns, see <<settings-create-pattern, Creating an Index Pattern>>.
-[float]
[[autorefresh]]
=== Refreshing the Search Results
As more documents are added to the indices you're searching, the search results shown in Discover and used to display visualizations get stale. You can configure a refresh interval to periodically resubmit your searches to
-retrieve the latest results.
+retrieve the latest results.
To enable auto refresh:
@@ -100,5 +113,3 @@ click *Pause*.
NOTE: If auto refresh is not enabled, you can manually refresh visualizations by clicking *Refresh*.
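For readers who want to see the correspondence between the query bar and the DSL explicitly: a Lucene expression entered in the query bar behaves like a {ref}/query-dsl-query-string-query.html[query string query]. The sketch below is illustrative only; the `logstash-*` index name is an assumed example and not something the examples above depend on.

[source,js]
GET /logstash-*/_search
{
  "query": {
    "query_string": {
      "query": "status:[400 TO 499] AND (extension:php OR extension:html)"
    }
  }
}

//CONSOLE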
- - diff --git a/docs/discover/set-time-filter.asciidoc b/docs/discover/set-time-filter.asciidoc index c90ccc69e95cc..26998af00b9dc 100644 --- a/docs/discover/set-time-filter.asciidoc +++ b/docs/discover/set-time-filter.asciidoc @@ -16,7 +16,8 @@ To set a time filter with the Time Picker: image::images/time-filter.jpg[Time filter shortcuts] . To specify a time filter relative to the current time, click *Relative* and specify the start time as a number of seconds, minutes, hours, days, -months, or years ago. +months, or years. You can also specify the end time relative +to the current time. Relative times can be in the past or future. + image::images/time-filter-relative.jpg[Relative time filter] . To specify both the start and end times for the time filter, click @@ -33,8 +34,12 @@ To set a time filter from the histogram, do one of the following: the cursor over the background of the chart--the cursor changes to a plus sign when you hover over a valid start point. +To move forward/backward in time, click the arrows to the left or right of the Time Picker: + +image::images/time-picker-step.jpg[Move backwards in time] + You can use the browser Back button to undo your changes. The displayed time range and interval are shown on the histogram. By default, the interval is set automatically based on the time range. To use a different -interval, click the link and select an interval. +interval, click the link and select an interval. diff --git a/docs/getting-started.asciidoc b/docs/getting-started.asciidoc index b88c999612034..582578600b6c6 100644 --- a/docs/getting-started.asciidoc +++ b/docs/getting-started.asciidoc @@ -8,18 +8,18 @@ This tutorial shows you how to: * Load a sample data set into Elasticsearch * Define an index pattern -* Explore the sample data with <> -* Set up <> of the sample data -* Assemble visualizations into a <> +* Explore the sample data with {kibana-ref}/discover.html[Discover] +* Set up {kibana-ref}/visualize.html[_visualizations_] of the sample data +* Assemble visualizations into a {kibana-ref}/dashboard.html[Dashboard] Before you begin, make sure you've <> and established -a <>. +a {kibana-ref}/connect-to-elasticsearch.html[connection to Elasticsearch]. You might also be interested in these video tutorials: * https://www.elastic.co/blog/kibana-4-video-tutorials-part-1[High-level Kibana introduction, pie charts] * https://www.elastic.co/blog/kibana-4-video-tutorials-part-2[Data discovery, bar charts, and line charts] -* https://www.elastic.co/blog/kibana-4-video-tutorials-part-3[Tile maps] +* https://www.elastic.co/blog/kibana-4-video-tutorials-part-3[Coordinate maps] * https://www.elastic.co/blog/kibana-4-video-tutorials-part-4[Embedding Kibana visualizations] -- diff --git a/docs/getting-started/tutorial-define-index.asciidoc b/docs/getting-started/tutorial-define-index.asciidoc index b8de9e9fe26cb..821d979104997 100644 --- a/docs/getting-started/tutorial-define-index.asciidoc +++ b/docs/getting-started/tutorial-define-index.asciidoc @@ -8,7 +8,7 @@ case, a typical index name contains the date in YYYY.MM.DD format, and an index like `logstash-2015.05*`. For this tutorial, any pattern that matches the name of an index we've loaded will work. Open a browser and -navigate to `localhost:5601`. Click the *Settings* tab, then the *Indices* tab. Click *Add New* to define a new index +navigate to `localhost:5601`. Click the *Management* tab, then the *Index Patterns* tab. Click *Add New* to define a new index pattern. 
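Index patterns use the same wildcard semantics as Elasticsearch index expressions. If you want to double-check which indices a pattern such as `logstash-*` will match before creating it, one option (purely illustrative, not a required tutorial step) is to ask Elasticsearch directly:

[source,js]
GET /_cat/indices/logstash-*?v

//CONSOLE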
Two of the sample data sets, the Shakespeare plays and the financial accounts, don't contain time-series data. Make sure the *Index contains time-based events* box is unchecked when you create index patterns for these data sets. Specify `shakes*` as the index pattern for the Shakespeare data set and click *Create* to define the index pattern, then diff --git a/docs/getting-started/tutorial-discovering.asciidoc b/docs/getting-started/tutorial-discovering.asciidoc index c825391962e72..a0d7655f00dfd 100644 --- a/docs/getting-started/tutorial-discovering.asciidoc +++ b/docs/getting-started/tutorial-discovering.asciidoc @@ -5,10 +5,10 @@ Click *Discover* in the side navigation to display Kibana's data discovery funct image::images/tutorial-discover.png[] -In the query bar, you can enter an -{es-ref}query-dsl-query-string-query.html#query-string-syntax[Elasticsearch +In the query bar, you can enter an +{ref}/query-dsl-query-string-query.html#query-string-syntax[Elasticsearch query] to search your data. You can explore the results in Discover and create -visualizations of saved searches in Visualize. +visualizations of saved searches in Visualize. The current index pattern is displayed beneath the query bar. The index pattern determines which indices are searched when you submit a query. To search a @@ -28,7 +28,7 @@ in the query bar: account_number:<100 AND balance:>47500 This query returns all account numbers between zero and 99 with balances in -excess of 47,500. When searching the sample bank data, it returns 5 results: +excess of 47,500. When searching the sample bank data, it returns 5 results: Account numbers 8, 32, 78, 85, and 97. image::images/tutorial-discover-2.png[] diff --git a/docs/getting-started/tutorial-load-dataset.asciidoc b/docs/getting-started/tutorial-load-dataset.asciidoc index e7233e13c5dd9..2eef175490e15 100644 --- a/docs/getting-started/tutorial-load-dataset.asciidoc +++ b/docs/getting-started/tutorial-load-dataset.asciidoc @@ -4,7 +4,7 @@ The tutorials in this section rely on the following data sets: * The complete works of William Shakespeare, suitably parsed into fields. Download this data set by clicking here: - https://download.elastic.co/demos/kibana/gettingstarted/shakespeare.json[shakespeare.json]. + https://download.elastic.co/demos/kibana/gettingstarted/shakespeare_6.0.json[shakespeare.json]. * A set of fictitious accounts with randomly generated data. Download this data set by clicking here: https://download.elastic.co/demos/kibana/gettingstarted/accounts.zip[accounts.zip] * A set of randomly generated log files. Download this data set by clicking here: @@ -54,33 +54,32 @@ The schema for the logs data set has dozens of different fields, but the notable "@timestamp": "date" } -Before we load the Shakespeare and logs data sets, we need to set up {es-ref}mapping.html[_mappings_] for the fields. +Before we load the Shakespeare and logs data sets, we need to set up {ref}/mapping.html[_mappings_] for the fields. Mapping divides the documents in the index into logical groups and specifies a field's characteristics, such as the field's searchability or whether or not it's _tokenized_, or broken up into separate words. 
Use the following command in a terminal (e.g. `bash`) to set up a mapping for the Shakespeare data set:
-[source,shell]
-curl -XPUT http://localhost:9200/shakespeare -d '
+[source,js]
+PUT /shakespeare
{
- "mappings" : {
- "_default_" : {
- "properties" : {
- "speaker" : {"type": "string", "index" : "not_analyzed" },
- "play_name" : {"type": "string", "index" : "not_analyzed" },
- "line_id" : { "type" : "integer" },
- "speech_number" : { "type" : "integer" }
+ "mappings": {
+ "doc": {
+ "properties": {
+ "speaker": {"type": "keyword"},
+ "play_name": {"type": "keyword"},
+ "line_id": {"type": "integer"},
+ "speech_number": {"type": "integer"}
}
}
}
}
-';
+
+//CONSOLE
This mapping specifies the following qualities for the data set:
-* The _speaker_ field is a string that isn't analyzed. The string in this field is treated as a single unit, even if
-there are multiple words in the field.
-* The same applies to the _play_name_ field.
+* Because the _speaker_ and _play_name_ fields are keyword fields, they are not analyzed. The strings are treated as a single unit even if they contain multiple words.
* The _line_id_ and _speech_number_ fields are integers.
The logs data set requires a mapping to label the latitude/longitude pairs in the logs as geographic locations by
@@ -88,8 +87,8 @@ applying the `geo_point` type to those fields.
Use the following commands to establish `geo_point` mapping for the logs:
-[source,shell]
-curl -XPUT http://localhost:9200/logstash-2015.05.18 -d '
+[source,js]
+PUT /logstash-2015.05.18
{
"mappings": {
"log": {
@@ -105,10 +104,11 @@ curl -XPUT http://localhost:9200/logstash-2015.05.18 -d '
}
}
}
-';
-[source,shell]
-curl -XPUT http://localhost:9200/logstash-2015.05.19 -d '
+//CONSOLE
+
+[source,js]
+PUT /logstash-2015.05.19
{
"mappings": {
"log": {
@@ -124,10 +124,11 @@ curl -XPUT http://localhost:9200/logstash-2015.05.19 -d '
}
}
}
-';
-[source,shell]
-curl -XPUT http://localhost:9200/logstash-2015.05.20 -d '
+//CONSOLE
+
+[source,js]
+PUT /logstash-2015.05.20
{
"mappings": {
"log": {
@@ -143,22 +144,25 @@ curl -XPUT http://localhost:9200/logstash-2015.05.20 -d '
}
}
}
-';
+
+//CONSOLE
The accounts data set doesn't require any mappings, so at this point we're ready to use the Elasticsearch
-{es-ref}docs-bulk.html[`bulk`] API to load the data sets with the following commands:
+{ref}/docs-bulk.html[`bulk`] API to load the data sets with the following commands:
[source,shell]
-curl -XPOST 'localhost:9200/bank/account/_bulk?pretty' --data-binary @accounts.json
-curl -XPOST 'localhost:9200/shakespeare/_bulk?pretty' --data-binary @shakespeare.json
-curl -XPOST 'localhost:9200/_bulk?pretty' --data-binary @logs.jsonl
+curl -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/bank/account/_bulk?pretty' --data-binary @accounts.json
+curl -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/shakespeare/doc/_bulk?pretty' --data-binary @shakespeare_6.0.json
+curl -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/_bulk?pretty' --data-binary @logs.jsonl
These commands may take some time to execute, depending on the computing resources available.
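If you are curious what these files contain, each one is newline-delimited JSON in the shape the `bulk` API expects: an action line followed by a document line. For the accounts file, the entries are roughly of the following form; the field values shown here are invented for illustration.

[source,js]
{"index":{"_id":"1"}}
{"account_number":1,"balance":39225,"firstname":"Amber","age":32,"state":"IL"}
{"index":{"_id":"2"}}
{"account_number":2,"balance":28838,"firstname":"Roberta","age":22,"state":"TN"}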
Verify successful loading with the following command:
-[source,shell]
-curl 'localhost:9200/_cat/indices?v'
+[source,js]
+GET /_cat/indices?v
+
+//CONSOLE
You should see output similar to the following:
diff --git a/docs/getting-started/tutorial-visualizing.asciidoc b/docs/getting-started/tutorial-visualizing.asciidoc
index c2a599bebb249..9571f84e53071 100644
--- a/docs/getting-started/tutorial-visualizing.asciidoc
+++ b/docs/getting-started/tutorial-visualizing.asciidoc
@@ -1,27 +1,34 @@
[[tutorial-visualizing]]
== Visualizing Your Data
-To start visualizing your data, click *Visualize* in the side navigation:
-
-image::images/tutorial-visualize.png[]
+To start visualizing your data, click *Visualize* in the side navigation.
The *Visualize* tools enable you to view your data in several ways. For example, let's use that venerable visualization, the pie chart, to get some insight
-into the account balances in the sample bank account data.
+into the account balances in the sample bank account data. To get started, click the big blue
+**Create a visualization** button in the center of the screen.
+
+image::images/tutorial-visualize-landing.png[]
+
+There are a number of visualization types to choose from. Let's click the one
+called *Pie*.
-To get started, click *Pie chart* in the list of visualizations. You can build
-visualizations from saved searches, or enter new search criteria. To enter
+image::images/tutorial-visualize-wizard-step-1.png[]
+
+You can build visualizations from saved searches, or enter new search criteria. To enter
new search criteria, you first need to select an index pattern to specify what indices to search. We want to search the account data, so select the `ba*` index pattern.
+image::images/tutorial-visualize-wizard-step-2.png[]
+
The default search matches all documents. Initially, a single "slice" encompasses the entire pie:
image::images/tutorial-visualize-pie-1.png[]
-To specify what slices to display in the chart, you use an Elasticsearch
-{es-ref}search-aggregations.html[bucket aggregation]. A bucket aggregation
+To specify what slices to display in the chart, you use an Elasticsearch
+{ref}/search-aggregations.html[bucket aggregation]. A bucket aggregation
simply sorts the documents that match your search criteria into different categories, aka _buckets_. For example, the account data includes the balance of each account. Using a bucket aggregation, you can establish multiple ranges
@@ -33,7 +40,7 @@ To define a bucket for each range:
. Select *Range* from the *Aggregation* list.
. Select the *balance* field from the *Field* list.
. Click *Add Range* four times to bring the
-total number of ranges to six.
+total number of ranges to six.
. Define the following ranges:
+
[source,text]
@@ -56,10 +63,10 @@ age. By adding another bucket aggregation, you can see the ages of the account holders in each balance range:
. Click *Add sub-buckets* below the buckets list.
-. Click *Split Slices* in the buckets type list.
+. Click *Split Slices* in the buckets type list.
. Select *Terms* from the aggregation list.
. Select *age* from the field list.
-. Click *Apply changes* image:images/apply-changes-button.png[].
+. Click *Apply changes* image:images/apply-changes-button.png[].
Now you can see the breakdown of the account holders' ages displayed in a ring around the balance ranges.
@@ -72,7 +79,7 @@ Next, we're going to look at data in the Shakespeare data set.
Let's find out how
plays compare when it comes to the number of speaking parts and display the information in a bar chart:
-. Click *New* and select *Vertical bar chart*.
+. Click *New* and select *Vertical bar chart*.
. Select the `shakes*` index pattern. Since you haven't defined any buckets yet, you'll see a single big bar that shows the total count of documents that match the default wildcard query.
+
image::images/tutorial-visualize-bar-1.png[]
. To show the number of speaking parts per play along the y-axis, you need to
-configure the Y-axis {es-ref}search-aggregations.html[metric aggregation]. A metric
+configure the Y-axis {ref}/search-aggregations.html[metric aggregation]. A metric
aggregation computes metrics based on values extracted from the search results. To get the number of speaking parts per play, select the *Unique Count* aggregation and choose *speaker* from the field list. You can also give the axis a custom label, _Speaking Parts_.
-. To show the different plays along the x-axis, select the X-Axis buckets type,
+. To show the different plays along the x-axis, select the X-Axis buckets type,
select *Terms* from the aggregation list, and choose *play_name* from the field list. To list them alphabetically, select *Ascending* order. You can also give the axis a custom label, _Play Name_.
@@ -104,7 +111,7 @@ Hovering over each bar shows you the number of speaking parts for each play as a
off and configure other options for your visualizations, select the Visualization builder's *Options* tab. Now that you have a list of the smallest casts for Shakespeare plays, you might also be curious to see which of these
-plays makes the greatest demands on an individual actor by showing the maximum number of speeches for a given part.
+plays makes the greatest demands on an individual actor by showing the maximum number of speeches for a given part.
. Click *Add metrics* to add a Y-axis aggregation.
. Choose the *Max* aggregation and select the *speech_number* field.
@@ -122,11 +129,11 @@ make the differences stand out, starting the Y-axis at a value closer to the minimum.
Save this chart with the name _Bar Example_.
-Next, we're going to use a tile map chart to visualize geographic information in our log file sample data.
+Next, we're going to use a coordinate map chart to visualize geographic information in our log file sample data.
. Click *New*.
-. Select *Tile map*.
-. Select the `logstash-*` index pattern.
+. Select *Coordinate map*.
+. Select the `logstash-*` index pattern.
. Set the time window for the events we're exploring:
. Click the time picker in the Kibana toolbar.
. Click *Absolute*.
+
image::images/tutorial-timepicker.png[]
-. Once you've got the time range set up, click the *Go* button and close the time picker by
-clicking the small up arrow in the bottom right corner.
+. Once you've got the time range set up, click the *Go* button and close the time picker by
+clicking the small up arrow in the bottom right corner.
You'll see a map of the world, since we haven't defined any buckets yet: @@ -147,14 +154,14 @@ Your chart should now look like this: image::images/tutorial-visualize-map-2.png[] -You can navigate the map by clicking and dragging, zoom with the -image:images/viz-zoom.png[] buttons, or hit the *Fit Data Bounds* +You can navigate the map by clicking and dragging, zoom with the +image:images/viz-zoom.png[] buttons, or hit the *Fit Data Bounds* image:images/viz-fit-bounds.png[] button to zoom to the lowest level that includes all the points. You can also include or exclude a rectangular area by clicking the *Latitude/Longitude Filter* image:images/viz-lat-long-filter.png[] button and drawing a bounding box on the map. Applied filters are displayed below the query bar. Hovering over a filter displays controls to toggle, -pin, invert, or delete the filter. +pin, invert, or delete the filter. image::images/tutorial-visualize-map-3.png[] diff --git a/docs/getting-started/wrapping-up.asciidoc b/docs/getting-started/wrapping-up.asciidoc index 94aba02787870..6ba43345899c1 100644 --- a/docs/getting-started/wrapping-up.asciidoc +++ b/docs/getting-started/wrapping-up.asciidoc @@ -4,11 +4,11 @@ Now that you have a handle on the basics, you're ready to start exploring your own data with Kibana. -* See <> for more information about searching and filtering +* See {kibana-ref}/discover.html[Discover] for more information about searching and filtering your data. -* See <> for information about all of the visualization +* See {kibana-ref}/visualize.html[Visualize] for information about all of the visualization types Kibana has to offer. -* See <> for information about configuring Kibana +* See {kibana-ref}/management.html[Management] for information about configuring Kibana and managing your saved objects. -* See <> for information about the interactive +* See {kibana-ref}/console-kibana.html[Console] for information about the interactive console UI you can use to submit REST requests to Elasticsearch. 
diff --git a/docs/gs-index.asciidoc b/docs/gs-index.asciidoc new file mode 100644 index 0000000000000..a9c7bc3ae37ad --- /dev/null +++ b/docs/gs-index.asciidoc @@ -0,0 +1,27 @@ +[[kibana-guide]] += Kibana User Guide + +////////// +release-state can be: released | prerelease | unreleased +////////// +:release-state: released +:version: 5.4.0 +:major-version: 5.4 +:branch: 5.4 + +:docker-image: docker.elastic.co/kibana/kibana:{version} +:es-ref: https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ +:kibana-ref: https://www.elastic.co/guide/en/kibana/{branch} +:xpack-ref: https://www.elastic.co/guide/en/x-pack/current/ +:repo: https://github.com/elastic/kibana/ +:issue: {repo}issues/ +:pull: {repo}pull/ +:commit: {repo}commit/ +:security: https://www.elastic.co/community/security/ + + +include::introduction.asciidoc[] + +include::setup/install.asciidoc[] + +include::getting-started.asciidoc[] diff --git a/docs/images/Discover-ContextView-FilterMontage.png b/docs/images/Discover-ContextView-FilterMontage.png new file mode 100644 index 0000000000000..c990d314a6ba1 Binary files /dev/null and b/docs/images/Discover-ContextView-FilterMontage.png differ diff --git a/docs/images/Discover-ContextView-SizePicker-Newer.png b/docs/images/Discover-ContextView-SizePicker-Newer.png new file mode 100644 index 0000000000000..852cb22c1f27c Binary files /dev/null and b/docs/images/Discover-ContextView-SizePicker-Newer.png differ diff --git a/docs/images/Discover-ContextView-SizePicker-Older.png b/docs/images/Discover-ContextView-SizePicker-Older.png new file mode 100644 index 0000000000000..38cd9acd1bee0 Binary files /dev/null and b/docs/images/Discover-ContextView-SizePicker-Older.png differ diff --git a/docs/images/Discover-ContextView.png b/docs/images/Discover-ContextView.png new file mode 100644 index 0000000000000..11d4a59c26e55 Binary files /dev/null and b/docs/images/Discover-ContextView.png differ diff --git a/docs/images/Discover-Start-Annotated.jpg b/docs/images/Discover-Start-Annotated.jpg deleted file mode 100644 index b457a94e13325..0000000000000 Binary files a/docs/images/Discover-Start-Annotated.jpg and /dev/null differ diff --git a/docs/images/Discover-Start-Annotated.png b/docs/images/Discover-Start-Annotated.png new file mode 100644 index 0000000000000..6b68102e1ca33 Binary files /dev/null and b/docs/images/Discover-Start-Annotated.png differ diff --git a/docs/images/Expanded-Document.png b/docs/images/Expanded-Document.png index 057a0c96b9a74..ad2f0db1a7ff9 100644 Binary files a/docs/images/Expanded-Document.png and b/docs/images/Expanded-Document.png differ diff --git a/docs/images/add_filter.png b/docs/images/add_filter.png new file mode 100644 index 0000000000000..0591472c5c9ea Binary files /dev/null and b/docs/images/add_filter.png differ diff --git a/docs/images/add_filter_field.png b/docs/images/add_filter_field.png new file mode 100644 index 0000000000000..f2093ab94e727 Binary files /dev/null and b/docs/images/add_filter_field.png differ diff --git a/docs/images/add_filter_operator.png b/docs/images/add_filter_operator.png new file mode 100644 index 0000000000000..dc2355e8cb2b1 Binary files /dev/null and b/docs/images/add_filter_operator.png differ diff --git a/docs/images/add_filter_value.png b/docs/images/add_filter_value.png new file mode 100644 index 0000000000000..15eeab73943c6 Binary files /dev/null and b/docs/images/add_filter_value.png differ diff --git a/docs/images/autorefresh-intervals.png b/docs/images/autorefresh-intervals.png index 
f0e48126b32a1..b8c8edaf71592 100644 Binary files a/docs/images/autorefresh-intervals.png and b/docs/images/autorefresh-intervals.png differ diff --git a/docs/images/edit_filter_query.png b/docs/images/edit_filter_query.png new file mode 100644 index 0000000000000..5a0612f17eaf9 Binary files /dev/null and b/docs/images/edit_filter_query.png differ diff --git a/docs/images/edit_filter_query_json.png b/docs/images/edit_filter_query_json.png new file mode 100644 index 0000000000000..242f4610e097f Binary files /dev/null and b/docs/images/edit_filter_query_json.png differ diff --git a/docs/images/filter-allbuttons.png b/docs/images/filter-allbuttons.png index c43b746bbdf37..8bb86f53a5631 100644 Binary files a/docs/images/filter-allbuttons.png and b/docs/images/filter-allbuttons.png differ diff --git a/docs/images/filter-custom-json.png b/docs/images/filter-custom-json.png deleted file mode 100644 index 570ef8533e6c2..0000000000000 Binary files a/docs/images/filter-custom-json.png and /dev/null differ diff --git a/docs/images/introduction_output.png b/docs/images/introduction_output.png index e86729c6fbb6e..ab0174e32cce4 100644 Binary files a/docs/images/introduction_output.png and b/docs/images/introduction_output.png differ diff --git a/docs/images/introduction_screen.png b/docs/images/introduction_screen.png index 216d1c5503dfd..8c058b639ec8c 100644 Binary files a/docs/images/introduction_screen.png and b/docs/images/introduction_screen.png differ diff --git a/docs/images/regionmap.png b/docs/images/regionmap.png new file mode 100644 index 0000000000000..97f2594e8bee6 Binary files /dev/null and b/docs/images/regionmap.png differ diff --git a/docs/images/spy-open-button.png b/docs/images/spy-open-button.png new file mode 100644 index 0000000000000..d01f5095f66d2 Binary files /dev/null and b/docs/images/spy-open-button.png differ diff --git a/docs/images/spy-panel.png b/docs/images/spy-panel.png new file mode 100644 index 0000000000000..5346892f6262f Binary files /dev/null and b/docs/images/spy-panel.png differ diff --git a/docs/images/time-filter-absolute.jpg b/docs/images/time-filter-absolute.jpg index f26cd2261345f..bc54d57f0f737 100644 Binary files a/docs/images/time-filter-absolute.jpg and b/docs/images/time-filter-absolute.jpg differ diff --git a/docs/images/time-filter-relative.jpg b/docs/images/time-filter-relative.jpg index 243ddb0b0b52a..77beca3a3fd46 100644 Binary files a/docs/images/time-filter-relative.jpg and b/docs/images/time-filter-relative.jpg differ diff --git a/docs/images/time-filter.jpg b/docs/images/time-filter.jpg index cf750ad388794..e437f314d849d 100644 Binary files a/docs/images/time-filter.jpg and b/docs/images/time-filter.jpg differ diff --git a/docs/images/time-picker-step.jpg b/docs/images/time-picker-step.jpg new file mode 100644 index 0000000000000..90c749776bb5d Binary files /dev/null and b/docs/images/time-picker-step.jpg differ diff --git a/docs/images/timelion-conditional01.png b/docs/images/timelion-conditional01.png new file mode 100644 index 0000000000000..17c1478780c60 Binary files /dev/null and b/docs/images/timelion-conditional01.png differ diff --git a/docs/images/timelion-conditional02.png b/docs/images/timelion-conditional02.png new file mode 100644 index 0000000000000..9d11eaa26cc43 Binary files /dev/null and b/docs/images/timelion-conditional02.png differ diff --git a/docs/images/timelion-conditional03.png b/docs/images/timelion-conditional03.png new file mode 100644 index 0000000000000..f5dc36d8c2470 Binary files /dev/null and 
b/docs/images/timelion-conditional03.png differ diff --git a/docs/images/timelion-conditional04.png b/docs/images/timelion-conditional04.png new file mode 100644 index 0000000000000..b359aaade66c3 Binary files /dev/null and b/docs/images/timelion-conditional04.png differ diff --git a/docs/images/timelion-create01.png b/docs/images/timelion-create01.png new file mode 100644 index 0000000000000..735a6baab6aba Binary files /dev/null and b/docs/images/timelion-create01.png differ diff --git a/docs/images/timelion-create02.png b/docs/images/timelion-create02.png new file mode 100644 index 0000000000000..bc88f14ecd139 Binary files /dev/null and b/docs/images/timelion-create02.png differ diff --git a/docs/images/timelion-create03.png b/docs/images/timelion-create03.png new file mode 100644 index 0000000000000..eb5d365133596 Binary files /dev/null and b/docs/images/timelion-create03.png differ diff --git a/docs/images/timelion-customize01.png b/docs/images/timelion-customize01.png new file mode 100644 index 0000000000000..281a0c5533f27 Binary files /dev/null and b/docs/images/timelion-customize01.png differ diff --git a/docs/images/timelion-customize02.png b/docs/images/timelion-customize02.png new file mode 100644 index 0000000000000..059f9db2f6b8b Binary files /dev/null and b/docs/images/timelion-customize02.png differ diff --git a/docs/images/timelion-customize03.png b/docs/images/timelion-customize03.png new file mode 100644 index 0000000000000..52dda2ce34c9a Binary files /dev/null and b/docs/images/timelion-customize03.png differ diff --git a/docs/images/timelion-customize04.png b/docs/images/timelion-customize04.png new file mode 100644 index 0000000000000..3e57e31710c5c Binary files /dev/null and b/docs/images/timelion-customize04.png differ diff --git a/docs/images/timelion-math01.png b/docs/images/timelion-math01.png new file mode 100644 index 0000000000000..d6d5abe66959d Binary files /dev/null and b/docs/images/timelion-math01.png differ diff --git a/docs/images/timelion-math02.png b/docs/images/timelion-math02.png new file mode 100644 index 0000000000000..61bf64ad26fb0 Binary files /dev/null and b/docs/images/timelion-math02.png differ diff --git a/docs/images/timelion-math03.png b/docs/images/timelion-math03.png new file mode 100644 index 0000000000000..dad09242a5977 Binary files /dev/null and b/docs/images/timelion-math03.png differ diff --git a/docs/images/timelion-math04.png b/docs/images/timelion-math04.png new file mode 100644 index 0000000000000..8b85d897283f9 Binary files /dev/null and b/docs/images/timelion-math04.png differ diff --git a/docs/images/timelion-math05.png b/docs/images/timelion-math05.png new file mode 100644 index 0000000000000..9a9fce200ef8d Binary files /dev/null and b/docs/images/timelion-math05.png differ diff --git a/docs/images/timelion-math06.png b/docs/images/timelion-math06.png new file mode 100644 index 0000000000000..e8b0c0ccecfc8 Binary files /dev/null and b/docs/images/timelion-math06.png differ diff --git a/docs/images/timelion-save01.png b/docs/images/timelion-save01.png new file mode 100644 index 0000000000000..47a33c2d36d43 Binary files /dev/null and b/docs/images/timelion-save01.png differ diff --git a/docs/images/timelion-save02.png b/docs/images/timelion-save02.png new file mode 100644 index 0000000000000..348b084ee5259 Binary files /dev/null and b/docs/images/timelion-save02.png differ diff --git a/docs/images/tsvb-annotations.png b/docs/images/tsvb-annotations.png new file mode 100644 index 0000000000000..0542a867996a7 Binary files /dev/null 
and b/docs/images/tsvb-annotations.png differ diff --git a/docs/images/tsvb-data-tab-derivative-example.png b/docs/images/tsvb-data-tab-derivative-example.png new file mode 100644 index 0000000000000..677ad08e1ddff Binary files /dev/null and b/docs/images/tsvb-data-tab-derivative-example.png differ diff --git a/docs/images/tsvb-data-tab-label.png b/docs/images/tsvb-data-tab-label.png new file mode 100644 index 0000000000000..f00e9dbf2047d Binary files /dev/null and b/docs/images/tsvb-data-tab-label.png differ diff --git a/docs/images/tsvb-data-tab-series-options-time-series.png b/docs/images/tsvb-data-tab-series-options-time-series.png new file mode 100644 index 0000000000000..8ad9fc458e7fa Binary files /dev/null and b/docs/images/tsvb-data-tab-series-options-time-series.png differ diff --git a/docs/images/tsvb-data-tab-series-options.png b/docs/images/tsvb-data-tab-series-options.png new file mode 100644 index 0000000000000..db230f7be5e02 Binary files /dev/null and b/docs/images/tsvb-data-tab-series-options.png differ diff --git a/docs/images/tsvb-gauge.png b/docs/images/tsvb-gauge.png new file mode 100644 index 0000000000000..8bfba642db97a Binary files /dev/null and b/docs/images/tsvb-gauge.png differ diff --git a/docs/images/tsvb-markdown-tab.png b/docs/images/tsvb-markdown-tab.png new file mode 100644 index 0000000000000..50d489f26a8c4 Binary files /dev/null and b/docs/images/tsvb-markdown-tab.png differ diff --git a/docs/images/tsvb-markdown.png b/docs/images/tsvb-markdown.png new file mode 100644 index 0000000000000..a08839e4c7b0c Binary files /dev/null and b/docs/images/tsvb-markdown.png differ diff --git a/docs/images/tsvb-metric.png b/docs/images/tsvb-metric.png new file mode 100644 index 0000000000000..27d8ea53f9854 Binary files /dev/null and b/docs/images/tsvb-metric.png differ diff --git a/docs/images/tsvb-screenshot.png b/docs/images/tsvb-screenshot.png new file mode 100644 index 0000000000000..c51b82c8770e2 Binary files /dev/null and b/docs/images/tsvb-screenshot.png differ diff --git a/docs/images/tsvb-timeseries.png b/docs/images/tsvb-timeseries.png new file mode 100644 index 0000000000000..29101c8376aff Binary files /dev/null and b/docs/images/tsvb-timeseries.png differ diff --git a/docs/images/tsvb-top-n.png b/docs/images/tsvb-top-n.png new file mode 100644 index 0000000000000..1a4077c6aaecd Binary files /dev/null and b/docs/images/tsvb-top-n.png differ diff --git a/docs/images/tutorial-visualize-landing.png b/docs/images/tutorial-visualize-landing.png new file mode 100644 index 0000000000000..a8400aacaaba2 Binary files /dev/null and b/docs/images/tutorial-visualize-landing.png differ diff --git a/docs/images/tutorial-visualize-map-2.png b/docs/images/tutorial-visualize-map-2.png index 8b903f59e9700..08464b55ee6c2 100644 Binary files a/docs/images/tutorial-visualize-map-2.png and b/docs/images/tutorial-visualize-map-2.png differ diff --git a/docs/images/tutorial-visualize-map-3.png b/docs/images/tutorial-visualize-map-3.png index e21b643b8179f..c0c47c7eac3f9 100644 Binary files a/docs/images/tutorial-visualize-map-3.png and b/docs/images/tutorial-visualize-map-3.png differ diff --git a/docs/images/tutorial-visualize-wizard-step-1.png b/docs/images/tutorial-visualize-wizard-step-1.png new file mode 100644 index 0000000000000..798709a42d945 Binary files /dev/null and b/docs/images/tutorial-visualize-wizard-step-1.png differ diff --git a/docs/images/tutorial-visualize-wizard-step-2.png b/docs/images/tutorial-visualize-wizard-step-2.png new file mode 100644 index 
0000000000000..143392f3e234a Binary files /dev/null and b/docs/images/tutorial-visualize-wizard-step-2.png differ diff --git a/docs/images/tutorial-visualize.png b/docs/images/tutorial-visualize.png deleted file mode 100644 index 80264164114b6..0000000000000 Binary files a/docs/images/tutorial-visualize.png and /dev/null differ diff --git a/docs/index-shared1.asciidoc b/docs/index-shared1.asciidoc new file mode 100644 index 0000000000000..266049a581150 --- /dev/null +++ b/docs/index-shared1.asciidoc @@ -0,0 +1,16 @@ + +include::{asciidoc-dir}/../../shared/versions.asciidoc[] + +:docker-image: docker.elastic.co/kibana/kibana:{version} +:repo: https://github.com/elastic/kibana/ +:issue: {repo}issues/ +:pull: {repo}pull/ +:commit: {repo}commit/ +:blob: {repo}blob/{branch}/ +:security-ref: https://www.elastic.co/community/security/ + +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] + +include::introduction.asciidoc[] + +include::setup.asciidoc[] diff --git a/docs/index-shared2.asciidoc b/docs/index-shared2.asciidoc new file mode 100644 index 0000000000000..7375432906a0d --- /dev/null +++ b/docs/index-shared2.asciidoc @@ -0,0 +1,12 @@ + +include::migration.asciidoc[] + +include::getting-started.asciidoc[] + +include::discover.asciidoc[] + +include::visualize.asciidoc[] + +include::dashboard.asciidoc[] + +include::timelion.asciidoc[] diff --git a/docs/index-shared3.asciidoc b/docs/index-shared3.asciidoc new file mode 100644 index 0000000000000..5b8e8ae130eb4 --- /dev/null +++ b/docs/index-shared3.asciidoc @@ -0,0 +1,11 @@ + +[[devtools-kibana]] += Dev Tools + +[partintro] +-- +The *Dev Tools* page contains development tools that you can use to interact +with your data in Kibana. +-- + +include::console.asciidoc[] diff --git a/docs/index-shared4.asciidoc b/docs/index-shared4.asciidoc new file mode 100644 index 0000000000000..d448bb691c99c --- /dev/null +++ b/docs/index-shared4.asciidoc @@ -0,0 +1,2 @@ + +include::management.asciidoc[] diff --git a/docs/index-shared5.asciidoc b/docs/index-shared5.asciidoc new file mode 100644 index 0000000000000..9e1931a2148f7 --- /dev/null +++ b/docs/index-shared5.asciidoc @@ -0,0 +1,8 @@ + +include::plugins.asciidoc[] + +include::development.asciidoc[] + +include::limitations.asciidoc[] + +include::release-notes.asciidoc[] diff --git a/docs/index.asciidoc b/docs/index.asciidoc index 7919933715f2e..8fd05249f9f90 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -1,44 +1,8 @@ [[kibana-guide]] = Kibana User Guide -////////// -release-state can be: released | prerelease | unreleased -////////// -:release-state: unreleased -:version: 6.0.0-alpha1 -:major-version: 6.x -:branch: master - -:docker-image: docker.elastic.co/kibana/kibana:{version} -:es-ref: https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ -:xpack-ref: https://www.elastic.co/guide/en/x-pack/current/ -:repo: https://github.com/elastic/kibana/ -:issue: {repo}issues/ -:pull: {repo}pull/ -:commit: {repo}commit/ -:security: https://www.elastic.co/community/security/ - - -include::introduction.asciidoc[] - -include::setup.asciidoc[] - -include::migration.asciidoc[] - -include::getting-started.asciidoc[] - -include::discover.asciidoc[] - -include::visualize.asciidoc[] - -include::dashboard.asciidoc[] - -include::timelion.asciidoc[] - -include::console.asciidoc[] - -include::management.asciidoc[] - -include::plugins.asciidoc[] - -include::development.asciidoc[] +include::index-shared1.asciidoc[] +include::index-shared2.asciidoc[] +include::index-shared3.asciidoc[] 
+include::index-shared4.asciidoc[]
+include::index-shared5.asciidoc[]
diff --git a/docs/limitations.asciidoc b/docs/limitations.asciidoc
new file mode 100644
index 0000000000000..04bf5a72a24dc
--- /dev/null
+++ b/docs/limitations.asciidoc
@@ -0,0 +1,12 @@
+[[limitations]]
+= Limitations
+
+[partintro]
+--
+Kibana currently has the following limitations.
+
+* <<nested-objects>>
+
+--
+
+include::limitations/nested-objects.asciidoc[]
diff --git a/docs/limitations/nested-objects.asciidoc b/docs/limitations/nested-objects.asciidoc
new file mode 100644
index 0000000000000..214f33eef5c42
--- /dev/null
+++ b/docs/limitations/nested-objects.asciidoc
@@ -0,0 +1,11 @@
+[[nested-objects]]
+== Nested Objects
+
+Kibana cannot perform aggregations across fields that contain nested objects.
+It also cannot search on nested objects when Lucene Query Syntax is used in
+the query bar.
+
+[IMPORTANT]
+==============================================
+Using `include_in_parent` or `copy_to` as a workaround is not supported and may stop functioning in future releases.
+==============================================
diff --git a/docs/management/advanced-options.asciidoc b/docs/management/advanced-options.asciidoc
index a193f03a76847..059163888e8a2 100644
--- a/docs/management/advanced-options.asciidoc
+++ b/docs/management/advanced-options.asciidoc
@@ -22,7 +22,9 @@ compatible with other configuration settings. Deleting a custom setting removes
.Kibana Settings Reference
[horizontal]
`query:queryString:options`:: Options for the Lucene query string parser.
-`sort:options`:: Options for the Elasticsearch {es-ref}search-request-sort.html[sort] parameter.
+`search:queryLanguage`:: Default is `lucene`. Query language used by the query bar. Choose between the Lucene query syntax and Kuery, an experimental new language built specifically for Kibana.
+`search:queryLanguage:switcher:enable`:: Show or hide the query language switcher in the query bar.
+`sort:options`:: Options for the Elasticsearch {ref}/search-request-sort.html[sort] parameter.
`dateFormat`:: The format to use for displaying pretty-formatted dates.
`dateFormat:tz`:: The timezone that Kibana uses. The default value of `Browser` uses the timezone detected by the browser.
`dateFormat:scaled`:: These values define the format used to render ordered time-based data. Formatted timestamps must
@@ -33,10 +35,9 @@ adapt to the interval between measurements. Keys are http://en.wikipedia.org/wik
`metaFields`:: An array of fields outside of `_source`. Kibana merges these fields into the document when displaying the document.
`discover:sampleSize`:: The number of rows to show in the Discover table.
+`discover:aggs:terms:size`:: Determines how many terms will be visualized when clicking the "visualize" button, in the field drop downs, in the discover sidebar. The default value is `20`.
`doc_table:highlight`:: Highlight results in Discover and Saved Searches Dashboard. Highlighting makes requests slow when working on big documents. Set this property to `false` to disable highlighting.
-`doc_table:highlight:all_fields`:: Improves highlighting by using a separate `highlight_query` that uses `all_fields` mode on
-`query_string` queries. Set to `false` if you are using a `default_field` in your index.
`courier:maxSegmentCount`:: Kibana splits requests in the Discover app into segments to limit the size of requests sent to the Elasticsearch cluster. This setting constrains the length of the segment list. Long segment lists can significantly increase request processing time.
@@ -46,11 +47,12 @@ increase request processing time.
`histogram:maxBars`:: Date histograms are not generated with more bars than the value of this property, scaling values when necessary.
`visualization:tileMap:maxPrecision`:: The maximum geoHash precision displayed on tile maps: 7 is high, 10 is very high,
-12 is the maximum. {es-ref}search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[Explanation of cell dimensions].
-`visualization:tileMap:WMSdefaults`:: Default properties for the WMS map server support in the tile map.
+12 is the maximum. {ref}/search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[Explanation of cell dimensions].
+`visualization:tileMap:WMSdefaults`:: Default properties for the WMS map server support in the coordinate map.
`visualization:colorMapping`:: Maps values to specified colors within visualizations.
`visualization:loadingDelay`:: Time to wait before dimming visualizations during query.
`visualization:dimmingOpacity`:: When part of a visualization is highlighted, by hovering over it for example, this is the opacity applied to the other elements. A higher number means other elements will be less opaque.
+`visualization:regionmap:showWarnings`:: Whether the region map shows a warning when terms cannot be joined to a shape on the map.
`csv:separator`:: A string that serves as the separator for exported values.
`csv:quoteValues`:: Set this property to `true` to quote exported values.
`history:limit`:: In fields that have history, such as query inputs, the value of this property limits how many recent
@@ -72,12 +74,14 @@ mentioned use "_default_".
`timepicker:refreshIntervalDefaults`:: The time filter's default refresh interval.
`dashboard:defaultDarkTheme`:: Set this property to `true` to make new dashboards use the dark theme by default.
`filters:pinnedByDefault`:: Set this property to `true` to make filters have a global state by default.
+`filterEditor:suggestValues`:: Set this property to `true` to have the filter editor suggest values for fields, instead of just providing a text input. This may result in heavy queries to Elasticsearch.
`notifications:banner`:: You can specify a custom banner to display temporary notices to all users. This field supports Markdown.
`notifications:lifetime:banner`:: Specifies the duration in milliseconds for banner notification displays. The default value is 3000000. Set this field to `Infinity` to disable banner notifications.
`notifications:lifetime:error`:: Specifies the duration in milliseconds for error notification displays. The default value is 300000. Set this field to `Infinity` to disable error notifications.
`notifications:lifetime:warning`:: Specifies the duration in milliseconds for warning notification displays. The default value is 10000. Set this field to `Infinity` to disable warning notifications.
`notifications:lifetime:info`:: Specifies the duration in milliseconds for information notification displays. The default value is 5000. Set this field to `Infinity` to disable information notifications.
+`metrics:max_buckets`:: The maximum number of buckets that a single request can return; this limit cannot be exceeded. It can be reached, for example, when the user selects a short interval (e.g. 1s) for a long time period (e.g. 1 year).
`timelion:showTutorial`:: Set this property to `true` to show the Timelion tutorial to users when they first open Timelion.
`timelion:es.timefield`:: Default field containing a timestamp when using the `.es()` query.
`timelion:es.default_index`:: Default index when using the `.es()` query. @@ -90,3 +94,4 @@ Markdown. `state:storeInSessionStorage`:: [experimental] Kibana tracks UI state in the URL, which can lead to problems when there is a lot of information there and the URL gets very long. Enabling this will store parts of the state in your browser session instead, to keep the URL shorter. `context:defaultSize`:: Specifies the initial number of surrounding entries to display in the context view. The default value is 5. `context:step`:: Specifies the number to increment or decrement the context size by when using the buttons in the context view. The default value is 5. +`context:tieBreakerFields`:: A comma-separated list of fields to use for tiebreaking between documents that have the same timestamp value. From this list the first field that is present and sortable in the current index pattern is used. diff --git a/docs/management/index-patterns.asciidoc b/docs/management/index-patterns.asciidoc index d453bd3d28076..ee3c696f0a460 100644 --- a/docs/management/index-patterns.asciidoc +++ b/docs/management/index-patterns.asciidoc @@ -35,76 +35,11 @@ list. contains time-based events* option and select the index field that contains the timestamp. Kibana reads the index mapping to list all of the fields that contain a timestamp. -. By default, Kibana restricts wildcard expansion of time-based index patterns to indices with data within the currently -selected time range. Click *Do not expand index pattern when search* to disable this behavior. - . Click *Create* to add the index pattern. . To designate the new pattern as the default pattern to load when you view the Discover tab, click the *favorite* button. -NOTE: When you define an index pattern, indices that match that pattern must exist in Elasticsearch. Those indices must -contain data. - -To use an event time in an index name, enclose the static text in the pattern and specify the date format using the -tokens described in the following table. - -For example, `[logstash-]YYYY.MM.DD` matches all indices whose names have a timestamp of the form `YYYY.MM.DD` appended -to the prefix `logstash-`, such as `logstash-2015.01.31` and `logstash-2015-02-01`. - -[float] -[[date-format-tokens]] -.Date Format Tokens -[horizontal] -`M`:: Month - cardinal: 1 2 3 ... 12 -`Mo`:: Month - ordinal: 1st 2nd 3rd ... 12th -`MM`:: Month - two digit: 01 02 03 ... 12 -`MMM`:: Month - abbreviation: Jan Feb Mar ... Dec -`MMMM`:: Month - full: January February March ... December -`Q`:: Quarter: 1 2 3 4 -`D`:: Day of Month - cardinal: 1 2 3 ... 31 -`Do`:: Day of Month - ordinal: 1st 2nd 3rd ... 31st -`DD`:: Day of Month - two digit: 01 02 03 ... 31 -`DDD`:: Day of Year - cardinal: 1 2 3 ... 365 -`DDDo`:: Day of Year - ordinal: 1st 2nd 3rd ... 365th -`DDDD`:: Day of Year - three digit: 001 002 ... 364 365 -`d`:: Day of Week - cardinal: 0 1 3 ... 6 -`do`:: Day of Week - ordinal: 0th 1st 2nd ... 6th -`dd`:: Day of Week - 2-letter abbreviation: Su Mo Tu ... Sa -`ddd`:: Day of Week - 3-letter abbreviation: Sun Mon Tue ... Sat -`dddd`:: Day of Week - full: Sunday Monday Tuesday ... Saturday -`e`:: Day of Week (locale): 0 1 2 ... 6 -`E`:: Day of Week (ISO): 1 2 3 ... 7 -`w`:: Week of Year - cardinal (locale): 1 2 3 ... 53 -`wo`:: Week of Year - ordinal (locale): 1st 2nd 3rd ... 53rd -`ww`:: Week of Year - 2-digit (locale): 01 02 03 ... 53 -`W`:: Week of Year - cardinal (ISO): 1 2 3 ... 53 -`Wo`:: Week of Year - ordinal (ISO): 1st 2nd 3rd ... 
53rd -`WW`:: Week of Year - two-digit (ISO): 01 02 03 ... 53 -`YY`:: Year - two digit: 70 71 72 ... 30 -`YYYY`:: Year - four digit: 1970 1971 1972 ... 2030 -`gg`:: Week Year - two digit (locale): 70 71 72 ... 30 -`gggg`:: Week Year - four digit (locale): 1970 1971 1972 ... 2030 -`GG`:: Week Year - two digit (ISO): 70 71 72 ... 30 -`GGGG`:: Week Year - four digit (ISO): 1970 1971 1972 ... 2030 -`A`:: AM/PM: AM PM -`a`:: am/pm: am pm -`H`:: Hour: 0 1 2 ... 23 -`HH`:: Hour - two digit: 00 01 02 ... 23 -`h`:: Hour - 12-hour clock: 1 2 3 ... 12 -`hh`:: Hour - 12-hour clock, 2 digit: 01 02 03 ... 12 -`m`:: Minute: 0 1 2 ... 59 -`mm`:: Minute - two-digit: 00 01 02 ... 59 -`s`:: Second: 0 1 2 ... 59 -`ss`:: Second - two-digit: 00 01 02 ... 59 -`S`:: Fractional Second - 10ths: 0 1 2 ... 9 -`SS`:: Fractional Second - 100ths: 0 1 ... 98 99 -`SSS`:: Fractional Seconds - 1000ths: 0 1 ... 998 999 -`Z`:: Timezone - zero UTC offset (hh:mm format): -07:00 -06:00 -05:00 .. +07:00 -`ZZ`:: Timezone - zero UTC offset (hhmm format): -0700 -0600 -0500 ... +0700 -`X`:: Unix Timestamp: 1360013296 -`x`:: Unix Millisecond Timestamp: 1360013296123 - [float] [[set-default-pattern]] == Setting the Default Index Pattern @@ -144,3 +79,5 @@ To delete an index pattern: . Select the pattern you want to remove in the Index Patterns list. . Click the pattern's *Delete* button. . Confirm that you want to remove the index pattern. + +include::index-patterns/management-cross-cluster-search.asciidoc[] diff --git a/docs/management/index-patterns/management-cross-cluster-search.asciidoc b/docs/management/index-patterns/management-cross-cluster-search.asciidoc new file mode 100644 index 0000000000000..4af9a4b1196a6 --- /dev/null +++ b/docs/management/index-patterns/management-cross-cluster-search.asciidoc @@ -0,0 +1,33 @@ +[[management-cross-cluster-search]] +=== Cross Cluster Search + +beta[] + +Elasticsearch supports the ability to run search and aggregation requests across multiple +clusters using a module called _cross cluster search_. + +In order to take advantage of cross cluster search, you must configure your Elasticsearch +clusters accordingly. Review the corresponding Elasticsearch +{ref}/modules-cross-cluster-search.html[documentation] before attempting to use cross cluster +search in Kibana. + +Once your Elasticsearch clusters are configured for cross cluster search, you can create +specific index patterns in Kibana to search across the clusters of your choosing. Using the +same syntax that you'd use in a raw cross cluster search request in Elasticsearch, create your +index pattern in Kibana with the convention `<cluster names>:<pattern>`. + +For example, if you want to query logstash indices across two of the Elasticsearch clusters +that you set up for cross cluster search, which were named `cluster_one` and `cluster_two`, +you would use `cluster_one,cluster_two:logstash-*` as your index pattern in Kibana. + +Just like in raw search requests in Elasticsearch, you can use wildcards in your cluster names +to match any number of clusters, so if you wanted to search logstash indices across any +clusters named `cluster_foo`, `cluster_bar`, and so on, you would use `cluster_*:logstash-*` +as your index pattern in Kibana. + +If you want to query across all Elasticsearch clusters that have been configured for cross +cluster search, then use a standalone wildcard for your cluster name in your Kibana index +pattern: `*:logstash-*`.
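To recap the convention, here is a sketch of the index pattern shapes described above, reusing the same illustrative cluster and index names from this section:

[source,text]
----
cluster_one:logstash-*               searches one specific cluster
cluster_one,cluster_two:logstash-*   searches two specific clusters
cluster_*:logstash-*                 searches all clusters matching cluster_*
*:logstash-*                         searches every configured cluster
----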
+ +Once an index pattern is configured using the cross cluster search syntax, all searches and +aggregations using that index pattern in Kibana take advantage of cross cluster search. diff --git a/docs/management/managing-fields.asciidoc b/docs/management/managing-fields.asciidoc index b7129d5047b2c..b2170ac3ea8fa 100644 --- a/docs/management/managing-fields.asciidoc +++ b/docs/management/managing-fields.asciidoc @@ -72,13 +72,13 @@ WARNING: Computing data on the fly with scripted fields can be very resource int Kibana's performance. Keep in mind that there's no built-in validation of a scripted field. If your scripts are buggy, you'll get exceptions whenever you try to view the dynamically generated data. -When you define a scripted field in Kibana, you have a choice of scripting languages. Starting with 5.0, the default -options are {es-ref}modules-scripting-expression.html[Lucene expressions] and {es-ref}modules-scripting-painless.html[Painless]. -While you can use other scripting languages if you enable dynamic scripting for them in Elasticsearch, this is not recommended -because they cannot be sufficiently {es-ref}modules-scripting-security.html[sandboxed]. +When you define a scripted field in Kibana, you have a choice of scripting languages. Starting with 5.0, the default +options are {ref}/modules-scripting-expression.html[Lucene expressions] and {ref}/modules-scripting-painless.html[Painless]. +While you can use other scripting languages if you enable dynamic scripting for them in Elasticsearch, this is not recommended +because they cannot be sufficiently {ref}/modules-scripting-security.html[sandboxed]. -WARNING: Use of Groovy, Javascript, and Python scripting is deprecated starting in Elasticsearch 5.0, and support for those -scripting languages will be removed in the future. +WARNING: Use of Groovy, Javascript, and Python scripting is deprecated starting in Elasticsearch 5.0, and support for those +scripting languages will be removed in the future. You can reference any single value numeric field in your expressions, for example: @@ -86,7 +86,7 @@ You can reference any single value numeric field in your expressions, for exampl doc['field_name'].value ---- -For more background on scripted fields and additional examples, refer to this blog: +For more background on scripted fields and additional examples, refer to this blog: https://www.elastic.co/blog/using-painless-kibana-scripted-fields[Using Painless in Kibana scripted fields] [float] @@ -103,7 +103,7 @@ To create a scripted field: . Click *Save Scripted Field*. For more information about scripted fields in Elasticsearch, see -{es-ref}modules-scripting.html[Scripting]. +{ref}/modules-scripting.html[Scripting]. [float] [[update-scripted-field]] diff --git a/docs/migration/migrate_6_0.asciidoc b/docs/migration/migrate_6_0.asciidoc index b0122e66eb1ef..15666cda1b8c7 100644 --- a/docs/migration/migrate_6_0.asciidoc +++ b/docs/migration/migrate_6_0.asciidoc @@ -4,6 +4,7 @@ This section discusses the changes that you need to be aware of when migrating your application to Kibana 6.0. + [float] === Removed option to use unsupported scripting languages *Details:* Kibana 5.x allowed users to create scripted fields using any scripting language enabled in Elasticsearch. @@ -12,6 +13,7 @@ Kibana 6.0 will only support Painless and Lucene expression based scripts. *Impact:* You will need to migrate your groovy, python, javascript, etc. scripted fields to Painless or Lucene expressions. 
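For simple numeric scripts, this migration is often mechanical. A hypothetical before/after for a scripted field that converts a `bytes` field to kilobytes (the field name is illustrative); in cases like this the expression itself is unchanged, and only the script language selection needs to be switched:

[source,text]
----
// Groovy scripted field (no longer supported in 6.0):
doc['bytes'].value / 1024

// The same expression is valid Painless, and also works as a Lucene expression:
doc['bytes'].value / 1024
----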
+ [float] === Changed response format of status API *Details:* In an effort to align with our style guidelines and provide a digestible response, @@ -20,4 +22,53 @@ the status API has changed: * Properties are now snake cased and several have been renamed * Metrics now provide the latest available data instead of samples over time -*Impact:* You will need to update anything using the status API and expecting the previous response format. \ No newline at end of file +*Impact:* You will need to update anything using the status API and expecting the previous response format. + + +[float] +=== Timelion requires a comma to separate queries +*Details:* Kibana 5.x allowed users to use spaces as a query separator in Timelion, e.g. `.es(400) .es(500)`. +This is no longer the case. Now, only a comma is a valid query separator, e.g. `.es(400), .es(500)`. + +*Impact:* You will need to migrate your stored Timelion queries to the new syntax. + + +[float] +=== Requires 64 bit operating system +*Details:* Kibana 6.0.0 and onward only support 64 bit operating systems. + +*Impact:* You will need to install Kibana 6.x on a 64 bit operating system. No extra data migration steps are necessary when moving from 32 bit to 64 bit. + + +[float] +=== NODE_ENV no longer has an impact on Kibana +*Details:* Setting the NODE_ENV environment variable could break Kibana processes in unexpected ways. This was especially unfortunate because NODE_ENV is commonly configured system-wide, where you wouldn't expect it to affect Kibana at all. Kibana now effectively ignores NODE_ENV entirely. + +*Impact:* If you're developing a custom plugin that depends on NODE_ENV, you will need to update it to use a different, custom environment variable. + + +[float] +=== Kibana 4.x configuration names using `_` instead of `.` have been removed +*Details:* In Kibana 4.2, we renamed all configuration names in kibana.yml to use `.` as a separator instead of `_`, though the legacy configurations would still continue to work. In 5.0, we started logging deprecation notices whenever the legacy configurations were encountered. In 6.0 onward, legacy configuration names that use an underscore instead of a dot will no longer work. + +*Impact:* Any usages of underscore separated configuration names in kibana.yml need to be updated to their modern equivalents. See <<settings>> for accepted configurations. + [float] +=== Time-interval based index patterns are no longer supported +*Details:* Starting in Kibana 6.0.0, we removed the ability to create index patterns that use a date-pattern and interval to identify Elasticsearch indices. Index patterns must now use wildcards, which are more performant in most cases. + +*Impact:* Existing index patterns and saved objects will continue to function without issue, and in a subsequent release we will provide utilities to migrate your index patterns/saved objects. + + +[float] +=== Removed same port http to https redirect behavior +*Details:* Kibana 5.x redirected requests from http to https on the same port if TLS was configured. Starting in Kibana 6.0.0, Kibana no longer redirects basic http traffic to https. + +*Impact:* With the new configuration setting `server.ssl.redirectHttpFromPort`, you can specify a port that will redirect from http to https. This cannot be the same port as the https port.
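A minimal `kibana.yml` sketch of the new redirect setting (ports and paths are illustrative):

[source,yaml]
----
server.port: 5601                      # HTTPS is served here once SSL is enabled
server.ssl.enabled: true
server.ssl.certificate: /path/to/cert.pem
server.ssl.key: /path/to/key.pem
server.ssl.redirectHttpFromPort: 5600  # plain HTTP on 5600 is redirected to 5601
----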
+ + +[float] +=== Removed "expand index pattern when searching" setting for index patterns +*Details:* Since 4.3, index patterns could be configured to do a pre-flight field_stats request before a search in order to determine exact indices that could contain matching documents. Elasticsearch now optimizes searches internally in a similar way and has also removed the field_stats API, so this option was removed from Kibana entirely. + +*Impact:* No change is required for existing Kibana index patterns. Those previously configured with this option will gracefully use the new Elasticsearch optimizations instead, as will all new index patterns. diff --git a/docs/plugins.asciidoc b/docs/plugins.asciidoc index 0c2d8355dc4e2..b9365dd54b075 100644 --- a/docs/plugins.asciidoc +++ b/docs/plugins.asciidoc @@ -66,6 +66,20 @@ If plugins were installed as a different user and the server is not starting, th [source,shell] $ chown -R kibana:kibana /path/to/kibana/optimize +[float] +=== Proxy support for plugin installation + +Kibana supports plugin installation via a proxy. It uses the `http_proxy` and `https_proxy` +environment variables to detect a proxy for HTTP and HTTPS URLs. + +It also respects the `no_proxy` environment variable to exclude specific URLs from proxying. + +You can specify the environment variable directly when installing plugins: + +[source,shell] +$ http_proxy="http://proxy.local:4242" bin/kibana-plugin install + + == Updating & Removing Plugins To update a plugin, remove the current version and reinstall the plugin. diff --git a/docs/plugins/known-plugins.asciidoc b/docs/plugins/known-plugins.asciidoc index 23c063bd3d5d7..93e4038291041 100644 --- a/docs/plugins/known-plugins.asciidoc +++ b/docs/plugins/known-plugins.asciidoc @@ -25,24 +25,31 @@ This list of plugins is not guaranteed to work on your version of Kibana. 
Instea [float] === Visualizations -* https://github.com/prelert/kibana-swimlane-vis[Swimlanes] (prelert) -* https://github.com/sbeyn/kibana-plugin-line-sg[Line] (sbeyn) -* https://github.com/sbeyn/kibana-plugin-gauge-sg[Gauge] (sbeyn) -* https://github.com/sbeyn/kibana-plugin-traffic-sg[Traffic] (sbeyn) +* https://github.com/virusu/3D_kibana_charts_vis[3D Charts] (virusu) * https://github.com/JuanCarniglia/area3d_vis[3D Graph] (JuanCarniglia) -* https://github.com/nreese/enhanced_tilemap[Enhanced Tilemap] (nreese) -* https://github.com/dlumbrer/kbn_network[Network Plugin] (dlumbrer) * https://github.com/mstoyano/kbn_c3js_vis[C3JS Visualizations] (mstoyano) -* https://github.com/clamarque/Kibana_health_metric_vis[Health Metric] (clamarque) -* https://github.com/ommsolutions/kibana_ext_metrics_vis[Extended Metric] (ommsolutions) -* https://github.com/virusu/3D_kibana_charts_vis[3D Charts] (virusu) -* https://github.com/DeanF/health_metric_vis[Colored Metric Visualization] (deanf) * https://github.com/elo7/cohort[Cohort analysis] (elo7) -* https://github.com/amannocci/kibana-plugin-metric-percent[Percent] (amannocci) +* https://github.com/DeanF/health_metric_vis[Colored Metric Visualization] (deanf) +* https://github.com/nreese/enhanced_tilemap[Enhanced Tilemap] (nreese) +* https://github.com/ommsolutions/kibana_ext_metrics_vis[Extended Metric] (ommsolutions) * https://github.com/outbrain/ob-kb-funnel[Funnel Visualization] (roybass) +* https://github.com/sbeyn/kibana-plugin-gauge-sg[Gauge] (sbeyn) +* https://github.com/clamarque/Kibana_health_metric_vis[Health Metric] (clamarque) +* https://github.com/sbeyn/kibana-plugin-line-sg[Line] (sbeyn) +* https://github.com/dlumbrer/kbn_network[Network Plugin] (dlumbrer) +* https://github.com/amannocci/kibana-plugin-metric-percent[Percent] (amannocci) +* https://github.com/dlumbrer/kbn_searchtables[Search-Tables] (dlumbrer) +* https://github.com/prelert/kibana-swimlane-vis[Swimlanes] (prelert) +* https://github.com/sbeyn/kibana-plugin-traffic-sg[Traffic] (sbeyn) +* https://github.com/PhaedrusTheGreek/transform_vis[Transform Visualization] (PhaedrusTheGreek) +* https://github.com/nyurik/kibana-vega-vis[Vega-based visualization with map support] (nyurik) [float] === Other * https://github.com/nreese/kibana-time-plugin[Time picker as a dashboard panel] Widget to view and edit the time range from within dashboards. +* https://github.com/Webiks/kibana-API.git[Kibana-API] (webiks) Exposes an API with Kibana functionality. +Use it to create, edit and embed visualizations, and also to search inside an embedded dashboard. + + NOTE: If you want your plugin to be added to this page, open a {repo}tree/{branch}/docs/plugins/known-plugins.asciidoc[pull request]. diff --git a/docs/release-notes.asciidoc b/docs/release-notes.asciidoc new file mode 100644 index 0000000000000..bd3b2ce7b5c60 --- /dev/null +++ b/docs/release-notes.asciidoc @@ -0,0 +1,13 @@ +[[release-notes]] += Release Notes + +[partintro] +-- +This section summarizes the changes in each release. + +* <<release-notes-6.0.0-alpha2>> +* <<release-notes-6.0.0-alpha1>> + +-- +include::release-notes/6.0.0-alpha2.asciidoc[] +include::release-notes/6.0.0-alpha1.asciidoc[] diff --git a/docs/release-notes/6.0.0-alpha1.asciidoc b/docs/release-notes/6.0.0-alpha1.asciidoc new file mode 100644 index 0000000000000..dd047c4114a74 --- /dev/null +++ b/docs/release-notes/6.0.0-alpha1.asciidoc @@ -0,0 +1,78 @@ +[[release-notes-6.0.0-alpha1]] +== 6.0.0-alpha1 Release Notes + +Also see <<breaking-changes-6.0>>.
+ +[float] +[[enhancement-6.0.0-alpha1]] +=== Enhancements +Core:: +* Deprecate Bootstrap buttons {pull}11352[#11352] +* Fix missing border of PaginatedTable rows in Firefox {pull}11452[#11452] +* Reinstate a few Bootstrap btn classes to support angular-ui-select. {pull}11569[#11569] +* Remove Bootstrap btn classes. {pull}11353[#11353] +* [UI Framework] Add AssistedInput. {pull}11343[#11343] +* [UI Framework] Add example of disabled kuiSelect. {pull}11345[#11345] +* [UI Framework] Add example of spinner Icon. {pull}11424[#11424] +* [UI Framework] Add kuiButton--fullWidth kuiButton--small, and kuiButtonGroup--fullWidth modifiers. {pull}11365[#11365] +* [UI Framework] Add support for dark theme links. {pull}11344[#11344] +* [UI Framework] Add support for selects and secondary inputs to LocalNavSearch component of UI Framework. {pull}11287[#11287] +* [UI Framework] Apply elastic.co look and feel to UI Framework docs site. {pull}11174[#11174] +* [UI Framework] Fix appearance of some form components in Firefox {pull}11589[#11589] +* [UI Framework] Make CardGroup split apart by default. Add kuiCardGroup--united modifier. {pull}11580[#11580] +* [UI Framework] Vertical align children in a FieldGroup. {pull}11374[#11374] +* Add small text input to UI framework {pull}11354[#11354] +* [UI Framework] Add uiFramework:build task. {pull}11402[#11402] +* Updates to status API, re-align status page {pull}10180[#10180] +* [status] 15m load average should use the 3rd index {pull}11202[#11202] +* Sets ES mapping to single_type=false {pull}11451[#11451] +Dev Tools:: +* Adding autocomplete rules for reindex API to Console {pull}10150[#10150] +* minimum_number_should_match was deprecated {pull}11316[#11316] +Dashboard:: +* Improve Dashboard screen-reader accessibility. {pull}11600[#11600] +* Update angular-bootstrap DatePicker with UI Framework classes. {pull}11378[#11378] +* Finish loading a dashboard even if some visualizations throw errors {pull}11324[#11324] +* React search box tool bar {pull}10821[#10821] +* Don't kill an entire dashboard because of one bad request {pull}11337[#11337] +* Add back dashboard descriptions {pull}11552[#11552] +* Hide the second toast when adding a new visualization straight from dashboard {pull}11621[#11621] +Discover:: +* Add ignore_unmapped to geo filters to prevent exceptions {pull}11461[#11461] +* Create CollapseButton component class to standardize appearance of this button. {pull}11462[#11462] +* Migrate deprecated match query syntax {pull}11554[#11554] +* Remove the _source field toggle button which was accidentally reinstated {pull}11485[#11485] +* Search bar drop-down for managing filters {pull}10976[#10976] +Management:: +* Convert Index Pattern Creation form UI to use UI Framework components. {pull}11419[#11419] +* Convert ScriptedFieldsTable and SourceFiltersTable to UI Framework components. {pull}11401[#11401] +* Removes the "Index contains time-based events" checkbox {pull}11409[#11409] +* Update Settings page with UI Framework components. {pull}11272[#11272] +* Report shard failures in the field_capabilities response {pull}11450[#11450] +Visualize:: +* Fix spelling in time series visual builder {pull}11212[#11212] +* Adding label templates to legend keys for TSVB {pull}11266[#11266] +* Fix missing icons in Visualize listing. {pull}11243[#11243] +* Update illegible vis type icons with legible ones. 
{pull}11317[#11317] +* Fixing the fields for the numbers for percentile aggs for Time Series Visual Builder {pull}11169[#11169] +* using ui-select for field selection in visualize {pull}10998[#10998] +* add polygon drawing tool {pull}11578[#11578] +* Fix timelion's flot when neither thor nor monitoring are installed {pull}10412[#10412] + +[float] +[[bug-6.0.0-alpha1]] +=== Bug fixes +Core:: +* [Fix for #4964] Disable dynamic/Implement static mappings {pull}10638[#10638] +* Fix visualize sort icon bug {pull}11568[#11568] Visualize:: +* Fixes #11232 - Add option for panel and global filters to annotations for TSVB {pull}11260[#11260] +* Fixes #11289 - Change top_n to not use split colors for TSVB {pull}11342[#11342] +* [Fix for #10907] allows more flexible timelion queries {pull}10961[#10961] +* [Fix for #10292] fixing heatmap black squares {pull}11489[#11489] +* [Fix for #4599] Add "Sum of series in legend" option {pull}7970[#7970] +* [Fix for #9053] [timelion/fit/carry] do nothing if there is not any data {pull}9054[#9054] +* [Fix for #8763] [vislib/tilemap/heatmap] scale the heatmap maxZoom with map zoom {pull}8765[#8765] +* [Fix for #9184] fixes error with custom interval in datetime aggregation {pull}9427[#9427] + + diff --git a/docs/release-notes/6.0.0-alpha2.asciidoc b/docs/release-notes/6.0.0-alpha2.asciidoc new file mode 100644 index 0000000000000..080bb7715c140 --- /dev/null +++ b/docs/release-notes/6.0.0-alpha2.asciidoc @@ -0,0 +1,52 @@ +[[release-notes-6.0.0-alpha2]] +== 6.0.0-alpha2 Release Notes + +Also see <<breaking-changes-6.0>>. + + +[float] +[[breaking-6.0.0-alpha2]] +=== Breaking changes +Core:: +* Remove legacy pre-4.2 configurations {pull}12013[#12013] +* NODE_ENV no longer has an impact on Kibana {pull}12010[#12010] +* Only support 64 bit operating systems {pull}11941[#11941] + + +[float] +[[feature-6.0.0-alpha2]] +=== New features +Core:: +* Getting started page {pull}11805[#11805] +Dashboard:: +* Clone dashboard from view mode {pull}10925[#10925] +Visualize:: +* Region map {pull}10937[#10937] +* Gauge chart {pull}10336[#10336] + + +[float] +[[enhancement-6.0.0-alpha2]] +=== Enhancements +Core:: +* Add new UI setting to control the amount of items in listing pages {pull}11674[#11674] +Discover:: +* Apply filters to the event context view {pull}11466[#11466] +Timelion:: +* Support multiline Timelion queries {pull}11972[#11972] +Time series visual builder:: +* Help text for writing painless scripts {pull}11936[#11936] +* Toggle for automatically applying changes to visualization {pull}11460[#11460] +Timepicker:: +* Improve accessibility of the datepicker {pull}11753[#11753] + + +[float] +[[bug-6.0.0-alpha2]] +=== Bug fixes +Timelion:: +* Timelion tutorial now persists until you close it {pull}11962[#11962] +Time series visual builder:: +* No longer trigger error in certain circumstances when using percentiles {pull}11772[#11772] +Visualize:: +* Disable save button if visualization is dirty {pull}11576[#11576] diff --git a/docs/setup/connect-to-elasticsearch.asciidoc b/docs/setup/connect-to-elasticsearch.asciidoc index 18c22978e890a..0915ef51d03a3 100644 --- a/docs/setup/connect-to-elasticsearch.asciidoc +++ b/docs/setup/connect-to-elasticsearch.asciidoc @@ -32,7 +32,7 @@ sophisticated date parsing APIs that Kibana uses to determine date information, specify dates in the index pattern name. + . Click *Create* to add the index pattern. This first pattern is automatically configured as the default.
-When you have more than one index pattern, you can designate which one to use as the default by clicking +When you have more than one index pattern, you can designate which one to use as the default by clicking on the star icon above the index pattern title from *Management > Index Patterns*. All done! Kibana is now connected to your Elasticsearch data. Kibana displays a read-only list of fields @@ -58,7 +58,7 @@ Getting Started>> tutorial. [float] [[kibana-dynamic-mapping]] === Kibana and Elasticsearch Dynamic Mapping -By default, Elasticsearch enables {es-ref}dynamic-mapping.html[dynamic mapping] for fields. Kibana needs +By default, Elasticsearch enables {ref}/dynamic-mapping.html[dynamic mapping] for fields. Kibana needs dynamic mapping to use fields in visualizations correctly, as well as to manage the `.kibana` index where saved searches, visualizations, and dashboards are stored. diff --git a/docs/setup/docker.asciidoc b/docs/setup/docker.asciidoc index b868b4910d840..6e5e0d91be28c 100644 --- a/docs/setup/docker.asciidoc +++ b/docs/setup/docker.asciidoc @@ -1,12 +1,16 @@ [[docker]] == Running Kibana on Docker -Docker images for Kibana are available from the Elastic Docker registry. +Docker images for Kibana are available from the Elastic Docker registry. The +base image is https://hub.docker.com/_/centos/[centos:7] and the source code +can be found on +https://github.com/elastic/kibana-docker/tree/{branch}[GitHub]. + The images are shipped with https://www.elastic.co/products/x-pack[X-Pack] installed. NOTE: https://www.elastic.co/guide/en/x-pack/current/index.html[X-Pack] is pre-installed in this image. With X-Pack installed, Kibana expects to -connect to an Elasticsearch cluster that is also runnning X-Pack. +connect to an Elasticsearch cluster that is also running X-Pack. === Pulling the image Obtaining Kibana for Docker is as simple as issuing a +docker pull+ command @@ -34,7 +38,8 @@ endif::[] === Configuring Kibana on Docker The Docker image provides several methods for configuring Kibana. The conventional -approach is to provide a `kibana.yml` file as described in <>, but it's +approach is to provide a `kibana.yml` file as described in {kibana-ref}/settings.html[Configuring +Kibana], but it's also possible to use environment variables to define settings. [[docker-bind-mount-config]] @@ -45,6 +50,7 @@ With +docker-compose+, the bind-mount can be specified like this: ["source","yaml",subs="attributes"] -------------------------------------------- +version: '3' services: kibana: image: {docker-image} @@ -55,62 +61,33 @@ services: [[docker-env-config]] ==== Environment variable configuration -Under Docker, Kibana can be configured via environment variables. The following -mappings are available: +Under Docker, Kibana can be configured via environment variables. When +the container starts, a helper process checks the environment for variables that +can be mapped to Kibana command-line arguments. + +For compatibility with container orchestration systems, these +environment variables are written in all capitals, with underscores as +word separators. The helper translates these names to valid +Kibana setting names. 
+ +Some example translations are shown here: -.Docker Environment Variables +.Example Docker Environment Variables [horizontal] **Environment Variable**:: **Kibana Setting** -`ELASTICSEARCH_CUSTOMHEADERS`:: `elasticsearch.customHeaders` -`ELASTICSEARCH_PASSWORD`:: `elasticsearch.password` -`ELASTICSEARCH_PINGTIMEOUT`:: `elasticsearch.pingTimeout` -`ELASTICSEARCH_PRESERVEHOST`:: `elasticsearch.preserveHost` -`ELASTICSEARCH_REQUESTHEADERSWHITELIST`:: `elasticsearch.requestHeadersWhitelist` -`ELASTICSEARCH_REQUESTTIMEOUT`:: `elasticsearch.requestTimeout` -`ELASTICSEARCH_SHARDTIMEOUT`:: `elasticsearch.shardTimeout` -`ELASTICSEARCH_SSL_CA`:: `elasticsearch.ssl.ca` -`ELASTICSEARCH_SSL_CERT`:: `elasticsearch.ssl.cert` -`ELASTICSEARCH_SSL_KEY`:: `elasticsearch.ssl.key` -`ELASTICSEARCH_SSL_VERIFY`:: `elasticsearch.ssl.verify` -`ELASTICSEARCH_STARTUPTIMEOUT`:: `elasticsearch.startupTimeout` -`ELASTICSEARCH_URL`:: `elasticsearch.url` -`ELASTICSEARCH_USERNAME`:: `elasticsearch.username` -`KIBANA_DEFAULTAPPID`:: `kibana.defaultAppId` -`KIBANA_INDEX`:: `kibana.index` -`LOGGING_DEST`:: `logging.dest` -`LOGGING_QUIET`:: `logging.quiet` -`LOGGING_SILENT`:: `logging.silent` -`LOGGING_VERBOSE`:: `logging.verbose` -`OPS_INTERVAL`:: `ops.interval` -`PID_FILE`:: `pid.file` -`SERVER_BASEPATH`:: `server.basePath` -`SERVER_HOST`:: `server.host` -`SERVER_MAXPAYLOADBYTES`:: `server.maxPayloadBytes` `SERVER_NAME`:: `server.name` -`SERVER_PORT`:: `server.port` -`SERVER_SSL_CERT`:: `server.ssl.cert` -`SERVER_SSL_KEY`:: `server.ssl.key` -`XPACK_MONITORING_ELASTICSEARCH_URL`:: `xpack.monitoring.elasticsearch.url` -`XPACK_MONITORING_ELASTICSEARCH_USERNAME`:: `xpack.monitoring.elasticsearch.username` -`XPACK_MONITORING_ELASTICSEARCH_PASSWORD`:: `xpack.monitoring.elasticsearch.password` +`KIBANA_DEFAULTAPPID`:: `kibana.defaultAppId` `XPACK_MONITORING_ENABLED`:: `xpack.monitoring.enabled` -`XPACK_MONITORING_MAX_BUCKET_SIZE`:: `xpack.monitoring.max_bucket_size` -`XPACK_MONITORING_MIN_INTERVAL_SECONDS`:: `xpack.monitoring.min_interval_seconds` -`XPACK_MONITORING_NODE_RESOLVER`:: `xpack.monitoring.node_resolver` -`XPACK_MONITORING_REPORT_STATS`:: `xpack.monitoring.report_stats` -`XPACK_MONITORING_KIBANA_COLLECTION_ENABLED`:: `xpack.monitoring.kibana.collection.enabled` -`XPACK_MONITORING_KIBANA_COLLECTION_INTERVAL`:: `xpack.monitoring.kibana.collection.interval` -`XPACK_MONITORING_UI_CONTAINER_ELASTICSEARCH_ENABLED`:: `xpack.monitoring.ui.container.elasticsearch.enabled` -`XPACK_SECURITY_ENABLED`:: `xpack.security.enabled` -`XPACK_SECURITY_COOKIENAME`:: `xpack.security.cookieName` -`XPACK_SECURITY_ENCRYPTIONKEY`:: `xpack.security.encryptionKey` -`XPACK_SECURITY_SECURECOOKIES`:: `xpack.security.secureCookies` -`XPACK_SECURITY_SESSIONTIMEOUT`:: `xpack.security.sessionTimeout` + +In general, any setting listed in <<settings>> or +{xpack-ref}/xpack-settings.html[X-Pack Settings] can be configured +with this technique. These variables can be set with +docker-compose+ like this: ["source","yaml",subs="attributes"] ---------------------------------------------------------- +version: '3' services: kibana: image: {docker-image} @@ -119,7 +96,8 @@ services: ELASTICSEARCH_URL: http://elasticsearch.example.org ---------------------------------------------------------- -Environment variables take precedence over settings configured in `kibana.yml`. +Since environment variables are translated to CLI arguments, they take +precedence over settings configured in `kibana.yml`.
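The same translation applies to any mechanism that sets environment variables on the container, not just +docker-compose+. A sketch with plain `docker run` (the host name and URL are illustrative):

["source","sh",subs="attributes"]
----
docker run -p 5601:5601 \
  -e SERVER_NAME=kibana.example.org \
  -e ELASTICSEARCH_URL=http://elasticsearch.example.org \
  {docker-image}
----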
==== Docker defaults The following settings have different default values when using the Docker image: diff --git a/docs/setup/install.asciidoc b/docs/setup/install.asciidoc index bbc9b465f9bcf..25a414843f5c1 100644 --- a/docs/setup/install.asciidoc +++ b/docs/setup/install.asciidoc @@ -1,6 +1,8 @@ [[install]] == Installing Kibana +NOTE: Starting with version 6.0.0, Kibana only supports 64 bit operating systems. + Kibana is provided in the following package formats: [horizontal] @@ -36,11 +38,11 @@ with https://www.elastic.co/products/x-pack[X-Pack] pre-installed and is available from the Elastic Docker registry. + -<> +{kibana-ref}/docker.html[Running Kibana on Docker] -IMPORTANT: If your Elasticsearch installation is protected by {xpack-ref}xpack-security.html[X-Pack Security] -see {xpack-ref}kibana.html[Using Kibana with X-Pack Security] for additional setup -instructions. +IMPORTANT: If your Elasticsearch installation is protected by {xpack-ref}/xpack-security.html[X-Pack Security] +see {kibana-ref}/using-kibana-with-security.html[Configuring Security in Kibana] +for additional setup instructions. include::install/targz.asciidoc[] diff --git a/docs/setup/install/deb.asciidoc b/docs/setup/install/deb.asciidoc index a03ec5fa1b3d3..9dc9a5b4c7355 100644 --- a/docs/setup/install/deb.asciidoc +++ b/docs/setup/install/deb.asciidoc @@ -106,7 +106,6 @@ ifeval::["{release-state}"!="unreleased"] The Debian package for Kibana v{version} can be downloaded from the website and installed as follows: -*64 bit:* ["source","sh",subs="attributes"] -------------------------------------------- wget https://artifacts.elastic.co/downloads/kibana/kibana-{version}-amd64.deb @@ -116,16 +115,6 @@ sudo dpkg -i kibana-{version}-amd64.deb <1> Compare the SHA produced by `sha1sum` or `shasum` with the https://artifacts.elastic.co/downloads/kibana/kibana-{version}-amd64.deb.sha1[published SHA]. -*32 bit:* -["source","sh",subs="attributes"] --------------------------------------------- -wget https://artifacts.elastic.co/downloads/kibana/kibana-{version}-i386.deb -sha1sum kibana-{version}-i386.deb <1> -sudo dpkg -i kibana-{version}-i386.deb --------------------------------------------- -<1> Compare the SHA produced by `sha1sum` or `shasum` with the - https://artifacts.elastic.co/downloads/kibana/kibana-{version}-i386.deb.sha1[published SHA]. - endif::[] include::init-systemd.asciidoc[] @@ -160,7 +149,7 @@ include::systemd.asciidoc[] Kibana loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in -<>. +{kibana-ref}/settings.html[Configuring Kibana]. [[deb-layout]] ==== Directory layout of Debian package diff --git a/docs/setup/install/rpm.asciidoc b/docs/setup/install/rpm.asciidoc index 9d6f17a15f7fd..3c1c00d98735c 100644 --- a/docs/setup/install/rpm.asciidoc +++ b/docs/setup/install/rpm.asciidoc @@ -97,7 +97,6 @@ ifeval::["{release-state}"!="unreleased"] The RPM for Kibana v{version} can be downloaded from the website and installed as follows: -*64 bit:* ["source","sh",subs="attributes"] -------------------------------------------- wget https://artifacts.elastic.co/downloads/kibana/kibana-{version}-x86_64.rpm @@ -107,16 +106,6 @@ sudo rpm --install kibana-{version}-x86_64.rpm <1> Compare the SHA produced by `sha1sum` or `shasum` with the https://artifacts.elastic.co/downloads/kibana/kibana-{version}-x86_64.rpm.sha1[published SHA]. 
-*32 bit:* -["source","sh",subs="attributes"] --------------------------------------------- -wget https://artifacts.elastic.co/downloads/kibana/kibana-{version}-i686.rpm -sha1sum kibana-{version}-i686.rpm <1> -sudo rpm --install kibana-{version}-i686.rpm --------------------------------------------- -<1> Compare the SHA produced by `sha1sum` or `shasum` with the - https://artifacts.elastic.co/downloads/kibana/kibana-{version}-i686.rpm.sha1[published SHA]. - endif::[] include::init-systemd.asciidoc[] @@ -152,7 +141,7 @@ include::systemd.asciidoc[] Kibana loads its configuration from the `/etc/kibana/kibana.yml` file by default. The format of this config file is explained in -<>. +{kibana-ref}/settings.html[Configuring Kibana]. [[rpm-layout]] ==== Directory layout of RPM diff --git a/docs/setup/install/targz.asciidoc b/docs/setup/install/targz.asciidoc index 4e316bc8eb800..f346ec047ce26 100644 --- a/docs/setup/install/targz.asciidoc +++ b/docs/setup/install/targz.asciidoc @@ -21,7 +21,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The 64-bit Linux archive for Kibana v{version} can be downloaded and installed as follows: +The Linux archive for Kibana v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] -------------------------------------------- @@ -37,33 +37,6 @@ cd kibana/ <2> endif::[] -[[install-linux32]] -==== Download and install the Linux 32-bit package - -ifeval::["{release-state}"=="unreleased"] - -Version {version} of Kibana has not yet been released. - -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -The 32-bit Linux archive for Kibana v{version} can be downloaded and installed as follows: - -["source","sh",subs="attributes"] --------------------------------------------- -wget https://artifacts.elastic.co/downloads/kibana/kibana-{version}-linux-x86.tar.gz -sha1sum kibana-{version}-linux-x86.tar.gz <1> -tar -xzf kibana-{version}-linux-x86.tar.gz -cd kibana/ <2> --------------------------------------------- -<1> Compare the SHA produced by `sha1sum` or `shasum` with the - https://artifacts.elastic.co/downloads/kibana/kibana-{version}-linux-x86.tar.gz.sha1[published SHA]. -<2> This directory is known as `$KIBANA_HOME`. - -endif::[] - - [[install-darwin64]] ==== Download and install the Darwin package @@ -110,7 +83,7 @@ standard output (`stdout`), and can be stopped by pressing `Ctrl-C`. Kibana loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in -<>. +{kibana-ref}/settings.html[Configuring Kibana]. [[targz-layout]] diff --git a/docs/setup/install/windows.asciidoc b/docs/setup/install/windows.asciidoc index 6d94e5cefc7bb..bd7c414cc03e2 100644 --- a/docs/setup/install/windows.asciidoc +++ b/docs/setup/install/windows.asciidoc @@ -20,16 +20,16 @@ endif::[] ifeval::["{release-state}"!="unreleased"] Download the `.zip` windows archive for Kibana v{version} from -https://artifacts.elastic.co/downloads/kibana/kibana-{version}-windows-x86.zip +https://artifacts.elastic.co/downloads/kibana/kibana-{version}-windows-x86_64.zip Unzip it with your favourite unzip tool. This will create a folder called -kibana-{version}-windows-x86, which we will refer to as `$KIBANA_HOME`. In a +kibana-{version}-windows-x86_64, which we will refer to as `$KIBANA_HOME`. 
In a terminal window, `CD` to the `$KIBANA_HOME` directory, for instance: ["source","sh",subs="attributes"] ---------------------------- -CD c:\kibana-{version}-windows-x86 +CD c:\kibana-{version}-windows-x86_64 ---------------------------- endif::[] @@ -52,7 +52,7 @@ and can be stopped by pressing `Ctrl-C`. Kibana loads its configuration from the `$KIBANA_HOME/config/kibana.yml` file by default. The format of this config file is explained in -<>. +{kibana-ref}/settings.html[Configuring Kibana]. [[windows-layout]] ==== Directory layout of `.zip` archive diff --git a/docs/setup/production.asciidoc b/docs/setup/production.asciidoc index ca0b2e64d4f29..042b0ff98768e 100644 --- a/docs/setup/production.asciidoc +++ b/docs/setup/production.asciidoc @@ -21,7 +21,7 @@ and an Elasticsearch client node on the same machine. For more information, see [[configuring-kibana-shield]] === Using Kibana with X-Pack -You can use {xpack-ref}xpack-security.html[X-Pack Security] to control what +You can use {xpack-ref}/xpack-security.html[X-Pack Security] to control what Elasticsearch data users can access through Kibana. When you install X-Pack, Kibana users have to log in. They need to @@ -34,7 +34,8 @@ not exist. X-Pack Security does not currently provide a way to control which users can load which dashboards. For information about setting up Kibana users and how to configure Kibana -to work with X-Pack, see {xpack-ref}kibana.html. +to work with X-Pack, see +{kibana-ref}/using-kibana-with-security.html[Configuring Security in Kibana]. [float] [[enabling-ssl]] @@ -84,7 +85,7 @@ across the nodes is to run an Elasticsearch _Coordinating only_ node on the same Elasticsearch Coordinating only nodes are essentially smart load balancers that are part of the cluster. They process incoming HTTP requests, redirect operations to the other nodes in the cluster as needed, and gather and return the results. For more information, see -{es-ref}modules-node.html[Node] in the Elasticsearch reference. +{ref}/modules-node.html[Node] in the Elasticsearch reference. To use a local client node to load balance Kibana requests: @@ -98,7 +99,7 @@ To use a local client node to load balance Kibana requests: # node.master: false node.data: false -node.ingest: false +node.ingest: false -------- . Configure the client node to join your Elasticsearch cluster. In `elasticsearch.yml`, set the `cluster.name` to the name of your cluster. diff --git a/docs/setup/settings.asciidoc b/docs/setup/settings.asciidoc index 04c2de1f58cc1..5f7a75e31b061 100644 --- a/docs/setup/settings.asciidoc +++ b/docs/setup/settings.asciidoc @@ -6,7 +6,6 @@ on `localhost:5601`. To change the host or port number, or connect to Elasticsea you'll need to update your `kibana.yml` file. You can also enable SSL and set a variety of other options. .Kibana Configuration Settings -[horizontal] `server.port:`:: *Default: 5601* Kibana is served by a back end server. This setting specifies the port to use. `server.host:`:: *Default: "localhost"* This setting specifies the host of the back end server. `server.basePath:`:: Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects @@ -15,6 +14,7 @@ you'll need to update your `kibana.yml` file. You can also enable SSL and set a `server.maxPayloadBytes:`:: *Default: 1048576* The maximum payload size in bytes for incoming server requests. `server.name:`:: *Default: "your-hostname"* A human-readable display name that identifies this Kibana instance. 
`server.defaultRoute:`:: *Default: "/app/kibana"* This setting specifies the default route when opening Kibana. You can use this setting to modify the landing page when opening Kibana. +`server.customResponseHeaders:`:: *Default: `{}`* Header names and values to send on all responses to the client from the Kibana server. `elasticsearch.url:`:: *Default: "http://localhost:9200"* The URL of the Elasticsearch instance to use for all your queries. `elasticsearch.preserveHost:`:: *Default: true* When this setting’s value is true Kibana uses the hostname specified in @@ -24,22 +24,45 @@ to this Kibana instance. dashboards. Kibana creates a new index if the index doesn’t already exist. `kibana.defaultAppId:`:: *Default: "discover"* The default application to load. [[tilemap-settings]]`tilemap.url:`:: The URL to the tile -service that Kibana uses to display map tiles in tilemap visualizations. By default, Kibana reads this url from an external metadata service, but users can still override this parameter to use their own Tile Map Service. For example: `"https://tiles.elastic.co/v2/default/{z}/{x}/{y}.png?elastic_tile_service_tos=agree&my_app_name=kibana"` +service that Kibana uses to display map tiles in tilemap visualizations. By default, Kibana reads this url from an external metadata service, but users can still override this parameter to use their own Tile Map Service. For example: `"https://tiles.elastic.co/v2/default/{z}/{x}/{y}.png?elastic_tile_service_tos=agree&my_app_name=kibana"` `tilemap.options.minZoom:`:: *Default: 1* The minimum zoom level. `tilemap.options.maxZoom:`:: *Default: 10* The maximum zoom level. -`tilemap.options.attribution:`:: *Default: `"© [Elastic Tile Service](https://www.elastic.co/elastic-tile-service)"`* The map attribution string. +`tilemap.options.attribution:`:: *Default: `"© [Elastic Maps Service](https://www.elastic.co/elastic-maps-service)"`* The map attribution string. `tilemap.options.subdomains:`:: An array of subdomains used by the tile service. Specify the position of the subdomain in the URL with the token `{s}`. + +[[regionmap-settings]] `regionmap`:: Specifies additional vector layers for use in <> visualizations. +Each layer object points to an external vector file that contains a geojson FeatureCollection. +The file must use the WGS84 coordinate reference system and only include polygons. +If the file is hosted on a separate domain from Kibana, the server needs to be CORS-enabled so Kibana can download the file. +The url field also serves as a unique identifier for the file. +Each layer can contain multiple fields to indicate what properties from the geojson features you want to expose. +The field.description is the human-readable text that is shown in the Region Map visualization's field menu. +An optional attribution value can be added as well. +The following example shows a valid regionmap configuration. + + regionmap: + layers: + - name: "Departments of France" + url: "http://my.cors.enabled.server.org/france_departements.geojson" + attribution: "INRAP" + fields: + - name: "department" + description: "Full department name" + - name: "INSEE" + description: "INSEE numeric identifier" + `elasticsearch.username:` and `elasticsearch.password:`:: If your Elasticsearch is protected with basic authentication, these settings provide the username and password that the Kibana server uses to perform maintenance on the Kibana index at startup. Your Kibana users still need to authenticate with Elasticsearch, which is proxied through the Kibana server.
`server.ssl.enabled`:: *Default: "false"* Enables SSL for outgoing requests from the Kibana server to the browser. When set to `true`, `server.ssl.certificate` and `server.ssl.key` are required `server.ssl.certificate:` and `server.ssl.key:`:: Paths to the PEM-format SSL certificate and SSL key files, respectively. `server.ssl.keyPassphrase`:: The passphrase that will be used to decrypt the private key. This value is optional as the key may not be encrypted. +`server.ssl.redirectHttpFromPort` :: Kibana will bind to this port and redirect all http requests to https over the port configured as `server.port`. `server.ssl.certificateAuthorities`:: List of paths to PEM encoded certificate files that should be trusted. `server.ssl.supportedProtocols`:: *Default: TLSv1, TLSv1.1, TLSv1.2* Supported protocols with versions. Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2` `server.ssl.cipherSuites`:: *Default: ECDHE-RSA-AES128-GCM-SHA256, ECDHE-ECDSA-AES128-GCM-SHA256, ECDHE-RSA-AES256-GCM-SHA384, ECDHE-ECDSA-AES256-GCM-SHA384, DHE-RSA-AES128-GCM-SHA256, ECDHE-RSA-AES128-SHA256, DHE-RSA-AES128-SHA256, ECDHE-RSA-AES256-SHA384, DHE-RSA-AES256-SHA384, ECDHE-RSA-AES256-SHA256, DHE-RSA-AES256-SHA256, HIGH,!aNULL, !eNULL, !EXPORT, !DES, !RC4, !MD5, !PSK, !SRP, !CAMELLIA*. Details on the format, and the valid options, are available via the [OpenSSL cipher list format documentation](https://www.openssl.org/docs/man1.0.2/apps/ciphers.html#CIPHER-LIST-FORMAT) -`elasticsearch.ssl.cert:` and `elasticsearch.ssl.key:`:: Optional settings that provide the paths to the PEM-format SSL +`elasticsearch.ssl.certificate:` and `elasticsearch.ssl.key:`:: Optional settings that provide the paths to the PEM-format SSL certificate and key files. These files validate that your Elasticsearch backend uses the same key files. `elasticsearch.ssl.keyPassphrase`:: The passphrase that will be used to decrypt the private key. This value is optional as the key may not be encrypted. `elasticsearch.ssl.certificateAuthorities:`:: Optional setting that enables you to specify a list of paths to the PEM file for the certificate @@ -69,6 +92,8 @@ information and all requests. The minimum value is 100. `status.allowAnonymous`:: *Default: false* If authentication is enabled, setting this to `true` allows unauthenticated users to access the Kibana server status API and status page. +`cpu.cgroup.path.override`:: Override for cgroup cpu path when mounted in manner that is inconsistent with `/proc/self/cgroup` +`cpuacct.cgroup.path.override`:: Override for cgroup cpuacct path when mounted in manner that is inconsistent with `/proc/self/cgroup` `console.enabled`:: *Default: true* Set to false to disable Console. Toggling this will cause the server to regenerate assets on the next startup, which may cause a delay before pages start being served. `elasticsearch.tribe.url:`:: Optional URL of the Elasticsearch tribe instance to use for all your @@ -76,12 +101,12 @@ queries. `elasticsearch.tribe.username:` and `elasticsearch.tribe.password:`:: If your Elasticsearch is protected with basic authentication, these settings provide the username and password that the Kibana server uses to perform maintenance on the Kibana index at startup. Your Kibana users still need to authenticate with Elasticsearch, which is proxied through the Kibana server. 
-`elasticsearch.tribe.ssl.cert:` and `elasticsearch.tribe.ssl.key:`:: Optional settings that provide the paths to the PEM-format SSL +`elasticsearch.tribe.ssl.certificate:` and `elasticsearch.tribe.ssl.key:`:: Optional settings that provide the paths to the PEM-format SSL certificate and key files. These files validate that your Elasticsearch backend uses the same key files. -`elasticsearch.tribe.ssl.ca:`:: Optional setting that enables you to specify a path to the PEM file for the certificate -authority for your Elasticsearch instance. -`elasticsearch.tribe.ssl.verify:`:: *Default: true* To disregard the validity of SSL certificates, change this setting’s value -to `false`. +`elasticsearch.tribe.ssl.keyPassphrase`:: The passphrase that will be used to decrypt the private key. This value is optional as the key may not be encrypted. +`elasticsearch.tribe.ssl.certificateAuthorities:`:: Optional setting that enables you to specify a path to the PEM file for the certificate +authority for your tribe Elasticsearch instance. +`elasticsearch.tribe.ssl.verificationMode:`:: *Default: full* Controls the verification of certificates. Valid values are `none`, `certificate`, and `full`. `full` performs hostname verification, and `certificate` does not. `elasticsearch.tribe.pingTimeout:`:: *Default: the value of the `elasticsearch.tribe.requestTimeout` setting* Time in milliseconds to wait for Elasticsearch to respond to pings. `elasticsearch.tribe.requestTimeout:`:: *Default: 30000* Time in milliseconds to wait for responses from the back end or diff --git a/docs/setup/tribe.asciidoc b/docs/setup/tribe.asciidoc index 024489b8eba66..b19a2d67cb1c9 100644 --- a/docs/setup/tribe.asciidoc +++ b/docs/setup/tribe.asciidoc @@ -1,7 +1,12 @@ [[tribe]] == Using Kibana with Tribe nodes -Kibana can be configured to connect to a https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-tribe.html[tribe node] for data retrieval. Because tribe nodes can't create indices, Kibana additionally +NOTE: While tribe nodes have been deprecated in Elasticsearch in favor of +<<management-cross-cluster-search,cross cluster search>>, you can still use Kibana with tribe nodes until +version 7.0. Unlike tribe nodes, using cross cluster search in Kibana requires no +server-side configurations and doesn't disable functionality like <<console-kibana,Console>>. + +Kibana can be configured to connect to a {ref}/modules-tribe.html[tribe node] for data retrieval. Because tribe nodes can't create indices, Kibana additionally requires a separate connection to a node to maintain state. When configured, searches and visualizations will retrieve data using the tribe node and administrative actions (such as saving a dashboard) will be sent to a non-tribe node. @@ -21,7 +26,8 @@ When configured to use a tribe node, actions that modify Kibana's state will be will retrieve data from the node at `elasticsearch.tribe.url`. It's acceptable to use a node for `elasticsearch.url` that is part of one of the clusters that a tribe node is pointing to. -The full list of configurations can be found at <<settings>>. +The full list of configurations can be found at {kibana-ref}/settings.html[Configuring +Kibana]. [float] [[tribe-limitations]] diff --git a/docs/setup/upgrade.asciidoc b/docs/setup/upgrade.asciidoc index 9ec868f3f494d..1f3ebb6f3472f 100644 --- a/docs/setup/upgrade.asciidoc +++ b/docs/setup/upgrade.asciidoc @@ -8,7 +8,7 @@ Before upgrading Kibana: * Consult the <<breaking-changes>> docs. * Test upgrades in a dev environment before upgrading your production servers.
* Backup your data using the Elasticsearch - {es-ref}modules-snapshots.html[snapshots] feature. + {ref}/modules-snapshots.html[snapshots] feature. You **cannot roll back** to an earlier version unless you have a backup of your data. * If you are using custom plugins, check that a compatible version is diff --git a/docs/setup/upgrade/upgrade-standard-reindex.asciidoc b/docs/setup/upgrade/upgrade-standard-reindex.asciidoc index 61dc8a11eeb97..a376540fe0e84 100644 --- a/docs/setup/upgrade/upgrade-standard-reindex.asciidoc +++ b/docs/setup/upgrade/upgrade-standard-reindex.asciidoc @@ -12,10 +12,10 @@ Reindexing is the process of creating a new index with updated syntax and mappings directly from an existing index. While it is possible to do this manually, we recommend using the Elasticsearch Migration Plugin as described in the Elasticsearch -{es-ref}reindex-upgrade.html#reindex-upgrade[Reindex to upgrade] guide. +{ref}/reindex-upgrade.html#reindex-upgrade[Reindex to upgrade] guide. NOTE: The Elasticsearch Migration Plugin creates a versioned `.kibana` index -as well as an {es-ref}indices-aliases.html[index alias] that points to it. +as well as an {ref}/indices-aliases.html[index alias] that points to it. Kibana 5.0 supports this index alias, but if you want to run Kibana 4.x while this Elastic stack upgrade is underway, you'll need to configure your Kibana 4.x install to point to the versioned index using the `kibana.index` diff --git a/docs/setup/upgrade/upgrade-standard.asciidoc b/docs/setup/upgrade/upgrade-standard.asciidoc index 6c8afa90e5918..2266bc232f70b 100644 --- a/docs/setup/upgrade/upgrade-standard.asciidoc +++ b/docs/setup/upgrade/upgrade-standard.asciidoc @@ -9,7 +9,7 @@ upgrade is supported for your version of Kibana. NOTE: If you've saved and/or exported objects in Kibana that rely on the <>, make sure to check the Elasticsearch -{es-ref}breaking-changes.html[breaking changes] documentation and take the +{ref}/breaking-changes.html[breaking changes] documentation and take the necessary remediation steps as per those instructions. [float] @@ -52,4 +52,3 @@ otherwise Kibana will fail to start. <> documentation for more information. . Stop the old Kibana process. . Start the new Kibana process. - diff --git a/docs/timelion.asciidoc b/docs/timelion.asciidoc index 6fe13670f9a4e..7eb22dd9fe915 100644 --- a/docs/timelion.asciidoc +++ b/docs/timelion.asciidoc @@ -15,25 +15,15 @@ For example, Timelion enables you to easily get the answers to questions like: * What percent of Japan's population came to my site today? * What's the 10-day moving average of the S&P 500? * What's the cumulative sum of all search requests received in the last 2 years? --- - -[[timelion-createviz]] -== Creating Time Series Visualizations -To start building time-series visualizations, click **Timelion** in the side -navigation and run through the tutorial. Documentation for the Timelion -expression language is built-in. Click **Docs** in the toolbar to view -the available functions and access the tutorial. As you start to enter -functions in the query bar, Timelion displays the available arguments: - -image::images/timelion-arg-help.jpg["Timelion inline help"] +{nbsp} -To incorporate a Timelion visualization into a Kibana dashboard, save the -Timelion expression as a Kibana dashboard panel. You can then add it to -a dashboard like any other visualization. 
- -TIP: You can also create time series visualizations right from the Visualize -app--just select the Timelion visualization type and enter a Timelion -expression in the expression field. +You might also be interested in these tutorial videos: +* https://www.elastic.co/elasticon/conf/2017/sf/timelion-magic-math-and-everything-in-the-middle[Timelion: Magic, Math, and Everything in the Middle] +* https://www.elastic.co/videos/timelion-plugin-for-kibana-enables-times-series-paris-meetup[Timelion Plugin for Kibana Enables Time Series] +* https://www.elastic.co/videos/using-kibana-and-timelion-to-analyze-earthquake-data[Using Kibana and Timelion to Analyze Earthquake Data] +-- +include::timelion/timelion-getting-started.asciidoc[] +include::timelion/timelion-inline-help.asciidoc[] \ No newline at end of file diff --git a/docs/timelion/getting-started/timelion-conditional.asciidoc b/docs/timelion/getting-started/timelion-conditional.asciidoc new file mode 100644 index 0000000000000..f9353e4bbca72 --- /dev/null +++ b/docs/timelion/getting-started/timelion-conditional.asciidoc @@ -0,0 +1,62 @@ +[[timelion-conditional]] +=== Using conditional logic and tracking trends + +In this section you will learn how to modify time series data with conditional logic and create a trend with a moving average. This is helpful to easily detect outliers and patterns over time. + +For the purposes of this tutorial, you will continue to use https://www.elastic.co/downloads/beats/metricbeat[Metricbeat data] to add another visualization that monitors memory consumption. To start, use the following expression to chart the maximum value of `system.memory.actual.used.bytes`. + +[source,text] +---------------------------------- +.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes') +---------------------------------- + +image::images/timelion-conditional01.png[] +{nbsp} + +Let’s create two thresholds to keep an eye on the amount of used memory. For the purposes of this tutorial, your warning threshold will be 12.5GB and your severe threshold will be 15GB. When the maximum amount of used memory exceeds either of these thresholds, the series will be colored accordingly. + +NOTE: If the threshold values are too high or low for your machine, please adjust accordingly. + +To configure these two threshold values, you can utilize Timelion's conditional logic. In this tutorial you will use `if()` to compare each point to a number, adjust the styling if the condition evaluates to `true` and use the default styling if the condition evaluates to `false`. Timelion offers the following six operator values for comparison. + +[horizontal] +`eq`:: equal +`ne`:: not equal +`lt`:: less than +`lte`:: less than or equal to +`gt`:: greater than +`gte`:: greater than or equal to + +Since there are two thresholds, it makes sense to style them differently. Use the `gt` operator to color the warning threshold yellow with `.color('#FFCC11')` and the severe threshold red with `.color('red')`. 
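Before assembling the full expression, it may help to see the shape of a single `.if()` call in isolation: the arguments are the operator, the value to compare against, the series (or value) to return where the comparison is true, and the series (or value) to return elsewhere. A stripped-down sketch of just the warning threshold, using the same series and threshold as below:

[source,text]
----
.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,12500000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null)
----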
Enter the following expression into the Timelion query bar to apply the conditional logic and threshold styling: + +[source,text] +---------------------------------- +.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,12500000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null).label('warning').color('#FFCC11'), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,15000000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null).label('severe').color('red') +---------------------------------- + +image::images/timelion-conditional02.png[] +{nbsp} + +For additional information on Timelion's conditional capabilities, check out the blog post https://www.elastic.co/blog/timeseries-if-then-else-with-timelion[I have but one .condition()]. + +Now that you have thresholds defined to easily identify outliers, let’s create a new series to determine what the trend really is. Timelion's `mvavg()` function allows you to calculate the moving average over a given window. This is especially helpful for noisy time series. For this tutorial, you will use `.mvavg(10)` to create a moving average with a window of 10 data points. Use the following expression to create a moving average of the maximum memory usage: + +[source,text] +---------------------------------- +.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,12500000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null).label('warning').color('#FFCC11'), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,15000000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null).label('severe').color('red'), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').mvavg(10) +---------------------------------- + +image::images/timelion-conditional03.png[] +{nbsp} + +Now that you have thresholds and a moving average, let's format the visualization so it is a bit easier to consume.
As with the last section, use the `.color()`, `.lines()`, `.title()` and `.legend()` functions to update your visualization accordingly: + +[source,text] +---------------------------------- +.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').label('max memory').title('Memory consumption over time'), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,12500000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null).label('warning').color('#FFCC11').lines(width=5), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').if(gt,15000000000,.es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes'),null).label('severe').color('red').lines(width=5), .es(index=metricbeat-*, timefield='@timestamp', metric='max:system.memory.actual.used.bytes').mvavg(10).label('mvavg').lines(width=2).color(#5E5E5E).legend(columns=4, position=nw) +---------------------------------- + +image::images/timelion-conditional04.png[] +{nbsp} + +Save your Timelion sheet and continue on to the next section to add these new visualizations to your dashboard. diff --git a/docs/timelion/getting-started/timelion-create.asciidoc b/docs/timelion/getting-started/timelion-create.asciidoc new file mode 100644 index 0000000000000..032b1c72e0094 --- /dev/null +++ b/docs/timelion/getting-started/timelion-create.asciidoc @@ -0,0 +1,37 @@ +[[timelion-create]] +=== Creating time series visualizations + +This tutorial uses the time series data from https://www.elastic.co/guide/en/beats/metricbeat/current/index.html[Metricbeat] to walk you through a number of functions that Timelion offers. To get started, download Metricbeat and follow the https://www.elastic.co/downloads/beats/metricbeat[instructions here] to start ingesting the data locally. + +The first visualization you'll create will compare the real-time percentage of CPU time spent in user space to the results offset by one hour. In order to create this visualization, we’ll need to create two Timelion expressions: one with the real-time average of `system.cpu.user.pct`, and another with the average offset by one hour. + +To start, you will need to define an `index`, `timefield` and `metric` in the first expression. Go ahead and enter the expression below into the Timelion query bar. + +[source,text] +---------------------------------- +.es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct') +---------------------------------- + +image::images/timelion-create01.png[] +{nbsp} + +Now you need to add another series with data from the previous hour for comparison. To do so, you'll have to add an `offset` argument to the `.es()` function. `offset` will offset the series retrieval by a date expression. For this example, you'll want to offset the data back one hour and will be using the date expression `-1h`. Using a comma to separate the two series, enter the following expression into the Timelion query bar: + +[source,text] +---------------------------------- +.es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct'), .es(offset=-1h,index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct') +---------------------------------- + +image::images/timelion-create02.png[] +{nbsp} + +It’s a bit hard to differentiate the two series. Customize the labels in order to easily distinguish them.
You can always append the `.label()` function to any expression to add a custom label. Enter the expression below into the Timelion query bar to customize your labels: +[source,text] +---------------------------------- +.es(offset=-1h,index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('last hour'), .es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('current hour') +---------------------------------- + +image::images/timelion-create03.png[] +{nbsp} + +Save the entire Timelion sheet as _Metricbeat Example_. As a best practice, save any significant changes made to this sheet as you progress through this tutorial. diff --git a/docs/timelion/getting-started/timelion-customize.asciidoc b/docs/timelion/getting-started/timelion-customize.asciidoc new file mode 100644 index 0000000000000..9e1db5e20dcd1 --- /dev/null +++ b/docs/timelion/getting-started/timelion-customize.asciidoc @@ -0,0 +1,53 @@ +[[timelion-customize]] +=== Customize and format visualizations + +Timelion has plenty of options for customization. You can personalize nearly every aspect of a chart with the functions available. For this tutorial, you will perform the following modifications. + +* Add a title +* Change a series type +* Change the color and opacity of a series +* Modify the legend + +In the <>, you created a Timelion chart with two series. Let’s continue to customize this visualization. + +Before making any other modifications, append the `title()` function to the end of an expression to add a title with a meaningful name. This will make it much easier for unfamiliar users to understand the visualization's purpose. For this example, add `title('CPU usage over time')` to the original series. Use the following expression in your Timelion query bar: + +[source,text] +---------------------------------- +.es(offset=-1h,index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('last hour'), .es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('current hour').title('CPU usage over time') +---------------------------------- + +image::images/timelion-customize01.png[] +{nbsp} + +To differentiate the last hour series a bit more, you are going to change the chart type to an area chart. In order to do so, you'll need to use the `.lines()` function to customize the line chart. You'll set the `fill` and `width` arguments to control the chart fill and line width, respectively. In this example, you will set the fill level to 1 and the width of the border to 0.5 by appending `.lines(fill=1,width=0.5)`. Use the following expression in the Timelion query bar: + +[source,text] +---------------------------------- +.es(offset=-1h,index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('last hour').lines(fill=1,width=0.5), .es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('current hour').title('CPU usage over time') +---------------------------------- + +image::images/timelion-customize02.png[] +{nbsp} + +Let’s color these series so that the current hour series pops a bit more than the last hour series. The `color()` function can be used to change the color of any series and accepts standard color names, hexadecimal values or a color schema for grouped series. For this example, you will use `.color(gray)` for the last hour and `.color(#1E90FF)` for the current hour.
Enter the following expression into the Timelion query bar to make the adjustments: + +[source,text] +---------------------------------- +.es(offset=-1h,index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('last hour').lines(fill=1,width=0.5).color(gray), .es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('current hour').title('CPU usage over time').color(#1E90FF) +---------------------------------- + +image::images/timelion-customize03.png[] +{nbsp} + +Last but not least, adjust the legend so that it takes up as little space as possible. You can use the `.legend()` function to set the position and style of the legend. For this example, place the legend in the northwest position of the visualization with two columns by appending `.legend(columns=2, position=nw)` to the original series. Use the following expression to make the adjustments: + +[source,text] +---------------------------------- +.es(offset=-1h,index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('last hour').lines(fill=1,width=0.5).color(gray), .es(index=metricbeat-*, timefield='@timestamp', metric='avg:system.cpu.user.pct').label('current hour').title('CPU usage over time').color(#1E90FF).legend(columns=2, position=nw) +---------------------------------- + +image::images/timelion-customize04.png[] +{nbsp} + +Save your changes and continue on to the next section to learn about mathematical functions. diff --git a/docs/timelion/getting-started/timelion-math.asciidoc b/docs/timelion/getting-started/timelion-math.asciidoc new file mode 100644 index 0000000000000..561278db07e27 --- /dev/null +++ b/docs/timelion/getting-started/timelion-math.asciidoc @@ -0,0 +1,61 @@ +[[timelion-math]] +=== Using mathematical functions + +You’ve learned how to create and style a Timelion visualization in the previous two sections. This section will explore the mathematical functions Timelion offers. You will continue to use the https://www.elastic.co/downloads/beats/metricbeat[Metricbeat data] to create a new Timelion visualization for inbound and outbound network traffic. To start, you'll need to add a new Timelion visualization to the sheet. + +In the top menu, click `Add` to add a second visualization. When added to the sheet, you’ll notice that the query bar has been replaced with the default `.es(*)` expression. This is because the query is associated with the visualization on the Timelion sheet you have selected. + +image::images/timelion-math01.png[] +{nbsp} + +To start tracking the inbound and outbound network traffic, your first expression will calculate the maximum value of `system.network.in.bytes`. Enter the expression below into your Timelion query bar: + +[source,text] +---------------------------------- +.es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.in.bytes) +---------------------------------- + +image::images/timelion-math02.png[] +{nbsp} + +Monitoring network traffic is much more valuable when plotting the rate of change. The `derivative()` function does just that - it plots the change in values over time. This can easily be done by appending `.derivative()` to the end of an expression.
Use the following expression to update your visualization: + +[source,text] +---------------------------------- +.es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.in.bytes).derivative() +---------------------------------- + +image::images/timelion-math03.png[] +{nbsp} + +Now for the outbound traffic. You'll need to add a similar calculation for `system.network.out.bytes`. Since outbound traffic is leaving your machine, it makes sense to represent this metric as a negative number. The `.multiply()` function will multiply the series by a number, the result of a series or a list of series. For this example, you will use `.multiply(-1)` to convert the outbound network traffic to a negative value. Use the following expression to update your visualization: + +[source,text] +---------------------------------- +.es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.in.bytes).derivative(), .es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.out.bytes).derivative().multiply(-1) +---------------------------------- + +image::images/timelion-math04.png[] +{nbsp} + +To make this visualization a bit easier to consume, convert the series from bytes to megabytes. Timelion's `.divide()` function accepts the same input as `.multiply()` and divides the series by the divisor you define. Use the following expression to update your visualization: + +[source,text] +---------------------------------- +.es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.in.bytes).derivative().divide(1048576), .es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.out.bytes).derivative().multiply(-1).divide(1048576) +---------------------------------- + +image::images/timelion-math05.png[] +{nbsp} + +Using the formatting functions `.title()`, `.label()`, `.color()`, `.lines()` and `.legend()` learned in <>, let’s clean up the visualization a bit. Use the following expression to update your visualization: + +[source,text] +---------------------------------- +.es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.in.bytes).derivative().divide(1048576).lines(fill=2, width=1).color(green).label("Inbound traffic").title("Network traffic (MB/s)"), .es(index=metricbeat*, timefield=@timestamp, metric=max:system.network.out.bytes).derivative().multiply(-1).divide(1048576).lines(fill=2, width=1).color(blue).label("Outbound traffic").legend(columns=2, position=nw) +---------------------------------- + +image::images/timelion-math06.png[] +{nbsp} + +Save your changes and continue on to the next section to learn about conditional logic and tracking trends. diff --git a/docs/timelion/getting-started/timelion-save.asciidoc b/docs/timelion/getting-started/timelion-save.asciidoc new file mode 100644 index 0000000000000..feaeb5468d8da --- /dev/null +++ b/docs/timelion/getting-started/timelion-save.asciidoc @@ -0,0 +1,25 @@ +[[timelion-save]] +=== Add to dashboard + +You’ve officially harnessed the power of Timelion to create time series visualizations. The final step of this tutorial is to add your new visualizations to a dashboard. This section shows you how to save a visualization from your Timelion sheet and add it to an existing dashboard. + +To save a Timelion visualization as a dashboard panel, follow the steps below. + +. Select the visualization you’d like to add to one (or multiple) dashboards +. Click the `Save` option in the top menu +.
Select `Save current expression as Kibana dashboard panel` +. Name your panel and click `Save` to save as a dashboard visualization + +image::images/timelion-save01.png[] +{nbsp} + +Now you can add this dashboard panel to any dashboard you’d like. The visualization will also be listed in the Visualize list. Go ahead and follow the same process for the rest of the visualizations you created. + +Create a new dashboard or open an existing one to add the Timelion visualizations as you would any other visualization. + +image::images/timelion-save02.png[] +{nbsp} + +TIP: You can also create time series visualizations right from the Visualize +app--just select the Timeseries visualization type and enter a Timelion +expression in the expression field. diff --git a/docs/timelion/timelion-getting-started.asciidoc b/docs/timelion/timelion-getting-started.asciidoc new file mode 100644 index 0000000000000..a88e8b86c2fbe --- /dev/null +++ b/docs/timelion/timelion-getting-started.asciidoc @@ -0,0 +1,23 @@ +[[timelion-getting-started]] +== Getting Started + +Ready to experience all that is Timelion? This getting started tutorial shows +you how to: + +* <> +* <> +* <> +* <> +* <> +* <> + + +include::getting-started/timelion-create.asciidoc[] + +include::getting-started/timelion-customize.asciidoc[] + +include::getting-started/timelion-math.asciidoc[] + +include::getting-started/timelion-conditional.asciidoc[] + +include::getting-started/timelion-save.asciidoc[] \ No newline at end of file diff --git a/docs/timelion/timelion-inline-help.asciidoc b/docs/timelion/timelion-inline-help.asciidoc new file mode 100644 index 0000000000000..4f863deb4853c --- /dev/null +++ b/docs/timelion/timelion-inline-help.asciidoc @@ -0,0 +1,8 @@ +[[timelion-inline-help]] +== Inline Help and Documentation + +Can't remember a function, or looking for a new one? You can always reference Timelion's inline help and documentation. + +Documentation for the Timelion expression language is built-in. Click `Docs` in the top menu to view the available functions and access the inline reference. As you start to enter functions in the query bar, Timelion will display the relevant arguments in real time. + +image::images/timelion-arg-help.jpg["Timelion inline help"] diff --git a/docs/visualize.asciidoc b/docs/visualize.asciidoc index 7b3ca7b6a33e0..40620bc4661e8 100644 --- a/docs/visualize.asciidoc +++ b/docs/visualize.asciidoc @@ -8,7 +8,7 @@ Elasticsearch indices. You can then build <> that display related visualizations. Kibana visualizations are based on Elasticsearch queries. By using a -series of Elasticsearch {es-ref}search-aggregations.html[aggregations] +series of Elasticsearch {ref}/search-aggregations.html[aggregations] to extract and process your data, you can create charts that show you the trends, spikes, and dips you need to know about. @@ -22,21 +22,34 @@ or start with a new search query. To create a visualization: . Click on *Visualize* in the side navigation. +. Click the *Create new visualization* button or the **+** button. . Choose the visualization type: + +* *Basic charts* +[horizontal] +<>:: Compare different series in X/Y charts. +<>:: Shade cells within a matrix. +<>:: Display each source's contribution to a total. +* *Data* [horizontal] -<>:: Compare different series in X/Y charts. <>:: Display the raw data of a composed aggregation. -<>:: Display free-form information or -instructions. <>:: Display a single number. -<>:: Display each source's contribution to a total.
-<>:: Display words as a cloud in which the size of the word correspond to its importance -<>:: Associate the results of an aggregation with geographic +<>:: Display a gauge. +* *Maps* +[horizontal] +<>:: Associate the results of an aggregation with geographic locations. +<>:: Thematic maps where a shape's color intensity corresponds to a metric's value. locations. -Timelion:: Compute and combine data from multiple time series +* *Time Series* +[horizontal] +<>:: Compute and combine data from multiple time series data sets. - +<>:: Visualize time series data using pipeline aggregations. +* *Other* +[horizontal] +<>:: Display words as a cloud in which the size of the word corresponds to its importance. +<>:: Display free-form information or +instructions. . Specify a search query to retrieve the data for your visualization: ** To enter new search criteria, select the index pattern for the indices that contain the data you want to visualize. This opens the visualization builder @@ -53,24 +66,46 @@ from the saved search. . In the visualization builder, choose the metric aggregation for the visualization's Y axis: -+ -* {es-ref}search-aggregations-metrics-valuecount-aggregation.html[count] -* {es-ref}search-aggregations-metrics-avg-aggregation.html[average] -* {es-ref}search-aggregations-metrics-sum-aggregation.html[sum] -* {es-ref}search-aggregations-metrics-min-aggregation.html[min] -* {es-ref}search-aggregations-metrics-max-aggregation.html[max] -* {es-ref}search-aggregations-metrics-cardinality-aggregation.html[unique count] -* {es-ref}search-aggregations-metrics-percentile-aggregation.html[median] (50th percentile) -* {es-ref}search-aggregations-metrics-percentile-aggregation.html[percentiles] -* {es-ref}search-aggregations-metrics-percentile-rank-aggregation.html[percentile ranks] + +* *Metric Aggregations*: + +* {ref}/search-aggregations-metrics-valuecount-aggregation.html[count] +* {ref}/search-aggregations-metrics-avg-aggregation.html[average] +* {ref}/search-aggregations-metrics-sum-aggregation.html[sum] +* {ref}/search-aggregations-metrics-min-aggregation.html[min] +* {ref}/search-aggregations-metrics-max-aggregation.html[max] +* {ref}/search-aggregations-metrics-stats-aggregation.html[standard deviation] +* {ref}/search-aggregations-metrics-cardinality-aggregation.html[unique count] +* {ref}/search-aggregations-metrics-percentile-aggregation.html[median] (50th percentile) +* {ref}/search-aggregations-metrics-percentile-aggregation.html[percentiles] +* {ref}/search-aggregations-metrics-percentile-rank-aggregation.html[percentile ranks] +* {ref}/search-aggregations-metrics-top-hits-aggregation.html[top hit] +* {ref}/search-aggregations-metrics-geocentroid-aggregation.html[geo centroid] + + +* *Parent Pipeline Aggregations*: + +* {ref}/search-aggregations-pipeline-derivative-aggregation.html[derivative] +* {ref}/search-aggregations-pipeline-cumulative-sum-aggregation.html[cumulative sum] +* {ref}/search-aggregations-pipeline-movavg-aggregation.html[moving average] +* {ref}/search-aggregations-pipeline-serialdiff-aggregation.html[serial diff] + + +* *Sibling Pipeline Aggregations*: + +* {ref}/search-aggregations-pipeline-avg-bucket-aggregation.html[average bucket] +* {ref}/search-aggregations-pipeline-sum-bucket-aggregation.html[sum bucket] +* {ref}/search-aggregations-pipeline-min-bucket-aggregation.html[min bucket] +* {ref}/search-aggregations-pipeline-max-bucket-aggregation.html[max bucket] + .
For the visualization's X axis, select a bucket aggregation: + -* {es-ref}search-aggregations-bucket-datehistogram-aggregation.html[date histogram] -* {es-ref}search-aggregations-bucket-range-aggregation.html[range] -* {es-ref}search-aggregations-bucket-terms-aggregation.html[terms] -* {es-ref}search-aggregations-bucket-filters-aggregation.html[filters] -* {es-ref}search-aggregations-bucket-significantterms-aggregation.html[significant terms] +* {ref}/search-aggregations-bucket-datehistogram-aggregation.html[date histogram] +* {ref}/search-aggregations-bucket-range-aggregation.html[range] +* {ref}/search-aggregations-bucket-terms-aggregation.html[terms] +* {ref}/search-aggregations-bucket-filters-aggregation.html[filters] +* {ref}/search-aggregations-bucket-significantterms-aggregation.html[significant terms] For example, if you're indexing Apache server logs, you could build a bar chart that shows the distribution of incoming requests by geographic location by @@ -109,10 +144,18 @@ include::visualize/markdown.asciidoc[] include::visualize/metric.asciidoc[] +include::visualize/goal.asciidoc[] + include::visualize/pie.asciidoc[] include::visualize/tilemap.asciidoc[] +include::visualize/regionmap.asciidoc[] + +include::visualize/time-series-visual-builder.asciidoc[] + include::visualize/tagcloud.asciidoc[] include::visualize/heatmap.asciidoc[] + +include::visualize/visualization-raw-data.asciidoc[] diff --git a/docs/visualize/datatable.asciidoc b/docs/visualize/datatable.asciidoc index f3bcaa54b02d6..15f865de884a8 100644 --- a/docs/visualize/datatable.asciidoc +++ b/docs/visualize/datatable.asciidoc @@ -8,35 +8,35 @@ the table into additional tables. Each bucket type supports the following aggregations: -*Date Histogram*:: A {es-ref}search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a +*Date Histogram*:: A {ref}/search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a numeric field and organized by date. You can specify a time frame for the intervals in seconds, minutes, hours, days, weeks, months, or years. You can also specify a custom interval frame by selecting *Custom* as the interval and specifying a number and a time unit in the text field. Custom interval time units are *s* for seconds, *m* for minutes, *h* for hours, *d* for days, *w* for weeks, and *y* for years. Different units support different levels of precision, down to one second. -*Histogram*:: A standard {es-ref}search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a +*Histogram*:: A standard {ref}/search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a numeric field. Specify an integer interval for this field. Select the *Show empty buckets* checkbox to include empty intervals in the histogram. -*Range*:: With a {es-ref}search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges +*Range*:: With a {ref}/search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges of values for a numeric field. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*Date Range*:: A {es-ref}search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values +*Date Range*:: A {ref}/search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values that are within a range of dates that you specify.
You can specify the ranges for the dates using -{es-ref}common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. +{ref}/common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. Click the red *(/)* symbol to remove a range. -*IPv4 Range*:: The {es-ref}search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to +*IPv4 Range*:: The {ref}/search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to specify ranges of IPv4 addresses. Click *Add Range* to add a set of range endpoints. Click the red *(/)* symbol to remove a range. -*Terms*:: A {es-ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top +*Terms*:: A {ref}/search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top or bottom _n_ elements of a given field to display, ordered by count or a custom metric. -*Filters*:: You can specify a set of {es-ref}search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. +*Filters*:: You can specify a set of {ref}/search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. You can specify a filter as a query string or in JSON format, just as in the Discover search bar. Click *Add Filter* to add another filter. Click the image:images/labelbutton.png[] *label* button to open the label field, where you can type in a name to display on the visualization. *Significant Terms*:: Displays the results of the experimental -{es-ref}search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. The value of the +{ref}/search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. The value of the *Size* parameter defines the number of entries this aggregation returns. -*Geohash*:: The {es-ref}search-aggregations-bucket-geohashgrid-aggregation.html[_geohash_] aggregation displays points +*Geohash*:: The {ref}/search-aggregations-bucket-geohashgrid-aggregation.html[_geohash_] aggregation displays points based on the geohash coordinates. Once you've specified a bucket type aggregation, you can define sub-buckets to refine the visualization. Click @@ -58,7 +58,7 @@ definition, as in the following example: { "script" : "doc['grade'].value * 1.2" } NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable -{es-ref}modules-scripting.html[dynamic Groovy scripting]. +{ref}/modules-scripting.html[dynamic Groovy scripting]. The availability of these options varies depending on the aggregation you choose. @@ -72,9 +72,3 @@ Checkboxes are available to enable and disable the following behaviors: *Show partial rows*:: Check this box to display a row even when there is no result. NOTE: Enabling these behaviors may have a substantial effect on performance. - -[float] -[[datatable-viewing-detailed-information]] -=== Viewing Detailed Information - -include::visualization-raw-data.asciidoc[] diff --git a/docs/visualize/goal.asciidoc b/docs/visualize/goal.asciidoc new file mode 100644 index 0000000000000..32306411ae8b2 --- /dev/null +++ b/docs/visualize/goal.asciidoc @@ -0,0 +1,36 @@ +[[goal-chart]] +== Goal and Gauge + +A goal visualization displays how your metric progresses toward a fixed goal. A gauge visualization displays which +predefined range your metric falls into.
+ +include::y-axis-aggs.asciidoc[] + +Open the *Advanced* link to display more customization options: + +*JSON Input*:: A text field where you can add specific JSON-formatted properties to merge with the aggregation +definition, as in the following example: + +[source,shell] +{ "script" : "doc['grade'].value * 1.2" } + +NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable +{ref}/modules-scripting.html[dynamic Groovy scripting]. + +The availability of these options varies depending on the aggregation you choose. + +Click the *Options* tab to change the following options: + +- *Gauge Type*: select between the arc, circle, and metric display types. +- *Percentage Mode*: shows all values as percentages. +- *Vertical Split*: puts the gauges one under another instead of one next to another. +- *Show Labels*: shows or hides the labels. +- *Sub Text*: text for the label that appears below the value. +- *Auto Extend Range*: automatically grows the gauge if the value exceeds its range. +- *Ranges*: add custom ranges. Each range is assigned a color; if the value falls within a range, it is given +that range's color. A chart with a single range is called a goal chart. A chart with multiple ranges is called a gauge +chart. +- *Color Options*: define how to color your ranges (which color schema to use). Color options are visible only if more than +one range is defined. +- *Style - Show Scale*: shows or hides the scale. +- *Style - Color Labels*: whether the labels should have the same color as the range the value falls in. diff --git a/docs/visualize/heatmap.asciidoc b/docs/visualize/heatmap.asciidoc index 8c895d19b026b..aff106bdb349c 100644 --- a/docs/visualize/heatmap.asciidoc +++ b/docs/visualize/heatmap.asciidoc @@ -1,7 +1,7 @@ [[heatmap-chart]] == Heatmap Chart -A heat map is a graphical representation of data where the individual values contained in a matrix are represented as colors. +A heat map is a graphical representation of data where the individual values contained in a matrix are represented as colors. The color for each matrix position is determined by the _metrics_ aggregation. The following aggregations are available for this chart: @@ -9,42 +9,42 @@ include::y-axis-aggs.asciidoc[] The _buckets_ aggregations determine what information is being retrieved from your data set. -Before you choose a buckets aggregation, specify if you are defining buckets for X or Y axis within a single chart -or splitting into multiple charts. A multiple chart split must run before any other aggregations. -When you split a chart, you can change if the splits are displayed in a row or a column by clicking +Before you choose a buckets aggregation, specify if you are defining buckets for X or Y axis within a single chart +or splitting into multiple charts. A multiple chart split must run before any other aggregations. +When you split a chart, you can change if the splits are displayed in a row or a column by clicking the *Rows | Columns* selector. This chart's X and Y axis supports the following aggregations. Click the linked name of each aggregation to visit the main Elasticsearch documentation for that aggregation. -*Date Histogram*:: A {es-ref}search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a +*Date Histogram*:: A {ref}/search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a numeric field and organized by date.
You can specify a time frame for the intervals in seconds, minutes, hours, days, weeks, months, or years. You can also specify a custom interval frame by selecting *Custom* as the interval and specifying a number and a time unit in the text field. Custom interval time units are *s* for seconds, *m* for minutes, *h* for hours, *d* for days, *w* for weeks, and *y* for years. Different units support different levels of precision, down to one second. -*Histogram*:: A standard {es-ref}search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a +*Histogram*:: A standard {ref}/search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a numeric field. Specify an integer interval for this field. Select the *Show empty buckets* checkbox to include empty intervals in the histogram. -*Range*:: With a {es-ref}search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges +*Range*:: With a {ref}/search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges of values for a numeric field. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*Date Range*:: A {es-ref}search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values +*Date Range*:: A {ref}/search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values that are within a range of dates that you specify. You can specify the ranges for the dates using -{es-ref}common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. +{ref}/common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*IPv4 Range*:: The {es-ref}search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to +*IPv4 Range*:: The {ref}/search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to specify ranges of IPv4 addresses. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*Terms*:: A {es-ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top +*Terms*:: A {ref}/search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top or bottom _n_ elements of a given field to display, ordered by count or a custom metric. -*Filters*:: You can specify a set of {es-ref}search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. +*Filters*:: You can specify a set of {ref}/search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. You can specify a filter as a query string or in JSON format, just as in the Discover search bar. Click *Add Filter* to add another filter. Click the image:images/labelbutton.png[Label button icon] *label* button to open the label field, where you can type in a name to display on the visualization. *Significant Terms*:: Displays the results of the experimental -{es-ref}search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. +{ref}/search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. Enter a string in the *Custom Label* field to change the display label. 
@@ -72,12 +72,9 @@ Select the *Options* tab to change the following aspects of the chart: *Color Scale*:: You can switch between linear, log and sqrt scales for color scale. *Scale to Data Bounds*:: The default Y axis bounds are zero and the maximum value returned in the data. Check this box to change both upper and lower bounds to match the values returned in the data. -*Number of Colors*:: Number of color buckets to create. Minimum is 2 and maximum is 10. +*Number of Colors*:: Number of color buckets to create. Minimum is 2 and maximum is 10. *Percentage Mode*:: Enabling this will show legend values as percentages. *Custom Range*:: You can define custom ranges for your color buckets. For each of the color bucket you need to specify -the minimum value (inclusive) and the maximum value (exclusive) of a range. +the minimum value (inclusive) and the maximum value (exclusive) of a range. *Show Label*:: Enables showing labels with cell values in each cell *Rotate*:: Allows rotating the cell value label by 90 degrees. - - -include::visualization-raw-data.asciidoc[] \ No newline at end of file diff --git a/docs/visualize/metric.asciidoc b/docs/visualize/metric.asciidoc index 9849319188763..4cb29555eea77 100644 --- a/docs/visualize/metric.asciidoc +++ b/docs/visualize/metric.asciidoc @@ -14,14 +14,8 @@ definition, as in the following example: { "script" : "doc['grade'].value * 1.2" } NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable -{es-ref}modules-scripting.html[dynamic Groovy scripting]. +{ref}/modules-scripting.html[dynamic Groovy scripting]. The availability of these options varies depending on the aggregation you choose. Click the *Options* tab to display the font size slider. - -[float] -[[metric-viewing-detailed-information]] -=== Viewing Detailed Information - -include::visualization-raw-data.asciidoc[] diff --git a/docs/visualize/pie.asciidoc b/docs/visualize/pie.asciidoc index c8dfb646c718b..708ac1d7f8697 100644 --- a/docs/visualize/pie.asciidoc +++ b/docs/visualize/pie.asciidoc @@ -4,11 +4,11 @@ The slice size of a pie chart is determined by the _metrics_ aggregation. The following aggregations are available for this axis: -*Count*:: The {es-ref}search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of +*Count*:: The {ref}/search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of the elements in the selected index pattern. -*Sum*:: The {es-ref}search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric +*Sum*:: The {ref}/search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric field. Select a field from the drop-down. -*Unique Count*:: The {es-ref}search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns +*Unique Count*:: The {ref}/search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns the number of unique values in a field. Select a field from the drop-down. Enter a string in the *Custom Label* field to change the display label. 
@@ -21,33 +21,33 @@ if the splits are displayed in a row or a column by clicking the *Rows | Columns You can specify any of the following bucket aggregations for your pie chart: -*Date Histogram*:: A {es-ref}search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a +*Date Histogram*:: A {ref}/search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a numeric field and organized by date. You can specify a time frame for the intervals in seconds, minutes, hours, days, weeks, months, or years. You can also specify a custom interval frame by selecting *Custom* as the interval and specifying a number and a time unit in the text field. Custom interval time units are *s* for seconds, *m* for minutes, *h* for hours, *d* for days, *w* for weeks, and *y* for years. Different units support different levels of precision, down to one second. -*Histogram*:: A standard {es-ref}search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a +*Histogram*:: A standard {ref}/search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a numeric field. Specify an integer interval for this field. Select the *Show empty buckets* checkbox to include empty intervals in the histogram. -*Range*:: With a {es-ref}search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges +*Range*:: With a {ref}/search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges of values for a numeric field. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*Date Range*:: A {es-ref}search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values +*Date Range*:: A {ref}/search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values that are within a range of dates that you specify. You can specify the ranges for the dates using -{es-ref}common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. +{ref}/common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. Click the red *(/)* symbol to remove a range. -*IPv4 Range*:: The {es-ref}search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to +*IPv4 Range*:: The {ref}/search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to specify ranges of IPv4 addresses. Click *Add Range* to add a set of range endpoints. Click the red *(/)* symbol to remove a range. -*Terms*:: A {es-ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top +*Terms*:: A {ref}/search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top or bottom _n_ elements of a given field to display, ordered by count or a custom metric. -*Filters*:: You can specify a set of {es-ref}search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. +*Filters*:: You can specify a set of {ref}/search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. You can specify a filter as a query string or in JSON format, just as in the Discover search bar. Click *Add Filter* to add another filter. Click the image:images/labelbutton.png[] *label* button to open the label field, where you can type in a name to display on the visualization. 
*Significant Terms*:: Displays the results of the experimental -{es-ref}search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. The value of the +{ref}/search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. The value of the *Size* parameter defines the number of entries this aggregation returns. After defining an initial bucket aggregation, you can define sub-buckets to refine the visualization. Click *+ Add @@ -72,7 +72,7 @@ definition, as in the following example: { "script" : "doc['grade'].value * 1.2" } NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable -{es-ref}modules-scripting.html[dynamic Groovy scripting]. +{ref}/modules-scripting.html[dynamic Groovy scripting]. The availability of these options varies depending on the aggregation you choose. @@ -83,9 +83,3 @@ Select the *Options* tab to change the following aspects of the table: After changing options, click the *Apply changes* button to update your visualization, or the grey *Discard changes* button to keep your visualization in its current state. - -[float] -[[pie-viewing-detailed-information]] -==== Viewing Detailed Information - -include::visualization-raw-data.asciidoc[] diff --git a/docs/visualize/regionmap.asciidoc b/docs/visualize/regionmap.asciidoc new file mode 100644 index 0000000000000..9237677283691 --- /dev/null +++ b/docs/visualize/regionmap.asciidoc @@ -0,0 +1,41 @@ +[[regionmap]] +== Region Maps + +Region maps are thematic maps in which boundary vector shapes are colored using a gradient: +higher intensity colors indicate larger values, and lower intensity colors indicate smaller values. +These are also known as choropleth maps. + +image::images/regionmap.png[] + + +=== Configuration + +To create a region map, you configure an inner join that joins the result of an Elasticsearch terms aggregation +and a reference vector file based on a shared key. + +==== Data + +===== Metrics + +Select any of the supported _Metric_ or _Sibling Pipeline Aggregations_. + +===== Buckets + +Configure a _Terms_ aggregation. The term is the _key_ that is used to join the results to the vector data on the map. + +==== Options + +===== Layer Settings +- *Vector map*: select from a list of vector maps. This list includes the maps that are hosted by the https://www.elastic.co/elastic-maps-service[Elastic Maps Service], +as well as your self-hosted layers that are configured in the *config/kibana.yml* file (a sample layer entry is sketched at the end of this section). To learn more about how to configure Kibana +to make self-hosted layers available, see the <> documentation. +- *Join field*: this is the property from the selected vector map that will be used to join on the terms in your terms aggregation. +When terms cannot be joined to any of the shapes in the vector layer because there is no exact match in the vector layer, Kibana will display a warning. +To turn off these warnings, go to *Management/Kibana/Advanced Settings* and set `visualization:regionmap:showWarnings` to `false`. + +===== Style Settings +- *Color Schema*: the color range used to color the shapes. + +===== Basic Settings +- *Legend Position*: the location on the screen where the legend should be rendered. +- *Show Tooltip*: indicates whether a tooltip should be displayed when hovering over a shape.
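+
+For illustration only, a self-hosted layer entry in *config/kibana.yml* can look roughly like the sketch below. The layer name, URL, attribution, and field entries are placeholder values, and the exact settings may vary by Kibana version, so treat the region map settings documentation as authoritative:
+
+[source,yaml]
+----------------------------------
+regionmap:
+  layers:
+    - name: "Departments of France"  # appears in the Vector map dropdown
+      url: "http://my.cors.enabled.server.org/france_departements.geojson"
+      attribution: "INRAP"
+      fields:                        # each field can serve as the Join field
+        - name: "department"
+          description: "Full department name"
+        - name: "INSEE"
+          description: "INSEE numeric identifier"
+----------------------------------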
\ No newline at end of file diff --git a/docs/visualize/tagcloud.asciidoc b/docs/visualize/tagcloud.asciidoc index f001485c6e0e6..04aef6af9df7c 100644 --- a/docs/visualize/tagcloud.asciidoc +++ b/docs/visualize/tagcloud.asciidoc @@ -1,7 +1,7 @@ [[tagcloud-chart]] == Tag Clouds -A tag cloud visualization is a visual representation of text data, typically used to visualize free form text. +A tag cloud visualization is a visual representation of text data, typically used to visualize free form text. Tags are usually single words, and the importance of each tag is shown with font size or color. The font size for each word is determined by the _metrics_ aggregation. The following aggregations are available for @@ -16,7 +16,7 @@ Before you choose a buckets aggregation, select the *Split Tags* option. You can specify the following bucket aggregations for tag cloud visualization: -*Terms*:: A {es-ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top +*Terms*:: A {ref}/search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top or bottom _n_ elements of a given field to display, ordered by count or a custom metric. You can click the *Advanced* link to display more customization options for your metrics or bucket aggregation: @@ -28,7 +28,7 @@ definition, as in the following example: { "script" : "doc['grade'].value * 1.2" } NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable -{es-ref}modules-scripting.html[dynamic Groovy scripting]. +{ref}/modules-scripting.html[dynamic Groovy scripting]. Select the *Options* tab to change the following aspects of the chart: @@ -39,6 +39,3 @@ regularize the display of data sets with variabilities that are themselves highl *Orientation*:: You can select how to orientate your text in the tag cloud. You can choose one of the following options: Single, right angles and multiple. *Font Size*:: Allows you to set minimum and maximum font size to use for this visualization. - - -include::visualization-raw-data.asciidoc[] \ No newline at end of file diff --git a/docs/visualize/tilemap.asciidoc b/docs/visualize/tilemap.asciidoc index cbb52d01a3408..56740e873f971 100644 --- a/docs/visualize/tilemap.asciidoc +++ b/docs/visualize/tilemap.asciidoc @@ -1,81 +1,53 @@ [[tilemap]] -== Tile Maps +== Coordinate Maps -A tile map displays a geographic area overlaid with circles keyed to the data determined by the buckets you specify. +A coordinate map displays a geographic area overlaid with circles keyed to the data determined by the buckets you specify. -NOTE: By default, Kibana uses the https://www.elastic.co/elastic-tile-service[Elastic Tile Service] +NOTE: By default, Kibana uses the https://www.elastic.co/elastic-maps-service[Elastic Maps Service] to display map tiles. To use other tile service providers, configure the <> in `kibana.yml`. -The default _metrics_ aggregation for a tile map is the *Count* aggregation. You can select any of the following +=== Configuration + +==== Data + +===== Metrics + +The default _metrics_ aggregation for a coordinate map is the *Count* aggregation. You can select any of the following aggregations as the metrics aggregation: -*Count*:: The {es-ref}search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of +*Count*:: The {ref}/search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of the elements in the selected index pattern. 
-*Average*:: This aggregation returns the {es-ref}search-aggregations-metrics-avg-aggregation.html[_average_] of a numeric +*Average*:: This aggregation returns the {ref}/search-aggregations-metrics-avg-aggregation.html[_average_] of a numeric field. Select a field from the drop-down. -*Sum*:: The {es-ref}search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric +*Sum*:: The {ref}/search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric field. Select a field from the drop-down. -*Min*:: The {es-ref}search-aggregations-metrics-min-aggregation.html[_min_] aggregation returns the minimum value of a +*Min*:: The {ref}/search-aggregations-metrics-min-aggregation.html[_min_] aggregation returns the minimum value of a numeric field. Select a field from the drop-down. -*Max*:: The {es-ref}search-aggregations-metrics-max-aggregation.html[_max_] aggregation returns the maximum value of a +*Max*:: The {ref}/search-aggregations-metrics-max-aggregation.html[_max_] aggregation returns the maximum value of a numeric field. Select a field from the drop-down. -*Unique Count*:: The {es-ref}search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns +*Unique Count*:: The {ref}/search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns the number of unique values in a field. Select a field from the drop-down. Enter a string in the *Custom Label* field to change the display label. -The _buckets_ aggregations determine what information is being retrieved from your data set. +===== Buckets -Before you choose a buckets aggregation, specify if you are splitting the chart or displaying the buckets as *Geo -Coordinates* on a single chart. A multiple chart split must run before any other aggregations. +Coordinate maps use the {ref}/search-aggregations-bucket-geohashgrid-aggregation.html[_geohash_] aggregation. Select a field, typically coordinates, from the +drop-down. -Tile maps use the *Geohash* aggregation as their initial aggregation. Select a field, typically coordinates, from the -drop-down. The *Precision* slider determines the granularity of the results displayed on the map. See the documentation -for the {es-ref}search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[geohash grid] -aggregation for details on the area specified by each precision level. Kibana supports a maximum geohash length of 7. +- The _Change precision on map zoom_ box is checked by default. Uncheck the box to disable this behavior. +The _Precision_ slider determines the granularity of the results displayed on the map. See the documentation +for the {ref}/search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[geohash grid] +aggregation for details on the area specified by each precision level. NOTE: Higher precisions increase memory usage for the browser displaying Kibana as well as for the underlying Elasticsearch cluster. -Once you've specified a buckets aggregation, you can define sub-aggregations to refine the visualization. Tile maps -only support sub-aggregations as split charts. Click *+ Add Sub Aggregation*, then *Split Chart* to select a -sub-aggregation from the list of types: - -*Date Histogram*:: A {es-ref}search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a -numeric field and organized by date.
You can specify a time frame for the intervals in seconds, minutes, hours, days, -weeks, months, or years. You can also specify a custom interval frame by selecting *Custom* as the interval and -specifying a number and a time unit in the text field. Custom interval time units are *s* for seconds, *m* for minutes, -*h* for hours, *d* for days, *w* for weeks, and *y* for years. Different units support different levels of precision, -down to one second. -*Histogram*:: A standard {es-ref}search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a -numeric field. Specify an integer interval for this field. Select the *Show empty buckets* checkbox to include empty -intervals in the histogram. -*Range*:: With a {es-ref}search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges -of values for a numeric field. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove -a range. -After changing options, click the *Apply changes* button to update your visualization, or the grey *Discard -changes* button to keep your visualization in its current state. -*Date Range*:: A {es-ref}search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values -that are within a range of dates that you specify. You can specify the ranges for the dates using -{es-ref}common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. -Click the red *(/)* symbol to remove a range. -*IPv4 Range*:: The {es-ref}search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to -specify ranges of IPv4 addresses. Click *Add Range* to add a set of range endpoints. Click the red *(/)* symbol to -remove a range. -*Terms*:: A {es-ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top -or bottom _n_ elements of a given field to display, ordered by count or a custom metric. -*Filters*:: You can specify a set of {es-ref}search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. -You can specify a filter as a query string or in JSON format, just as in the Discover search bar. Click *Add Filter* to -add another filter. Click the image:images/labelbutton.png[] *label* button to open the label field, where you can type -in a name to display on the visualization. -*Significant Terms*:: Displays the results of the experimental -{es-ref}search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. The value of the -*Size* parameter defines the number of entries this aggregation returns. -*Geohash*:: The {es-ref}search-aggregations-bucket-geohashgrid-aggregation.html[_geohash_] aggregation displays points -based on the geohash coordinates. - -NOTE: By default, the *Change precision on map zoom* box is checked. Uncheck the box to disable this behavior. +- The _place markers off grid (use {ref}/search-aggregations-metrics-geocentroid-aggregation.html[geocentroid])_ box is checked by default. When this box is checked, the markers are +placed in the center of all the documents in that bucket. When unchecked, the markers are placed in the center +of the geohash grid cell. Leaving this checked generally results in a more accurate visualization. + Enter a string in the *Custom Label* field to change the display label. 
@@ -90,11 +62,12 @@ definition, as in the following example:

{ "script" : "doc['grade'].value * 1.2" }

NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable
-{es-ref}modules-scripting.html[dynamic Groovy scripting].
+{ref}/modules-scripting.html[dynamic Groovy scripting].

The availability of these options varies depending on the aggregation you choose.

-Select the *Options* tab to change the following aspects of the chart:
+==== Options
+
*Map type*:: Select one of the following options from the drop-down.
*_Scaled Circle Markers_*:: Scale the size of the markers based on the metric aggregation's value.
@@ -130,7 +103,9 @@ changes* button to keep your visualization in its current state.

[float]
[[navigating-map]]
-==== Navigating the Map
+
+=== Navigating the Map
+
Once your tilemap visualization is ready, you can explore the map in several ways:

* Click and hold anywhere on the map and move the cursor to move the map center. Hold Shift and drag a bounding box
@@ -140,9 +115,3 @@ across the map to zoom in on the selection.
geohash buckets that have at least one result.
* Click the *Latitude/Longitude Filter* image:images/viz-lat-long-filter.png[] button, then drag a bounding box
across the map, to create a filter for the box coordinates.
-
-[float]
-[[tilemap-viewing-detailed-information]]
-==== Viewing Detailed Information
-
-include::visualization-raw-data.asciidoc[]
diff --git a/docs/visualize/time-series-visual-builder.asciidoc b/docs/visualize/time-series-visual-builder.asciidoc
new file mode 100644
index 0000000000000..1bf3d52421de0
--- /dev/null
+++ b/docs/visualize/time-series-visual-builder.asciidoc
@@ -0,0 +1,214 @@
+[[time-series-visual-builder]]
+== Time Series Visual Builder
+
+*Experimental Feature*
+
+Time Series Visual Builder is a time series data visualizer with an emphasis
+on allowing you to use the full power of the Elasticsearch aggregation framework.
+Time Series Visual Builder allows you to combine an infinite number of
+aggregations and pipeline aggregations to display complex data in a meaningful way.
+
+image:images/tsvb-screenshot.png["Time Series Visual Builder Interface"]
+
+=== Featured Visualizations
+
+Time Series Visual Builder comes with five different visualization types. You can
+switch between each visualization type using the tabbed picker at the top of the
+interface.
+
+
+==== Time Series
+
+A histogram visualization that supports area, line, bar, and steps along with
+multiple y-axes. You can fully customize the colors, points, line thickness,
+and fill opacity. This visualization also supports time shifting to compare two
+time periods, as well as annotations, which can be loaded from
+a separate index based on a query.
+
+image:images/tsvb-timeseries.png["Time Series Visualization"]
+
+
+==== Metric
+
+A visualization for displaying the latest number in a series. This visualization
+supports two metrics: a primary metric and a secondary metric. The labels and
+backgrounds can be fully customized based on a set of rules.
+
+image:images/tsvb-metric.png["Metric Visualization"]
+
+
+==== Top N
+
+This is a horizontal bar chart where the y-axis is based on a series of metrics
+and the x-axis is the latest value in those series, sorted in descending order.
+The color of the bars is fully customizable based on a set of rules.
+
+image:images/tsvb-top-n.png["Top N Visualization"]
+
+
+==== Gauge
+
+This is a single value gauge visualization based on the latest value in a series.
+The face of the gauge can be either a half circle or a full circle. You
+can customize the thicknesses of the inner and outer lines to achieve a desired
+design aesthetic. The color of the gauge and the text are fully customizable based
+on a set of rules.
+
+image:images/tsvb-gauge.png["Gauge Visualization"]
+
+
+==== Markdown
+
+This visualization allows you to enter Markdown text and embed Mustache
+template syntax to customize the Markdown with data based on a set of series.
+This visualization also supports HTML markup along with the ability to define
+a custom stylesheet.
+
+image:images/tsvb-markdown.png["Markdown Visualization"]
+
+
+=== Interface Overview
+
+The user interface for each visualization is composed of a "Data" tab and a "Panel
+Options" tab. The only exceptions are the Time Series and Markdown visualizations:
+the Time Series visualization has a third tab for annotations, and the Markdown
+visualization has a third tab for the editor.
+
+==== Data Tab
+
+The data tab is used for configuring the series for each visualization. This tab
+allows you to add multiple series, depending on what the visualization
+supports, with multiple aggregations composed together to create a single metric.
+Here is a breakdown of the significant components of the data tab UI.
+
+===== Series Label and Color
+
+Each series supports a label, which will be used for legends and titles depending on
+which visualization type is selected. For series that are grouped by a term, you
+can specify a mustache variable of `{{key}}` to substitute the term. For most
+visualizations you can also choose a color by clicking on the swatch, which displays
+the color picker.
+
+image:images/tsvb-data-tab-label.png["Label Example"]
+
+===== Metrics
+
+Each series supports multiple metrics (aggregations); the last metric (aggregation)
+is the value that will be displayed for the series, indicated by the "eye"
+icon to the left of the metric. Metrics can be composed using pipeline aggregations.
+A common use case is to create a metric with a "max" aggregation, then create a "derivative"
+metric and choose the previous "max" metric as the source; this produces a rate.
+
+image:images/tsvb-data-tab-derivative-example.png["Derivative Example"]
+
+===== Series Options
+
+Each series also supports a set of options, which depend on the type of
+visualization you have selected. Across all visualization types
+you can configure:
+
+* Data format
+* Time range offset
+* Index pattern, timestamp, and interval override
+
+
+image:images/tsvb-data-tab-series-options.png["Default Series Options"]
+
+For the Time Series visualization you can also configure:
+
+* Chart type
+* Options for each chart type
+* Legend Visibility
+* Y-Axis options
+* Split color theme
+
+image:images/tsvb-data-tab-series-options-time-series.png["Time Series Series Options"]
+
+===== Group By Controls
+
+At the bottom of the metrics there is a set of "Group By" controls that allows you
+to specify how the series should be grouped or split. There are four choices:
+
+* Everything
+* Filter (single)
+* Filters (multiple with configurable colors)
+* Terms
+
+By default, the series is grouped by everything.
+
+==== Panel Options Tab
+
+The panel options tab is used for configuring the entire panel; the set of options
+available is dependent on which visualization you have selected.
Below is a list
+of the options available per visualization:
+
+*Time Series*
+
+* Index pattern, timestamp, and interval
+* Y-Axis min and max
+* Y-Axis position
+* Background color
+* Legend visibility
+* Legend position
+* Panel filter
+
+*Metric*
+
+* Index pattern, timestamp, and interval
+* Panel filter
+* Color rules for background and primary value
+
+*Top N*
+
+* Index pattern, timestamp, and interval
+* Panel filter
+* Background color
+* Item URL
+* Color rules for bar colors
+
+*Gauge*
+
+* Index pattern, timestamp, and interval
+* Panel filter
+* Background color
+* Gauge max
+* Gauge style
+* Inner gauge color
+* Inner gauge width
+* Gauge line width
+* Color rules for gauge line
+
+*Markdown*
+
+* Index pattern, timestamp, and interval
+* Panel filter
+* Background color
+* Scroll bar visibility
+* Vertical alignment of content
+* Custom Panel CSS with support for Less syntax
+
+==== Annotations Tab
+
+The annotations tab is used for adding annotation data sources to the Time Series
+Visualization. You can configure the following options:
+
+* Index pattern and time field
+* Annotation color
+* Annotation icon
+* Fields to include in message
+* Format of message
+* Filtering options at the panel and global level
+
+image:images/tsvb-annotations.png["Annotation Tab"]
+
+==== Markdown Tab
+
+The markdown tab is used for editing the source for the Markdown visualization.
+The user interface has an editor on the left side and the available variables from
+the data tab on the right side. You can click on the variable names to insert
+the mustache template variable into the markdown at the cursor position. The mustache
+syntax uses the Handlebars.js processor, which is an extended version of the Mustache
+template language.
+
+image:images/tsvb-markdown-tab.png["Markdown Tab"]
+
diff --git a/docs/visualize/visualization-raw-data.asciidoc b/docs/visualize/visualization-raw-data.asciidoc
index ef50426146e88..520039f2363a1 100644
--- a/docs/visualize/visualization-raw-data.asciidoc
+++ b/docs/visualize/visualization-raw-data.asciidoc
@@ -1,5 +1,9 @@
-To display the raw data behind the visualization, click the bar at the bottom of the container. Tabs with detailed
-information about the raw data replace the visualization:
+[[vis-spy]]
+== Visualization Spy
+
+To display the raw data behind the visualization, click the image:images/spy-open-button.png[] button in the bottom left corner of the container. The visualization spy panel will open. Use the select input to view detailed information about the raw data.
+
+image:images/spy-panel.png[]

.Table
A representation of the underlying data, presented as a paginated data grid. You can sort the items
@@ -16,7 +20,10 @@ A summary of the statistics related to the request and the response, presented
a grid includes the query duration, the request duration, the total number of records found on the server, and
the index pattern used to make the query.

+.Debug
+The visualization's saved state, presented in JSON format.
+
To export the raw data behind the visualization as a comma-separated-values (CSV) file, click on either the
*Raw* or *Formatted* links at the bottom of any of the detailed information tabs. A raw export contains the data as it
is stored in Elasticsearch. A formatted export contains the results of any applicable Kibana
-<>.
+<>.
\ No newline at end of file diff --git a/docs/visualize/x-axis-aggs.asciidoc b/docs/visualize/x-axis-aggs.asciidoc index e6ac7d7d4cf2c..a37b90890a364 100644 --- a/docs/visualize/x-axis-aggs.asciidoc +++ b/docs/visualize/x-axis-aggs.asciidoc @@ -4,34 +4,34 @@ chart, or for split charts. This chart's X axis supports the following aggregations. Click the linked name of each aggregation to visit the main Elasticsearch documentation for that aggregation. -*Date Histogram*:: A {es-ref}search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a +*Date Histogram*:: A {ref}/search-aggregations-bucket-datehistogram-aggregation.html[_date histogram_] is built from a numeric field and organized by date. You can specify a time frame for the intervals in seconds, minutes, hours, days, weeks, months, or years. You can also specify a custom interval frame by selecting *Custom* as the interval and specifying a number and a time unit in the text field. Custom interval time units are *s* for seconds, *m* for minutes, *h* for hours, *d* for days, *w* for weeks, and *y* for years. Different units support different levels of precision, down to one second. -*Histogram*:: A standard {es-ref}search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a +*Histogram*:: A standard {ref}/search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a numeric field. Specify an integer interval for this field. Select the *Show empty buckets* checkbox to include empty intervals in the histogram. -*Range*:: With a {es-ref}search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges +*Range*:: With a {ref}/search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges of values for a numeric field. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*Date Range*:: A {es-ref}search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values +*Date Range*:: A {ref}/search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values that are within a range of dates that you specify. You can specify the ranges for the dates using -{es-ref}common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. +{ref}/common-options.html#date-math[_date math_] expressions. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*IPv4 Range*:: The {es-ref}search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to +*IPv4 Range*:: The {ref}/search-aggregations-bucket-iprange-aggregation.html[_IPv4 range_] aggregation enables you to specify ranges of IPv4 addresses. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. -*Terms*:: A {es-ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top +*Terms*:: A {ref}/search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top or bottom _n_ elements of a given field to display, ordered by count or a custom metric. -*Filters*:: You can specify a set of {es-ref}search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. +*Filters*:: You can specify a set of {ref}/search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. 
You can specify a filter as a query string or in JSON format, just as in the Discover search bar. Click *Add Filter* to
add another filter. Click the image:images/labelbutton.png[Label button icon] *label* button to open the label field, where you can type
in a name to display on the visualization.
*Significant Terms*:: Displays the results of the experimental
-{es-ref}search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation.
+{ref}/search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation.

Once you've specified an X axis aggregation, you can define sub-aggregations to refine the visualization. Click
*+ Add Sub Aggregation* to define a sub-aggregation, then choose *Split Area* or *Split Chart*, then select a
sub-aggregation
diff --git a/docs/visualize/xychart.asciidoc b/docs/visualize/xychart.asciidoc
index b33666b38210a..ce77fc221f8ab 100644
--- a/docs/visualize/xychart.asciidoc
+++ b/docs/visualize/xychart.asciidoc
@@ -1,33 +1,10 @@
[[xy-chart]]
-== X/Y Charts
-X/Y charts refer to Area, Line and Bar charts which allow you to plot your data on X/Y axis.
-
-First you need to select your _metrics_ which define Value axis. The following aggregations are available for this axis:
-
-*Count*:: The {es-ref}search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of
-the elements in the selected index pattern.
-*Average*:: This aggregation returns the {es-ref}search-aggregations-metrics-avg-aggregation.html[_average_] of a numeric
-field. Select a field from the drop-down.
-*Sum*:: The {es-ref}search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric
-field. Select a field from the drop-down.
-*Min*:: The {es-ref}search-aggregations-metrics-min-aggregation.html[_min_] aggregation returns the minimum value of a
-numeric field. Select a field from the drop-down.
-*Max*:: The {es-ref}search-aggregations-metrics-max-aggregation.html[_max_] aggregation returns the maximum value of a
-numeric field. Select a field from the drop-down.
-*Unique Count*:: The {es-ref}search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns
-the number of unique values in a field. Select a field from the drop-down.
-*Percentiles*:: The {es-ref}search-aggregations-metrics-percentile-aggregation.html[_percentile_] aggregation divides the
-values in a numeric field into percentile bands that you specify. Select a field from the drop-down, then specify one
-or more ranges in the *Percentiles* fields. Click the *X* to remove a percentile field. Click *+ Add* to add a
-percentile field.
-*Percentile Rank*:: The {es-ref}search-aggregations-metrics-percentile-rank-aggregation.html[_percentile ranks_]
-aggregation returns the percentile rankings for the values in the numeric field you specify. Select a numeric field
-from the drop-down, then specify one or more percentile rank values in the *Values* fields. Click the *X* to remove a
-values field. Click *+Add* to add a values field.
-
-You can add an aggregation by clicking the *+ Add Aggregation* button.
+== Line, Area, and Bar charts
+Line, Area, and Bar charts allow you to plot your data on X/Y axes.

-Enter a string in the *Custom Label* field to change the display label.
+First, you need to select your _metrics_, which define the value axis.
+
+include::y-axis-aggs.asciidoc[]

The _buckets_ aggregations determine what information is being retrieved from your data set.
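As a concrete illustration of the bucket options described above, the following is a hedged sketch of the Elasticsearch aggregation body behind a Date Histogram X axis with a custom `30m` interval and a Terms split. The field names are hypothetical, not taken from the Kibana source.

```js
// Hypothetical X-axis bucket configuration: a 30-minute date histogram,
// split by the top five values of a keyword field.
const xAxisBuckets = {
  aggs: {
    over_time: {
      date_histogram: {
        field: '@timestamp',
        interval: '30m', // custom interval: a number plus s, m, h, d, w, or y
      },
      aggs: {
        split_by_host: {
          terms: { field: 'host.keyword', size: 5 }, // top 5, ordered by count
        },
      },
    },
  },
};
```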
@@ -52,75 +29,59 @@ definition, as in the following example:

{ "script" : "doc['grade'].value * 1.2" }

NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you to enable
-{es-ref}modules-scripting.html[dynamic Groovy scripting].
+{ref}/modules-scripting.html[dynamic Groovy scripting].

The availability of these options varies depending on the aggregation you choose.

-=== Options
+=== Metrics & Axes

-Select the *Options* tab to change the way your data is visualized. Customization options are grouped into areas to provide easier access:
+Select the *Metrics & Axes* tab to change the way each individual metric is shown on the chart.
+The data series are styled in the _Metrics_ section, while the axes are styled in the X and Y axis sections.

-==== General Settings
+==== Metrics
+Modify how each metric from the Data panel is visualized on the chart.

-*Legend Position*:: Allows you to move your legend to the *left*, *right*, *top* or *bottom*
-*Show Tooltip*:: Enables or disables the display of tooltip on hovering over chart objects
-*Order buckets by descending sum*:: Check this box to enforce sorting of buckets by descending sum in the visualization
+*Chart type*:: Choose between *Area*, *Line*, and *Bar* types.
+*Mode*:: Stack the different metrics, or plot them next to each other.
+*Value Axis*:: Choose the axis you want to plot this data to (the properties of each axis are configured under Y-axes).
+*Line mode*:: Choose whether the outline of lines or bars appears *smooth*, *straight*, or *stepped*.
+
+==== Y-axis

-==== Category Axis
-The category axis is defined by the bucket aggregation you chose under Data tab. Here you can customize how its displayed:
+Style all the Y-axes of the chart.

-*Show*:: You can chose if you want to display category axis or not
-*Position*:: You can choose where you want to display category axis. If you position your category axis on the left or right the chart will turn to the horizontal type.
+*Position*:: Position of the Y-axis (*left* or *right* for vertical charts, and *top* or *bottom* for horizontal charts).
+*Scale type*:: Scaling of the values (*linear*, *log*, or *square root*).
*Advanced Options*::
*Labels - Show Labels*:::: Allows you to hide axis labels
*Labels - Filter Labels*:::: If filter labels is enabled, some labels will be hidden when there is not enough space to display them
*Labels - Rotate*:::: You can enter the number of degrees by which to rotate labels
*Labels - Truncate*:::: You can enter the size in pixels to which the label is truncated
+*Scale to Data Bounds*:::: The default Y-axis bounds are zero and the maximum value returned in the data. Check
+ this box to change both upper and lower bounds to match the values returned in the data.
+*Custom Extents*:::: You can define a custom minimum and maximum for each axis

-==== Grid
-You can enable grid on the chart. By default grid is displayed on the category axis only.
-
-*Category Lines*:: You can disable the display of grid lines on category axis
-*Value Axis*:: You can choose on which (if any) of the value axes you want to display grid lines
-*Color*:: You can specify the color for gird lines
-
-==== Value Axes
-By default one value axis is defined on a chart, but you can add as much as you need. Clicking on the + sign will create a new value axis.
-
-Each value axis has this options:
-
-*Show*:: You can decide to hide the value axis completely
-*Label*:: Allows to define a custom label
-*Position*:: Options for position depend on the position of your category axis. If category axis is positioned on the top or bottom you can position value axis on the left or right. In the opposite case you can position your value axis on the top or bottom.
-*Mode*:: Mode allows you to define how value axis represents the values. You can choose among the following:
-_wiggle_:::: Displays the aggregations as a https://en.wikipedia.org/wiki/Streamgraph[streamgraph].
-_percentage_:::: Displays each aggregation as a proportion of the total.
-_silhouette_:::: Displays each aggregation as variance from a central line.
-*Scale Type*:: Allows you to choose between *linear*, *square root* and *log* scale
+==== X-Axis
+*Position*:: Position of the X-Axis (*left* or *right* for horizontal charts, and *top* or *bottom* for vertical charts).
*Advanced Options*::
*Labels - Show Labels*:::: Allows you to hide axis labels
*Labels - Filter Labels*:::: If filter labels is enabled, some labels will be hidden when there is not enough space to display them
*Labels - Rotate*:::: You can enter the number of degrees by which to rotate labels
*Labels - Truncate*:::: You can enter the size in pixels to which the label is truncated
-*Scale to Data Bounds*:::: The default Y axis bounds are zero and the maximum value returned in the data. Check
- this box to change both upper and lower bounds to match the values returned in the data.
-*Custom Extents*:::: You can define custom minimum and maximum for each axis
-==== Series
-Each of the *Series* represents a metric you added in the data tab. For each Series you can define the following options:
-*Show*:: Allows you to hide specific series.
-*Type*:: Allows you to choose between *Area*, *Line* and *Histogram* types. This allows you to show each metrics as a different chart type.
-*Mode*:: Allows you to choose how your values are showed on the chart.
-_stacked_:::: Values for this series will be stacked. Stacking happens per value axis. This means that if you have two series on one value axis and both modes are set to stacked they will be stacked on top of each other. If one of the series modes is set to normal the other series values (in case series are split) will be split and the second series will be grouped next to them. If you want both series to be stacked but not to be stacked on top of each other you will want to plot them on separate value axes.
-_normal_:::: In normal mode values will not be stacked.
-*Value Axis*:: You can define to which value axis this series belongs. If you dont select a value it will belong to the first value axis.
+=== Panel Settings
+
+These are options that apply to the entire chart and not just the individual data series.

-Additional options might be available depending the on the *type* selected. For Area and Line types you can decide to smooth the lines. And for Line chart you can decide to not show lines or circles.
+==== Common options
+*Legend Position*:: Move your legend to the *left*, *right*, *top*, or *bottom*
+*Show Tooltip*:: Enables or disables the display of tooltips when hovering over chart objects
+*Current Time Marker*:: Show a line indicating the current time

-[float]
-[[vertbar-viewing-detailed-information]]
-== Viewing Detailed Information
+==== Grid options
+You can enable a grid on the chart. By default, the grid is displayed on the category axis only.
-
-include::visualization-raw-data.asciidoc[]
+*X-axis*:: You can disable the display of grid lines on the category axis
+*Y-axis*:: You can choose on which (if any) of the value axes you want to display grid lines
diff --git a/docs/visualize/y-axis-aggs.asciidoc b/docs/visualize/y-axis-aggs.asciidoc
index 58803f308f069..40682464a10cb 100644
--- a/docs/visualize/y-axis-aggs.asciidoc
+++ b/docs/visualize/y-axis-aggs.asciidoc
@@ -1,26 +1,58 @@
-*Count*:: The {es-ref}search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of
+Metric Aggregations:
+
+*Count*:: The {ref}/search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of
the elements in the selected index pattern.
-*Average*:: This aggregation returns the {es-ref}search-aggregations-metrics-avg-aggregation.html[_average_] of a numeric
+*Average*:: This aggregation returns the {ref}/search-aggregations-metrics-avg-aggregation.html[_average_] of a numeric
field. Select a field from the drop-down.
-*Sum*:: The {es-ref}search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric
+*Sum*:: The {ref}/search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric
field. Select a field from the drop-down.
-*Min*:: The {es-ref}search-aggregations-metrics-min-aggregation.html[_min_] aggregation returns the minimum value of a
+*Min*:: The {ref}/search-aggregations-metrics-min-aggregation.html[_min_] aggregation returns the minimum value of a
numeric field. Select a field from the drop-down.
-*Max*:: The {es-ref}search-aggregations-metrics-max-aggregation.html[_max_] aggregation returns the maximum value of a
+*Max*:: The {ref}/search-aggregations-metrics-max-aggregation.html[_max_] aggregation returns the maximum value of a
numeric field. Select a field from the drop-down.
-*Unique Count*:: The {es-ref}search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns
+*Unique Count*:: The {ref}/search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns
the number of unique values in a field. Select a field from the drop-down.
-*Standard Deviation*:: The {es-ref}search-aggregations-metrics-extendedstats-aggregation.html[_extended stats_]
+*Standard Deviation*:: The {ref}/search-aggregations-metrics-extendedstats-aggregation.html[_extended stats_]
aggregation returns the standard deviation of data in a numeric field. Select a field from the drop-down.
-*Percentiles*:: The {es-ref}search-aggregations-metrics-percentile-aggregation.html[_percentile_] aggregation divides the
+*Percentiles*:: The {ref}/search-aggregations-metrics-percentile-aggregation.html[_percentile_] aggregation divides the
values in a numeric field into percentile bands that you specify. Select a field from the drop-down, then specify one
or more ranges in the *Percentiles* fields. Click the *X* to remove a percentile field. Click *+ Add* to add a
percentile field.
-*Percentile Rank*:: The {es-ref}search-aggregations-metrics-percentile-rank-aggregation.html[_percentile ranks_]
+*Percentile Rank*:: The {ref}/search-aggregations-metrics-percentile-rank-aggregation.html[_percentile ranks_]
aggregation returns the percentile rankings for the values in the numeric field you specify. Select a numeric field
from the drop-down, then specify one or more percentile rank values in the *Values* fields. Click the *X* to remove a
values field. Click *+Add* to add a values field.
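To ground the percentile-style metrics just described, here is a small sketch of the raw Elasticsearch aggregations they correspond to; `response_time` is an assumed numeric field, and the band values stand in for what a user might type into the *Percentiles* and *Values* fields.

```js
// Hypothetical Y-axis metrics: percentile bands and percentile ranks.
const percentileMetrics = {
  aggs: {
    latency_bands: {
      percentiles: {
        field: 'response_time',
        percents: [50, 95, 99], // entries from the *Percentiles* fields
      },
    },
    latency_ranks: {
      percentile_ranks: {
        field: 'response_time',
        values: [100, 500], // entries from the *Values* fields
      },
    },
  },
};
```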
+Parent Pipeline Aggregations:
+
+For each of the parent pipeline aggregations, you have to define the metric for which the aggregation is calculated.
+That could be one of your existing metrics or a new one. You can also nest these aggregations
+(for example, to produce a third derivative).
+
+*Derivative*:: The {ref}/search-aggregations-pipeline-derivative-aggregation.html[_derivative_] aggregation calculates
+the derivative of specific metrics.
+*Cumulative Sum*:: The {ref}/search-aggregations-pipeline-cumulative-sum-aggregation.html[_cumulative sum_] aggregation
+calculates the cumulative sum of a specified metric in a parent histogram.
+*Moving Average*:: The {ref}/search-aggregations-pipeline-movavg-aggregation.html[_moving average_] aggregation
+slides a window across the data and emits the average value of that window.
+*Serial Diff*:: The {ref}/search-aggregations-pipeline-serialdiff-aggregation.html[_serial differencing_] is a technique
+where values in a time series are subtracted from themselves at different time lags or periods.
+
+Sibling Pipeline Aggregations:
+
+Just like with parent pipeline aggregations, you need to provide a metric for which to calculate the sibling aggregation.
+On top of that, you also need to provide a bucket aggregation, which defines the buckets on which the sibling
+aggregation runs.
+
+*Average Bucket*:: The {ref}/search-aggregations-pipeline-avg-bucket-aggregation.html[_avg bucket_]
+calculates the (mean) average value of a specified metric in a sibling aggregation.
+*Sum Bucket*:: The {ref}/search-aggregations-pipeline-sum-bucket-aggregation.html[_sum bucket_]
+calculates the sum of the values of a specified metric in a sibling aggregation.
+*Min Bucket*:: The {ref}/search-aggregations-pipeline-min-bucket-aggregation.html[_min bucket_]
+calculates the minimum value of a specified metric in a sibling aggregation.
+*Max Bucket*:: The {ref}/search-aggregations-pipeline-max-bucket-aggregation.html[_max bucket_]
+calculates the maximum value of a specified metric in a sibling aggregation.
+
You can add an aggregation by clicking the *+ Add Metrics* button.

Enter a string in the *Custom Label* field to change the display label.
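The following sketch (assumed field names, not from the Kibana source) shows both pipeline families side by side: a _derivative_ parent pipeline nested inside a date histogram, and an _avg bucket_ sibling pipeline that averages the per-bucket metric across all histogram buckets.

```js
// Hypothetical example combining a parent and a sibling pipeline aggregation.
const pipelineAggs = {
  aggs: {
    per_hour: {
      date_histogram: { field: '@timestamp', interval: '1h' },
      aggs: {
        peak_bytes: { max: { field: 'bytes' } },
        // Parent pipeline: bucket-to-bucket rate of change of peak_bytes.
        peak_rate: { derivative: { buckets_path: 'peak_bytes' } },
      },
    },
    // Sibling pipeline: the mean of peak_bytes across all per_hour buckets.
    avg_peak: { avg_bucket: { buckets_path: 'per_hour>peak_bytes' } },
  },
};
```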
diff --git a/package.json b/package.json index 8e2db9c7fa8b4..a21fbf78cd907 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,8 @@ "dashboarding" ], "private": false, - "version": "6.0.0-alpha1", + "version": "7.0.0-alpha1", + "branch": "master", "build": { "number": 8467, "sha": "6cb7fec4e154faa0a4a3fee4b33dfef91b9870d9" @@ -45,10 +46,10 @@ "test:browser": "grunt test:browser", "test:ui": "grunt test:ui", "test:ui:server": "grunt test:ui:server", - "test:ui:runner": "grunt test:ui:runner", + "test:ui:runner": "echo 'use `node scripts/functional_test_runner`' && false", "test:server": "grunt test:server", "test:coverage": "grunt test:coverage", - "test:visualRegression": "grunt test:visualRegression", + "test:visualRegression": "grunt test:visualRegression:buildGallery", "checkLicenses": "grunt licenses", "build": "grunt build", "release": "grunt release", @@ -56,33 +57,29 @@ "precommit": "grunt precommit", "karma": "karma start", "elasticsearch": "grunt esvm:dev:keepalive", - "elasticsearchWithPlugins": "grunt esvm:withPlugins:keepalive", "lint": "grunt eslint:source", "lintroller": "grunt eslint:fixSource", - "makelogs": "makelogs", - "mocha": "mocha", - "mocha:debug": "mocha --debug-brk", + "makelogs": "echo 'use `node scripts/makelogs`' && false", + "mocha": "echo 'use `node scripts/mocha`' && false", "sterilize": "grunt sterilize", "uiFramework:start": "grunt uiFramework:start", - "uiFramework:dev": "node tasks/utils/ui_framework_test --env=jsdom --watch", - "uiFramework:coverage": "node tasks/utils/ui_framework_test --env=jsdom --coverage" + "uiFramework:build": "grunt uiFramework:build", + "uiFramework:createComponent": "yo ./ui_framework/generator-kui/app/component.js", + "uiFramework:documentComponent": "yo ./ui_framework/generator-kui/app/documentation.js" }, "repository": { "type": "git", "url": "https://github.com/elastic/kibana.git" }, "dependencies": { - "@bigfunger/decompress-zip": "0.2.0-stripfix3", - "@bigfunger/jsondiffpatch": "0.1.38-webpack", "@elastic/datemath": "2.3.0", - "@elastic/httpolyglot": "0.1.2-elasticpatch1", + "@elastic/filesaver": "1.1.2", + "@elastic/leaflet-draw": "0.2.3", + "@elastic/leaflet-heat": "0.1.3", + "@elastic/numeral": "2.2.1", + "@elastic/test-subj-selector": "0.2.1", + "@elastic/ui-ace": "0.2.3", "@elastic/webpack-directory-name-as-main": "2.0.2", - "@spalger/filesaver": "1.1.2", - "@spalger/leaflet-draw": "0.2.3", - "@spalger/leaflet-heat": "0.1.3", - "@spalger/numeral": "^2.0.0", - "@spalger/test-subj-selector": "0.2.1", - "@spalger/ui-ace": "0.2.3", "JSONStream": "1.1.1", "accept-language-parser": "1.2.0", "angular": "1.4.7", @@ -97,31 +94,30 @@ "autoprefixer-loader": "2.0.0", "babel-cli": "6.18.0", "babel-core": "6.21.0", - "babel-jest": "18.0.0", + "babel-jest": "20.0.3", "babel-loader": "6.2.10", "babel-plugin-add-module-exports": "0.2.1", + "babel-plugin-transform-async-generator-functions": "6.24.1", + "babel-plugin-transform-class-properties": "6.24.1", + "babel-plugin-transform-object-rest-spread": "6.23.0", "babel-polyfill": "6.20.0", - "babel-preset-env": "1.1.8", - "babel-preset-es2015": "6.22.0", - "babel-preset-es2015-node": "6.1.1", + "babel-preset-env": "1.4.0", "babel-preset-react": "6.22.0", - "babel-preset-stage-1": "6.22.0", "babel-register": "6.18.0", - "babel-runtime": "6.20.0", "bluebird": "2.9.34", "body-parser": "1.12.0", - "boom": "2.8.0", + "boom": "5.2.0", "brace": "0.5.1", "bunyan": "1.7.1", "check-hash": "1.0.1", "color": "1.0.3", "commander": "2.8.1", - "css-loader": "0.17.0", + "css-loader": "0.28.1", 
"d3": "3.5.6", "d3-cloud": "1.2.1", "dragula": "3.7.0", - "elasticsearch": "13.0.0-beta2", - "elasticsearch-browser": "13.0.0-beta2", + "elasticsearch": "13.0.1", + "elasticsearch-browser": "13.0.1", "encode-uri-query": "1.0.0", "even-better": "7.0.2", "expiry-js": "0.1.7", @@ -129,8 +125,8 @@ "expose-loader": "0.7.0", "extract-text-webpack-plugin": "0.8.2", "file-loader": "0.8.4", - "font-awesome": "4.4.0", "flot-charts": "0.8.3", + "font-awesome": "4.4.0", "glob": "5.0.13", "glob-all": "3.0.1", "good-squeeze": "2.1.0", @@ -138,11 +134,12 @@ "h2o2": "5.1.1", "handlebars": "4.0.5", "hapi": "14.2.0", + "http-proxy-agent": "1.0.0", "imports-loader": "0.6.4", "inert": "4.0.2", "jade": "1.11.0", "jade-loader": "0.7.1", - "joi": "6.6.1", + "joi": "10.4.1", "jquery": "2.2.4", "js-yaml": "3.4.1", "json-loader": "0.5.3", @@ -157,11 +154,13 @@ "mkdirp": "0.5.1", "moment": "2.13.0", "moment-timezone": "0.5.4", + "ngreact": "0.3.0", "no-ui-slider": "1.2.0", "node-fetch": "1.3.2", - "node-uuid": "1.4.7", "pegjs": "0.9.0", - "postcss-loader": "1.2.1", + "postcss-loader": "1.3.3", + "prop-types": "15.5.8", + "proxy-from-env": "1.0.0", "pui-react-overlay-trigger": "7.5.4", "pui-react-tooltip": "7.5.4", "querystring-browser": "1.0.4", @@ -172,12 +171,14 @@ "react-anything-sortable": "1.6.1", "react-color": "2.11.1", "react-dom": "15.4.2", + "react-input-autosize": "1.1.0", "react-markdown": "2.4.2", "react-redux": "4.4.5", "react-router": "2.0.0", "react-router-redux": "4.0.4", "react-select": "1.0.0-rc.1", "react-sortable": "1.1.0", + "react-toggle": "3.0.1", "reactcss": "1.0.7", "redux": "3.0.0", "redux-thunk": "0.1.0", @@ -195,31 +196,34 @@ "trunc-text": "1.0.2", "ui-select": "0.19.6", "url-loader": "0.5.6", + "uuid": "3.0.1", "validate-npm-package-name": "2.2.2", "vision": "4.1.0", "webpack": "github:elastic/webpack#fix/query-params-for-aliased-loaders", - "whatwg-fetch": "0.9.0", - "wreck": "6.2.0" + "wreck": "6.2.0", + "yauzl": "2.7.0" }, "devDependencies": { - "@elastic/eslint-config-kibana": "0.4.0", - "@spalger/babel-presets": "0.3.2", + "@elastic/eslint-config-kibana": "0.10.0", + "@elastic/eslint-import-resolver-kibana": "0.8.1", + "@elastic/eslint-plugin-kibana-custom": "1.0.3", "angular-mocks": "1.4.7", - "auto-release-sinon": "1.0.3", - "babel-eslint": "6.1.2", + "babel-eslint": "7.2.3", "chai": "3.5.0", + "chalk": "2.0.1", "chance": "1.0.6", "cheerio": "0.22.0", "chokidar": "1.6.0", - "chromedriver": "2.24.1", + "chromedriver": "2.29.0", "classnames": "2.2.5", "enzyme": "2.7.0", "enzyme-to-json": "1.4.5", - "eslint": "3.11.1", - "eslint-plugin-babel": "4.0.0", - "eslint-plugin-jest": "19.0.1", - "eslint-plugin-mocha": "4.7.0", - "eslint-plugin-react": "6.10.3", + "eslint": "3.19.0", + "eslint-plugin-babel": "4.1.1", + "eslint-plugin-import": "2.3.0", + "eslint-plugin-jest": "20.0.3", + "eslint-plugin-mocha": "4.9.0", + "eslint-plugin-react": "7.1.0", "event-stream": "3.3.2", "expect.js": "0.3.1", "faker": "1.1.0", @@ -230,9 +234,9 @@ "grunt-cli": "0.1.13", "grunt-contrib-clean": "1.0.0", "grunt-contrib-copy": "0.8.1", - "grunt-esvm": "3.2.6", + "grunt-esvm": "3.2.10", "grunt-karma": "2.0.0", - "grunt-run": "0.6.0", + "grunt-run": "0.7.0", "grunt-simple-mocha": "0.4.0", "gulp-sourcemaps": "1.7.3", "highlight.js": "9.0.0", @@ -241,42 +245,46 @@ "html-loader": "0.4.3", "husky": "0.8.1", "image-diff": "1.6.0", - "intern": "3.2.3", "istanbul-instrumenter-loader": "0.1.3", - "jest": "19.0.0", - "jest-cli": "19.0.0", + "jest": "20.0.4", + "jest-cli": "20.0.4", "jsdom": "9.9.1", - "karma": "1.2.0", 
- "karma-chrome-launcher": "0.2.0", - "karma-coverage": "0.5.1", - "karma-firefox-launcher": "0.1.6", - "karma-ie-launcher": "0.2.0", - "karma-mocha": "0.2.0", - "karma-safari-launcher": "0.1.1", + "karma": "1.7.0", + "karma-chrome-launcher": "2.1.1", + "karma-coverage": "1.1.1", + "karma-firefox-launcher": "1.0.1", + "karma-ie-launcher": "1.0.0", + "karma-mocha": "1.3.0", + "karma-safari-launcher": "1.0.0", "keymirror": "0.1.1", + "leadfoot": "1.7.1", "license-checker": "5.1.2", "load-grunt-config": "0.19.2", - "makelogs": "3.2.3", + "makelogs": "4.0.2", "marked-text-renderer": "0.1.0", - "mocha": "2.5.3", + "mocha": "3.3.0", + "mock-fs": "4.2.0", "murmurhash3js": "3.0.1", "ncp": "2.0.0", "nock": "8.0.0", "node-sass": "3.8.0", - "npm": "3.10.10", - "portscanner": "1.0.0", "proxyquire": "1.7.10", "sass-loader": "4.0.0", "simple-git": "1.37.0", "sinon": "1.17.2", "source-map": "0.5.6", "source-map-support": "0.2.10", - "supertest": "1.2.0", + "strip-ansi": "^3.0.1", + "supertest": "3.0.0", "supertest-as-promised": "2.0.2", - "webpack-dev-server": "1.14.1" + "svg-sprite-loader": "3.0.6", + "tree-kill": "1.1.0", + "webpack-dev-server": "1.14.1", + "yeoman-generator": "1.1.1", + "yo": "2.0.0" }, "engines": { - "node": "6.9.5", + "node": "6.11.1", "npm": "3.10.10" } } diff --git a/packages/README.md b/packages/README.md new file mode 100644 index 0000000000000..41c77f1ab85eb --- /dev/null +++ b/packages/README.md @@ -0,0 +1,6 @@ +## Kibana-related node modules + +This folder contains node modules that are created by Kibana developers, for use in Kibana and its plugins. + +For each such node module, create a sub-folder in this folder. The path/name of the folder should mirror the `name` in the node module's `package.json`. To publish the node module as an npm package, `cd` into the package's sub-folder, and run `npm publish` as usual. 
+
diff --git a/packages/eslint-config-kibana/.eslintrc.js b/packages/eslint-config-kibana/.eslintrc.js
new file mode 100644
index 0000000000000..088f2e370113a
--- /dev/null
+++ b/packages/eslint-config-kibana/.eslintrc.js
@@ -0,0 +1,136 @@
+module.exports = {
+  parser: 'babel-eslint',
+
+  plugins: [
+    'mocha',
+    'babel',
+    'react',
+    'import',
+  ],
+
+  env: {
+    es6: true,
+    amd: true,
+    node: true,
+    mocha: true,
+    browser: true,
+  },
+
+  parserOptions: {
+    sourceType: 'module',
+    ecmaVersion: 6,
+    ecmaFeatures: { experimentalObjectRestSpread: true },
+  },
+
+  rules: {
+    'block-scoped-var': 'error',
+    camelcase: [ 'error', { properties: 'never' } ],
+    'comma-dangle': 'off',
+    'comma-style': [ 'error', 'last' ],
+    'consistent-return': 'off',
+    curly: [ 'error', 'multi-line' ],
+    'dot-location': [ 'error', 'property' ],
+    'dot-notation': [ 'error', { allowKeywords: true } ],
+    eqeqeq: [ 'error', 'allow-null' ],
+    'guard-for-in': 'error',
+    indent: [ 'error', 2, { SwitchCase: 1 } ],
+    'key-spacing': [ 'off', { align: 'value' } ],
+    'max-len': [ 'error', 140, 2, { ignoreComments: true, ignoreUrls: true } ],
+    'new-cap': [ 'error', { capIsNewExceptions: [ 'Private' ] } ],
+    'no-bitwise': 'off',
+    'no-caller': 'error',
+    'no-cond-assign': 'off',
+    'no-const-assign': 'error',
+    'no-debugger': 'error',
+    'no-empty': 'error',
+    'no-eval': 'error',
+    'no-extend-native': 'error',
+    'no-extra-parens': 'off',
+    'no-extra-semi': [ 'error' ],
+    'no-global-assign': 'error',
+    'no-irregular-whitespace': 'error',
+    'no-iterator': 'error',
+    'no-loop-func': 'error',
+    'no-multi-spaces': 'off',
+    'no-multi-str': 'error',
+    'no-nested-ternary': 'error',
+    'no-new': 'off',
+    'no-path-concat': 'off',
+    'no-proto': 'error',
+    'no-redeclare': 'error',
+    'no-restricted-globals': [ 'error', 'context' ],
+    'no-return-assign': 'off',
+    'no-script-url': 'error',
+    'no-sequences': 'error',
+    'no-shadow': 'off',
+    'no-trailing-spaces': 'error',
+    'no-undef': 'error',
+    'no-underscore-dangle': 'off',
+    'no-unused-expressions': 'off',
+    'no-unused-vars': [ 'error' ],
+    'no-use-before-define': [ 'error', 'nofunc' ],
+    'no-var': 'error',
+    'no-with': 'error',
+    'one-var': [ 'error', 'never' ],
+    'prefer-const': 'error',
+    quotes: [ 'error', 'single', { allowTemplateLiterals: true } ],
+    'semi-spacing': [ 'error', { before: false, after: true } ],
+    semi: [ 'error', 'always' ],
+    'space-before-blocks': [ 'error', 'always' ],
+    'space-before-function-paren': [ 'error', { anonymous: 'always', named: 'never' } ],
+    'space-in-parens': [ 'error', 'never' ],
+    'space-infix-ops': [ 'error', { int32Hint: false } ],
+    'space-unary-ops': [ 'error' ],
+    strict: [ 'error', 'never' ],
+    'valid-typeof': 'error',
+    'wrap-iife': [ 'error', 'outside' ],
+    yoda: 'off',
+
+    'object-curly-spacing': 'off', // overridden with babel/object-curly-spacing
+    'babel/object-curly-spacing': [ 'error', 'always' ],
+
+    'jsx-quotes': ['error', 'prefer-double'],
+    'react/jsx-uses-react': 'error',
+    'react/react-in-jsx-scope': 'error',
+    'react/jsx-uses-vars': 'error',
+    'react/jsx-no-undef': 'error',
+    'react/jsx-pascal-case': 'error',
+    'react/jsx-closing-bracket-location': ['error', 'line-aligned'],
+    'react/jsx-closing-tag-location': 'error',
+    'react/jsx-curly-spacing': ['error', 'never', { allowMultiline: true }],
+    'react/jsx-indent-props': ['error', 2],
+    'react/jsx-max-props-per-line': ['error', { maximum: 1, when: 'multiline' }],
+    'react/jsx-no-duplicate-props': ['error', { ignoreCase: true }],
+    'react/self-closing-comp': 'error',
+
'react/jsx-wrap-multilines': ['error', { + declaration: true, + assignment: true, + return: true, + arrow: true, + }], + 'react/jsx-first-prop-new-line': ['error', 'multiline-multiprop'], + 'react/jsx-equals-spacing': ['error', 'never'], + 'react/jsx-indent': ['error', 2], + 'react/no-did-mount-set-state': 'error', + 'react/no-did-update-set-state': 'error', + 'react/no-will-update-set-state': 'error', + 'react/no-is-mounted': 'error', + 'react/no-multi-comp': ['error', { ignoreStateless: true }], + 'react/no-unknown-property': 'error', + 'react/prefer-es6-class': ['error', 'always'], + 'react/prefer-stateless-function': ['error', { ignorePureComponents: true }], + 'react/no-unescaped-entities': 'error', + + 'mocha/handle-done-callback': 'error', + 'mocha/no-exclusive-tests': 'error', + + 'import/no-unresolved': [ 'error', { 'amd': true, 'commonjs': true } ], + 'import/named': 'error', + 'import/namespace': 'error', + 'import/default': 'error', + 'import/export': 'error', + 'import/no-named-as-default': 'error', + 'import/no-named-as-default-member': 'error', + 'import/no-duplicates': 'error', + } +} diff --git a/packages/eslint-config-kibana/.gitignore b/packages/eslint-config-kibana/.gitignore new file mode 100644 index 0000000000000..7c0c9807a8b7d --- /dev/null +++ b/packages/eslint-config-kibana/.gitignore @@ -0,0 +1,36 @@ +# Logs +logs +*.log +npm-debug.log* + +# Runtime data +pids +*.pid +*.seed + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage + +# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (http://nodejs.org/api/addons.html) +build/Release + +# Dependency directory +node_modules + +# Optional npm cache directory +.npm + +# Optional REPL history +.node_repl_history +.eslintrc.json + +yarn.lock \ No newline at end of file diff --git a/packages/eslint-config-kibana/.npmignore b/packages/eslint-config-kibana/.npmignore new file mode 100644 index 0000000000000..2ba159593147d --- /dev/null +++ b/packages/eslint-config-kibana/.npmignore @@ -0,0 +1,2 @@ +.eslintrc.yaml +tasks diff --git a/packages/eslint-config-kibana/README.md b/packages/eslint-config-kibana/README.md new file mode 100644 index 0000000000000..68c1639b834a5 --- /dev/null +++ b/packages/eslint-config-kibana/README.md @@ -0,0 +1,31 @@ +# eslint-config-kibana + +The eslint config used by the kibana team + +## Usage + +To use this eslint config, just install the peer dependencies and reference it +in your `.eslintrc`: + +```javascript +{ + extends: [ + '@elastic/eslint-config-kibana' + ] +} +``` + +## Optional jest config + +If the project uses the [jest test runner](https://facebook.github.io/jest/), +the `@elastic/eslint-config-kibana/jest` config can be extended as well to use +`eslint-plugin-jest` and add settings specific to it: + +```javascript +{ + extends: [ + '@elastic/eslint-config-kibana', + '@elastic/eslint-config-kibana/jest' + ] +} +``` diff --git a/packages/eslint-config-kibana/jest.js b/packages/eslint-config-kibana/jest.js new file mode 100644 index 0000000000000..68776bb3db147 --- /dev/null +++ b/packages/eslint-config-kibana/jest.js @@ -0,0 +1,15 @@ +module.exports = { + plugins: [ + 'jest', + ], + + env: { + 'jest/globals': true, + }, + + rules: { + 'jest/no-disabled-tests': 'error', + 'jest/no-focused-tests': 'error', + 'jest/no-identical-title': 'error', + }, +}; diff --git 
a/packages/eslint-config-kibana/package.json b/packages/eslint-config-kibana/package.json new file mode 100644 index 0000000000000..28e728380e657 --- /dev/null +++ b/packages/eslint-config-kibana/package.json @@ -0,0 +1,29 @@ +{ + "name": "@elastic/eslint-config-kibana", + "version": "0.10.0", + "description": "The eslint config used by the kibana team", + "main": ".eslintrc.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/elastic/eslint-config-kibana.git" + }, + "keywords": [], + "author": "Spencer Alger ", + "license": "Apache-2.0", + "bugs": { + "url": "https://github.com/elastic/eslint-config-kibana/issues" + }, + "homepage": "https://github.com/elastic/eslint-config-kibana#readme", + "peerDependencies": { + "babel-eslint": "^7.2.3", + "eslint": "^4.1.0", + "eslint-plugin-babel": "^4.1.1", + "eslint-plugin-import": "^2.6.0", + "eslint-plugin-jest": "^20.0.3", + "eslint-plugin-mocha": "^4.9.0", + "eslint-plugin-react": "^7.1.0" + } +} diff --git a/packages/eslint-plugin-kibana-custom/README.md b/packages/eslint-plugin-kibana-custom/README.md new file mode 100644 index 0000000000000..6b6d0a0059a1f --- /dev/null +++ b/packages/eslint-plugin-kibana-custom/README.md @@ -0,0 +1,3 @@ +# Custom ESLint rules for Kibana + +This package contains custom ESLint rules used for Kibana development. \ No newline at end of file diff --git a/packages/eslint-plugin-kibana-custom/index.js b/packages/eslint-plugin-kibana-custom/index.js new file mode 100644 index 0000000000000..d76cec67a4bd2 --- /dev/null +++ b/packages/eslint-plugin-kibana-custom/index.js @@ -0,0 +1,7 @@ +module.exports.rules = { + 'no-default-export': context => ({ + ExportDefaultDeclaration: (node) => { + context.report(node, 'Default exports not allowed.'); + } + }) +}; diff --git a/packages/eslint-plugin-kibana-custom/package.json b/packages/eslint-plugin-kibana-custom/package.json new file mode 100644 index 0000000000000..c5b788da4fe58 --- /dev/null +++ b/packages/eslint-plugin-kibana-custom/package.json @@ -0,0 +1,9 @@ +{ + "name": "@elastic/eslint-plugin-kibana-custom", + "version": "1.0.3", + "description": "Custom ESLint rules for Kibana", + "repository": { + "type": "git", + "url": "https://github.com/elastic/kibana/tree/master/packages/%40elastic/eslint-plugin-kibana-custom" + } +} diff --git a/scripts/functional_test_runner.js b/scripts/functional_test_runner.js new file mode 100644 index 0000000000000..e6b555b449f1d --- /dev/null +++ b/scripts/functional_test_runner.js @@ -0,0 +1,2 @@ +require('../src/optimize/babel/register'); +require('../src/functional_test_runner/cli'); diff --git a/scripts/jest.js b/scripts/jest.js new file mode 100755 index 0000000000000..c59c88edb51ef --- /dev/null +++ b/scripts/jest.js @@ -0,0 +1,15 @@ +// # Run Jest tests +// +// All args will be forwarded directly to Jest, e.g. 
to watch tests run: +// +// node scripts/jest --watch +// +// or to build code coverage: +// +// node scripts/jest --coverage +// +// See all cli options in https://facebook.github.io/jest/docs/cli.html + +require('../src/optimize/babel/register'); +require('../src/jest/cli'); + diff --git a/scripts/makelogs.js b/scripts/makelogs.js new file mode 100644 index 0000000000000..b1cc91bfb0e73 --- /dev/null +++ b/scripts/makelogs.js @@ -0,0 +1 @@ +require('makelogs'); diff --git a/scripts/mocha.js b/scripts/mocha.js new file mode 100644 index 0000000000000..38a977af3e786 --- /dev/null +++ b/scripts/mocha.js @@ -0,0 +1,2 @@ +require('../src/optimize/babel/register'); +require('../test/scripts/run_mocha'); diff --git a/src/cli/cli.js b/src/cli/cli.js index b53dc922b67b0..fc3422ce02cd7 100644 --- a/src/cli/cli.js +++ b/src/cli/cli.js @@ -1,5 +1,5 @@ import _ from 'lodash'; -import pkg from '../utils/package_json'; +import { pkg } from '../utils'; import Command from './command'; import serveCommand from './serve/serve'; diff --git a/src/cli/cluster/__tests__/cluster_manager.js b/src/cli/cluster/__tests__/cluster_manager.js index 13184056c2b28..244666055e3ee 100644 --- a/src/cli/cluster/__tests__/cluster_manager.js +++ b/src/cli/cluster/__tests__/cluster_manager.js @@ -1,5 +1,5 @@ import expect from 'expect.js'; -import sinon from 'auto-release-sinon'; +import sinon from 'sinon'; import cluster from 'cluster'; import { sample } from 'lodash'; @@ -7,9 +7,10 @@ import ClusterManager from '../cluster_manager'; import Worker from '../worker'; describe('CLI cluster manager', function () { + const sandbox = sinon.sandbox.create(); - function setup() { - sinon.stub(cluster, 'fork', function () { + beforeEach(function () { + sandbox.stub(cluster, 'fork', function () { return { process: { kill: sinon.stub(), @@ -20,13 +21,14 @@ describe('CLI cluster manager', function () { send: sinon.stub() }; }); + }); - const manager = new ClusterManager({}); - return manager; - } + afterEach(function () { + sandbox.restore(); + }); it('has two workers', function () { - const manager = setup(); + const manager = new ClusterManager({}); expect(manager.workers).to.have.length(2); for (const worker of manager.workers) expect(worker).to.be.a(Worker); @@ -36,7 +38,7 @@ describe('CLI cluster manager', function () { }); it('delivers broadcast messages to other workers', function () { - const manager = setup(); + const manager = new ClusterManager({}); for (const worker of manager.workers) { Worker.prototype.start.call(worker);// bypass the debounced start method diff --git a/src/cli/cluster/__tests__/worker.js b/src/cli/cluster/__tests__/worker.js index f2b53e671a357..e119869ce7caa 100644 --- a/src/cli/cluster/__tests__/worker.js +++ b/src/cli/cluster/__tests__/worker.js @@ -1,5 +1,5 @@ import expect from 'expect.js'; -import sinon from 'auto-release-sinon'; +import sinon from 'sinon'; import cluster from 'cluster'; import { findIndex } from 'lodash'; @@ -21,30 +21,28 @@ function assertListenerRemoved(emitter, event) { } function setup(opts = {}) { - sinon.stub(cluster, 'fork', function () { - return new MockClusterFork(); - }); - const worker = new Worker(opts); workersToShutdown.push(worker); return worker; } describe('CLI cluster manager', function () { + const sandbox = sinon.sandbox.create(); + + beforeEach(function () { + sandbox.stub(cluster, 'fork', () => new MockClusterFork()); + }); afterEach(async function () { - for (const worker of workersToShutdown) { - if (worker.shutdown.restore) { - // if the shutdown method was 
stubbed, restore it first
-      }
+    sandbox.restore();
+
+    for (const worker of workersToShutdown) {
       await worker.shutdown();
     }
   });

   describe('#onChange', function () {
-    context('opts.watch = true', function () {
+    describe('opts.watch = true', function () {
       it('restarts the fork', function () {
         const worker = setup({ watch: true });
         sinon.stub(worker, 'start');
@@ -54,7 +52,7 @@
     });
   });

-    context('opts.watch = false', function () {
+    describe('opts.watch = false', function () {
       it('does not restart the fork', function () {
         const worker = setup({ watch: false });
         sinon.stub(worker, 'start');
@@ -66,7 +64,7 @@
     });
   });

   describe('#shutdown', function () {
-    context('after starting()', function () {
+    describe('after starting()', function () {
       it('kills the worker and unbinds from message, online, and disconnect events', async function () {
         const worker = setup();
         await worker.start();
@@ -84,7 +82,7 @@
     });
   });

-    context('before being started', function () {
+    describe('before being started', function () {
       it('does nothing', function () {
         const worker = setup();
         worker.shutdown();
@@ -93,7 +91,7 @@
     });
   });

   describe('#parseIncomingMessage()', function () {
-    context('on a started worker', function () {
+    describe('on a started worker', function () {
       it(`is bound to fork's message event`, async function () {
         const worker = setup();
         await worker.start();
@@ -120,7 +118,7 @@
     });
   });

   describe('#onMessage', function () {
-    context('when sent WORKER_BROADCAST message', function () {
+    describe('when sent WORKER_BROADCAST message', function () {
       it('emits the data to be broadcasted', function () {
         const worker = setup();
         const data = {};
@@ -130,7 +128,7 @@
     });
   });

-    context('when sent WORKER_LISTENING message', function () {
+    describe('when sent WORKER_LISTENING message', function () {
       it('sets the listening flag and emits the listening event', function () {
         const worker = setup();
         const stub = sinon.stub(worker, 'emit');
@@ -141,7 +139,7 @@
     });
   });

-    context('when passed an unkown message', function () {
+    describe('when passed an unknown message', function () {
       it('does nothing', function () {
         const worker = setup();
         worker.onMessage('asdlfkajsdfahsdfiohuasdofihsdoif');
@@ -152,7 +150,7 @@
     });
   });

   describe('#start', function () {
-    context('when not started', function () {
+    describe('when not started', function () {
       it('creates a fork and waits for it to come online', async function () {
         const worker = setup();
@@ -179,7 +177,7 @@
     });
   });

-    context('when already started', function () {
+    describe('when already started', function () {
       it('calls shutdown and waits for the graceful shutdown to cause a restart', async function () {
         const worker = setup();
         await worker.start();
diff --git a/src/cli/cluster/cluster_manager.js b/src/cli/cluster/cluster_manager.js
index ce373ceb80677..d41bcc59fcbe7 100644
--- a/src/cli/cluster/cluster_manager.js
+++ b/src/cli/cluster/cluster_manager.js
@@ -7,7 +7,7 @@ import BasePathProxy from './base_path_proxy';

process.env.kbnWorkerType = 'managr';

-module.exports = class ClusterManager {
+export default class ClusterManager {
   constructor(opts = {},
settings = {}) { this.log = new Log(opts.quiet, opts.silent); this.addedCount = 0; @@ -62,10 +62,23 @@ module.exports = class ClusterManager { bindAll(this, 'onWatcherAdd', 'onWatcherError', 'onWatcherChange'); if (opts.watch) { - this.setupWatching([ + const extraPaths = [ ...settings.plugins.paths, - ...settings.plugins.scanDirs - ]); + ...settings.plugins.scanDirs, + ]; + + const extraIgnores = settings.plugins.scanDirs + .map(scanDir => resolve(scanDir, '*')) + .concat(settings.plugins.paths) + .reduce((acc, path) => acc.concat( + resolve(path, 'test'), + resolve(path, 'build'), + resolve(path, 'target'), + resolve(path, 'scripts'), + resolve(path, 'docs'), + ), []); + + this.setupWatching(extraPaths, extraIgnores); } else this.startCluster(); @@ -79,9 +92,9 @@ module.exports = class ClusterManager { } } - setupWatching(extraPaths) { + setupWatching(extraPaths, extraIgnores) { const chokidar = require('chokidar'); - const fromRoot = require('../../utils/from_root'); + const { fromRoot } = require('../../utils'); const watchPaths = [ fromRoot('src/core_plugins'), @@ -94,7 +107,10 @@ module.exports = class ClusterManager { this.watcher = chokidar.watch(uniq(watchPaths), { cwd: fromRoot('.'), - ignored: /[\\\/](\..*|node_modules|bower_components|public|__tests__|coverage)[\\\/]/ + ignored: [ + /[\\\/](\..*|node_modules|bower_components|public|__tests__|coverage)[\\\/]/, + ...extraIgnores + ] }); this.watcher.on('add', this.onWatcherAdd); @@ -153,4 +169,4 @@ module.exports = class ClusterManager { this.log.bad('failed to watch files!\n', err.stack); process.exit(1); // eslint-disable-line no-process-exit } -}; +} diff --git a/src/cli/cluster/worker.js b/src/cli/cluster/worker.js index 2c16ecf411154..06528f2765313 100644 --- a/src/cli/cluster/worker.js +++ b/src/cli/cluster/worker.js @@ -17,7 +17,7 @@ const dead = fork => { return fork.isDead() || fork.killed; }; -module.exports = class Worker extends EventEmitter { +export default class Worker extends EventEmitter { constructor(opts) { opts = opts || {}; super(); @@ -89,7 +89,7 @@ module.exports = class Worker extends EventEmitter { // we don't need to react to process.exit anymore this.processBinder.destroy(); - // wait until the cluster reports this fork has exitted, then resolve + // wait until the cluster reports this fork has exited, then resolve await new Promise(resolve => this.once('fork:exit', resolve)); } } @@ -153,7 +153,7 @@ module.exports = class Worker extends EventEmitter { this.forkBinder.on('online', () => this.onOnline()); this.forkBinder.on('disconnect', () => this.onDisconnect()); - // when the cluster says a fork has exitted, check if it is ours + // when the cluster says a fork has exited, check if it is ours this.clusterBinder.on('exit', (fork, code) => this.onExit(fork, code)); // when the process exits, make sure we kill our workers @@ -162,4 +162,4 @@ module.exports = class Worker extends EventEmitter { // wait for the fork to report it is online before resolving await new Promise(cb => this.once('fork:online', cb)); } -}; +} diff --git a/src/cli/color.js b/src/cli/color.js index 56188418c0140..fc3011a0ecdec 100644 --- a/src/cli/color.js +++ b/src/cli/color.js @@ -1,7 +1,6 @@ - import _ from 'lodash'; import ansicolors from 'ansicolors'; -exports.green = _.flow(ansicolors.black, ansicolors.bgGreen); -exports.red = _.flow(ansicolors.white, ansicolors.bgRed); -exports.yellow = _.flow(ansicolors.black, ansicolors.bgYellow); +export const green = _.flow(ansicolors.black, ansicolors.bgGreen); +export const red = 
_.flow(ansicolors.white, ansicolors.bgRed); +export const yellow = _.flow(ansicolors.black, ansicolors.bgYellow); diff --git a/src/cli/command.js b/src/cli/command.js index 27e65a4e832d0..a4d9f57851ff4 100644 --- a/src/cli/command.js +++ b/src/cli/command.js @@ -91,4 +91,4 @@ Command.prototype.action = _.wrap(Command.prototype.action, function (action, fn }); }); -module.exports = Command; +export default Command; diff --git a/src/cli/help.js b/src/cli/help.js index b5078aad458fe..e78ca3271c065 100644 --- a/src/cli/help.js +++ b/src/cli/help.js @@ -1,6 +1,6 @@ import _ from 'lodash'; -module.exports = function (command, spaces) { +export default function help(command, spaces) { if (!_.size(command.commands)) { return command.outputHelp(); } @@ -24,7 +24,7 @@ ${indent(commandsSummary(command), 2)} ${cmdHelp(defCmd)} ` ).trim().replace(/^/gm, spaces || ''); -}; +} function indent(str, n) { return String(str || '').trim().replace(/^/gm, _.repeat(' ', n)); diff --git a/src/cli/log.js b/src/cli/log.js index 584c27d5f9857..3731d2c46a7f6 100644 --- a/src/cli/log.js +++ b/src/cli/log.js @@ -4,12 +4,12 @@ const log = _.restParam(function (color, label, rest1) { console.log.apply(console, [color(` ${_.trim(label)} `)].concat(rest1)); }); -import color from './color'; +import { green, yellow, red } from './color'; -module.exports = class Log { +export default class Log { constructor(quiet, silent) { - this.good = quiet || silent ? _.noop : _.partial(log, color.green); - this.warn = quiet || silent ? _.noop : _.partial(log, color.yellow); - this.bad = silent ? _.noop : _.partial(log, color.red); + this.good = quiet || silent ? _.noop : _.partial(log, green); + this.warn = quiet || silent ? _.noop : _.partial(log, yellow); + this.bad = silent ? _.noop : _.partial(log, red); } -}; +} diff --git a/src/cli/serve/__tests__/fixtures/invalid_config.yml b/src/cli/serve/__tests__/fixtures/invalid_config.yml new file mode 100644 index 0000000000000..df9ea641cd3fe --- /dev/null +++ b/src/cli/serve/__tests__/fixtures/invalid_config.yml @@ -0,0 +1,13 @@ +unknown: + key: 1 + +other: + unknown.key: 2 + third: 3 + +some.flat.key: 4 + +some.array: + - 1 + - 2 + - 3 diff --git a/src/cli/serve/__tests__/invalid_config.js b/src/cli/serve/__tests__/invalid_config.js new file mode 100644 index 0000000000000..72c5ca1428a26 --- /dev/null +++ b/src/cli/serve/__tests__/invalid_config.js @@ -0,0 +1,39 @@ +import { spawnSync } from 'child_process'; +import { resolve } from 'path'; + +import expect from 'expect.js'; + +const ROOT_DIR = resolve(__dirname, '../../../../'); +const INVALID_CONFIG_PATH = resolve(__dirname, 'fixtures/invalid_config.yml'); + +describe('cli invalid config support', function () { + this.timeout(20 * 1000); + this.slow(10 * 1000); + + it('exits with statusCode 64 and logs a single line when config is invalid', function () { + const { error, status, stdout } = spawnSync(process.execPath, [ + 'src/cli', + '--config', INVALID_CONFIG_PATH + ], { + cwd: ROOT_DIR + }); + + const logLines = stdout.toString('utf8') + .split('\n') + .filter(Boolean) + .map(JSON.parse); + + expect(error).to.be(undefined); + expect(status).to.be(64); + + expect(logLines).to.have.length(1); + expect(logLines[0]).to.have.property('tags') + .eql(['fatal']); + expect(logLines[0]).to.have.property('message') + .contain('"unknown.key"') + .contain('"other.unknown.key"') + .contain('"other.third"') + .contain('"some.flat.key"') + .contain('"some.array"'); + }); +}); diff --git a/src/cli/serve/__tests__/read_yaml_config.js 
b/src/cli/serve/__tests__/read_yaml_config.js index 7f876d9e84d17..ed57437d74379 100644 --- a/src/cli/serve/__tests__/read_yaml_config.js +++ b/src/cli/serve/__tests__/read_yaml_config.js @@ -29,7 +29,7 @@ describe('cli/serve/read_yaml_config', function () { }); }); - context('different cwd()', function () { + describe('different cwd()', function () { const oldCwd = process.cwd(); const newCwd = join(oldCwd, '..'); diff --git a/src/cli/serve/__tests__/reload_logging_config.js b/src/cli/serve/__tests__/reload_logging_config.js index c904989ebd35a..eb67ebf6b11f1 100644 --- a/src/cli/serve/__tests__/reload_logging_config.js +++ b/src/cli/serve/__tests__/reload_logging_config.js @@ -41,8 +41,8 @@ describe(`Server logging configuration`, function () { }); child.on('exit', code => { + expect([null, 0]).to.contain(code); expect(asserted).to.eql(true); - expect(code === null || code === 0).to.eql(true); done(); }); diff --git a/src/cli/serve/serve.js b/src/cli/serve/serve.js index aa515f62d075c..4dad11c97feb2 100644 --- a/src/cli/serve/serve.js +++ b/src/cli/serve/serve.js @@ -72,7 +72,7 @@ function readServerSettings(opts, extraCliOptions) { return settings; } -module.exports = function (program) { +export default function (program) { const command = program.command('serve'); command @@ -114,7 +114,7 @@ module.exports = function (program) { if (canCluster) { command .option('--dev', 'Run the server with development mode defaults') - .option('--no-ssl', 'Don\'t run the dev server using HTTPS') + .option('--ssl', 'Run the dev server using HTTPS') .option('--no-base-path', 'Don\'t put a proxy in front of the dev server, which adds a random basePath') .option('--no-watch', 'Prevents automatic restarts of the server in --dev mode'); } @@ -147,18 +147,27 @@ module.exports = function (program) { try { kbnServer = new KbnServer(settings); await kbnServer.ready(); - } - catch (err) { + } catch (error) { const { server } = kbnServer; - if (err.code === 'EADDRINUSE') { - logFatal(`Port ${err.port} is already in use. Another instance of Kibana may be running!`, server); - } else { - logFatal(err, server); + switch (error.code) { + case 'EADDRINUSE': + logFatal(`Port ${error.port} is already in use. Another instance of Kibana may be running!`, server); + break; + + case 'InvalidConfig': + logFatal(error.message, server); + break; + + default: + logFatal(error, server); + break; } kbnServer.close(); - process.exit(1); // eslint-disable-line no-process-exit + const exitCode = error.processExitCode == null ? 
1 : error.processExitCode; + // eslint-disable-next-line no-process-exit + process.exit(exitCode); } process.on('SIGHUP', function reloadConfig() { @@ -170,11 +179,12 @@ module.exports = function (program) { return kbnServer; }); -}; +} function logFatal(message, server) { if (server) { server.log(['fatal'], message); + } else { + console.error('FATAL', message); } - console.error('FATAL', message); } diff --git a/src/cli_plugin/cli.js b/src/cli_plugin/cli.js index 5d3dd2f702267..c972727923585 100644 --- a/src/cli_plugin/cli.js +++ b/src/cli_plugin/cli.js @@ -1,5 +1,5 @@ import _ from 'lodash'; -import pkg from '../utils/package_json'; +import { pkg } from '../utils'; import Command from '../cli/command'; import listCommand from './list'; import installCommand from './install'; diff --git a/src/cli_plugin/install/__tests__/download.js b/src/cli_plugin/install/__tests__/download.js index 63cddbfc3fdd8..4693ed10e853b 100644 --- a/src/cli_plugin/install/__tests__/download.js +++ b/src/cli_plugin/install/__tests__/download.js @@ -8,6 +8,7 @@ import Logger from '../../lib/logger'; import { UnsupportedProtocolError } from '../../lib/errors'; import { download, _downloadSingle, _getFilePath, _checkFilePathDeprecation } from '../download'; import { join } from 'path'; +import http from 'http'; describe('kibana cli', function () { @@ -251,6 +252,150 @@ describe('kibana cli', function () { }); }); + after(function () { + nock.cleanAll(); + }); + + }); + + describe('proxy support', function () { + + const proxyPort = 2626; + const proxyUrl = `http://localhost:${proxyPort}`; + + let proxyHit = false; + + const proxy = http.createServer(function (req, res) { + proxyHit = true; + // Our test proxy simply returns an empty 200 response, since we only + // care about the download promise being resolved. 
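The no_proxy cases that follow lean entirely on how the proxy-from-env package (the same one http.js imports further down in this diff) resolves a proxy for a given URL. A minimal sketch of that resolution, assuming the env variables the tests set; the URLs are illustrative, and getProxyForUrl returns an empty string when the URL is excluded:

import { getProxyForUrl } from 'proxy-from-env';

process.env.http_proxy = 'http://localhost:2626';
process.env.no_proxy = 'foo.bar, .example.com';

// the leading-dot entry in no_proxy covers subdomains, so no proxy applies
getProxyForUrl('http://plugins.example.com/plugin.zip'); // => ''

// nothing in no_proxy matches this host, so the http_proxy value wins
getProxyForUrl('http://some.other.host/plugin.zip'); // => 'http://localhost:2626'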
+ res.writeHead(200); + res.end(); + }); + + function expectProxyHit() { + expect(proxyHit).to.be(true); + } + + function expectNoProxyHit() { + expect(proxyHit).to.be(false); + } + + function nockPluginForUrl(url) { + nock(url) + .get('/plugin.zip') + .replyWithFile(200, join(__dirname, 'replies/test_plugin.zip')); + } + + before(function (done) { + proxy.listen(proxyPort, done); + }); + + beforeEach(function () { + proxyHit = false; + }); + + afterEach(function () { + delete process.env.http_proxy; + delete process.env.https_proxy; + delete process.env.no_proxy; + }); + + it('should use http_proxy env variable', function () { + process.env.http_proxy = proxyUrl; + settings.urls = ['http://example.com/plugin.zip']; + + return download(settings, logger) + .then(expectProxyHit); + }); + + it('should use https_proxy for secure URLs', function () { + process.env.https_proxy = proxyUrl; + settings.urls = ['https://example.com/plugin.zip']; + + return download(settings, logger) + .then(expectProxyHit); + }); + + it('should not use http_proxy for HTTPS urls', function () { + process.env.http_proxy = proxyUrl; + settings.urls = ['https://example.com/plugin.zip']; + + nockPluginForUrl('https://example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + it('should not use https_proxy for HTTP urls', function () { + process.env.https_proxy = proxyUrl; + settings.urls = ['http://example.com/plugin.zip']; + + nockPluginForUrl('http://example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + it('should support domains in no_proxy', function () { + process.env.http_proxy = proxyUrl; + process.env.no_proxy = 'foo.bar, example.com'; + settings.urls = ['http://example.com/plugin.zip']; + + nockPluginForUrl('http://example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + it('should support subdomains in no_proxy', function () { + process.env.http_proxy = proxyUrl; + process.env.no_proxy = 'foo.bar,plugins.example.com'; + settings.urls = ['http://plugins.example.com/plugin.zip']; + + nockPluginForUrl('http://plugins.example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + it('should accept wildcard subdomains in no_proxy', function () { + process.env.http_proxy = proxyUrl; + process.env.no_proxy = 'foo.bar, .example.com'; + settings.urls = ['http://plugins.example.com/plugin.zip']; + + nockPluginForUrl('http://plugins.example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + it('should support asterisk wildcard no_proxy syntax', function () { + process.env.http_proxy = proxyUrl; + process.env.no_proxy = '*.example.com'; + settings.urls = ['http://plugins.example.com/plugin.zip']; + + nockPluginForUrl('http://plugins.example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + it('should support implicit ports in no_proxy', function () { + process.env.https_proxy = proxyUrl; + process.env.no_proxy = 'example.com:443'; + settings.urls = ['https://example.com/plugin.zip']; + + nockPluginForUrl('https://example.com'); + + return download(settings, logger) + .then(expectNoProxyHit); + }); + + after(function (done) { + proxy.close(done); + }); + }); }); diff --git a/src/cli_plugin/install/__tests__/kibana.js b/src/cli_plugin/install/__tests__/kibana.js index 15d6b25d27410..abc4e9558ebe8 100644 --- a/src/cli_plugin/install/__tests__/kibana.js +++ b/src/cli_plugin/install/__tests__/kibana.js @@ -1,5 +1,6 @@ import expect from 
'expect.js'; import sinon from 'sinon'; +import mockFs from 'mock-fs'; import Logger from '../../lib/logger'; import { join } from 'path'; import rimraf from 'rimraf'; @@ -13,13 +14,15 @@ describe('kibana cli', function () { describe('kibana', function () { const testWorkingPath = join(__dirname, '.test.data'); const tempArchiveFilePath = join(testWorkingPath, 'archive.part'); + const pluginDir = join(__dirname, 'plugins'); const settings = { workingPath: testWorkingPath, tempArchiveFile: tempArchiveFilePath, plugin: 'test-plugin', version: '1.0.0', - plugins: [ { name: 'foo', path: join(testWorkingPath, 'foo') } ] + plugins: [ { name: 'foo' } ], + pluginDir }; const logger = new Logger(settings); @@ -130,13 +133,10 @@ describe('kibana cli', function () { }); describe('existingInstall', function () { - let testWorkingPath; let processExitStub; beforeEach(function () { processExitStub = sinon.stub(process, 'exit'); - testWorkingPath = join(__dirname, '.test.data'); - rimraf.sync(testWorkingPath); sinon.stub(logger, 'log'); sinon.stub(logger, 'error'); }); @@ -145,26 +145,23 @@ describe('kibana cli', function () { processExitStub.restore(); logger.log.restore(); logger.error.restore(); - rimraf.sync(testWorkingPath); }); - it('should throw an error if the workingPath already exists.', function () { - mkdirp.sync(settings.plugins[0].path); - existingInstall(settings, logger); + it('should throw an error if the plugin already exists.', function () { + mockFs({ [`${pluginDir}/foo`]: {} }); + existingInstall(settings, logger); expect(logger.error.firstCall.args[0]).to.match(/already exists/); expect(process.exit.called).to.be(true); + + mockFs.restore(); }); - it('should not throw an error if the workingPath does not exist.', function () { + it('should not throw an error if the plugin does not exist.', function () { existingInstall(settings, logger); expect(logger.error.called).to.be(false); }); - }); - }); - }); - }); diff --git a/src/cli_plugin/install/__tests__/pack.js b/src/cli_plugin/install/__tests__/pack.js index 0bd194b3212db..45713b43ef12c 100644 --- a/src/cli_plugin/install/__tests__/pack.js +++ b/src/cli_plugin/install/__tests__/pack.js @@ -98,10 +98,9 @@ describe('kibana cli', function () { }) .then(() => { expect(settings.plugins[0].name).to.be('test-plugin'); - expect(settings.plugins[0].folder).to.be('test-plugin'); + expect(settings.plugins[0].archivePath).to.be('kibana/test-plugin'); expect(settings.plugins[0].version).to.be('1.0.0'); expect(settings.plugins[0].kibanaVersion).to.be('1.0.0'); - expect(settings.plugins[0].platform).to.be(undefined); }); }); @@ -134,40 +133,28 @@ describe('kibana cli', function () { }) .then(() => { expect(settings.plugins[0].name).to.be('funger-plugin'); - expect(settings.plugins[0].file).to.be('kibana/funger-plugin/package.json'); - expect(settings.plugins[0].folder).to.be('funger-plugin'); + expect(settings.plugins[0].archivePath).to.be('kibana/funger-plugin'); expect(settings.plugins[0].version).to.be('1.0.0'); - expect(settings.plugins[0].platform).to.be(undefined); expect(settings.plugins[1].name).to.be('pdf'); - expect(settings.plugins[1].file).to.be('kibana/pdf-linux/package.json'); - expect(settings.plugins[1].folder).to.be('pdf-linux'); + expect(settings.plugins[1].archivePath).to.be('kibana/pdf-linux'); expect(settings.plugins[1].version).to.be('1.0.0'); - expect(settings.plugins[1].platform).to.be('linux'); expect(settings.plugins[2].name).to.be('pdf'); - expect(settings.plugins[2].file).to.be('kibana/pdf-win32/package.json'); - 
expect(settings.plugins[2].folder).to.be('pdf-win32'); + expect(settings.plugins[2].archivePath).to.be('kibana/pdf-win32'); expect(settings.plugins[2].version).to.be('1.0.0'); - expect(settings.plugins[2].platform).to.be('win32'); expect(settings.plugins[3].name).to.be('pdf'); - expect(settings.plugins[3].file).to.be('kibana/pdf-win64/package.json'); - expect(settings.plugins[3].folder).to.be('pdf-win64'); + expect(settings.plugins[3].archivePath).to.be('kibana/pdf-win64'); expect(settings.plugins[3].version).to.be('1.0.0'); - expect(settings.plugins[3].platform).to.be('win64'); expect(settings.plugins[4].name).to.be('pdf'); - expect(settings.plugins[4].file).to.be('kibana/pdf/package.json'); - expect(settings.plugins[4].folder).to.be('pdf'); + expect(settings.plugins[4].archivePath).to.be('kibana/pdf'); expect(settings.plugins[4].version).to.be('1.0.0'); - expect(settings.plugins[4].platform).to.be(undefined); expect(settings.plugins[5].name).to.be('test-plugin'); - expect(settings.plugins[5].file).to.be('kibana/test-plugin/package.json'); - expect(settings.plugins[5].folder).to.be('test-plugin'); + expect(settings.plugins[5].archivePath).to.be('kibana/test-plugin'); expect(settings.plugins[5].version).to.be('1.0.0'); - expect(settings.plugins[5].platform).to.be(undefined); }); }); diff --git a/src/cli_plugin/install/__tests__/replies/strip_test.zip b/src/cli_plugin/install/__tests__/replies/strip_test.zip deleted file mode 100644 index 81256c5ad4667..0000000000000 Binary files a/src/cli_plugin/install/__tests__/replies/strip_test.zip and /dev/null differ diff --git a/src/cli_plugin/install/__tests__/zip.js b/src/cli_plugin/install/__tests__/zip.js index 8bb3345e0c531..3b5e48027fa59 100644 --- a/src/cli_plugin/install/__tests__/zip.js +++ b/src/cli_plugin/install/__tests__/zip.js @@ -1,207 +1,83 @@ import expect from 'expect.js'; -import sinon from 'sinon'; -import glob from 'glob-all'; import rimraf from 'rimraf'; -import mkdirp from 'mkdirp'; -import Logger from '../../lib/logger'; -import { _downloadSingle } from '../download'; -import { join } from 'path'; -import { listFiles, extractFiles } from '../zip'; +import path from 'path'; +import os from 'os'; +import glob from 'glob'; +import { analyzeArchive, extractArchive, _isDirectory } from '../zip'; describe('kibana cli', function () { describe('zip', function () { + const repliesPath = path.resolve(__dirname, './replies'); + const archivePath = path.resolve(repliesPath, 'test_plugin.zip'); - const testWorkingPath = join(__dirname, '.test.data'); - const tempArchiveFilePath = join(testWorkingPath, 'archive.part'); - let logger; + let tempPath; - const settings = { - workingPath: testWorkingPath, - tempArchiveFile: tempArchiveFilePath, - plugin: 'test-plugin', - setPlugin: function () {} - }; - - function shouldReject() { - throw new Error('expected the promise to reject'); - } - - beforeEach(function () { - logger = new Logger(settings); - sinon.stub(logger, 'log'); - sinon.stub(logger, 'error'); - sinon.stub(settings, 'setPlugin'); - rimraf.sync(testWorkingPath); - mkdirp.sync(testWorkingPath); + beforeEach(() => { + const randomDir = Math.random().toString(36); + tempPath = path.resolve(os.tmpdir(), randomDir); }); - afterEach(function () { - logger.log.restore(); - logger.error.restore(); - settings.setPlugin.restore(); - rimraf.sync(testWorkingPath); + afterEach(() => { + rimraf.sync(tempPath); }); - function copyReplyFile(filename) { - const filePath = join(__dirname, 'replies', filename); - const sourceUrl = 'file://' + 
filePath.replace(/\\/g, '/'); - - return _downloadSingle(settings, logger, sourceUrl); - } - - describe('listFiles', function () { + describe('analyzeArchive', function () { + it('returns array of plugins', async () => { + const packages = await analyzeArchive(archivePath); + const plugin = packages[0]; - it('lists the files in the zip', function () { - return copyReplyFile('test_plugin.zip') - .then(() => { - return listFiles(settings.tempArchiveFile); - }) - .then((actual) => { - const expected = [ - 'elasticsearch/', - 'kibana/', - 'kibana/test-plugin/', - 'kibana/test-plugin/.gitignore', - 'kibana/test-plugin/extra file only in zip.txt', - 'kibana/test-plugin/index.js', - 'kibana/test-plugin/package.json', - 'kibana/test-plugin/public/', - 'kibana/test-plugin/public/app.js', - 'kibana/test-plugin/README.md', - 'logstash/' - ]; - - expect(actual).to.eql(expected); - }); + expect(packages).to.be.an(Array); + expect(plugin.name).to.be('test-plugin'); + expect(plugin.archivePath).to.be('kibana/test-plugin'); + expect(plugin.archive).to.be(archivePath); + expect(plugin.kibanaVersion).to.be('1.0.0'); }); - }); - describe('extractFiles', function () { - - describe('strip files parameter', function () { - - it('strips specified number of directories', function () { - - return copyReplyFile('strip_test.zip') - .then(() => { - return extractFiles(settings.tempArchiveFile, settings.workingPath, 1); - }) - .then(() => { - const files = glob.sync('**/*', { cwd: testWorkingPath }); - const expected = [ - '1 level deep.txt', - 'test-plugin', - 'test-plugin/2 levels deep.txt', - 'test-plugin/public', - 'test-plugin/public/3 levels deep.txt', - 'archive.part' - ]; - expect(files.sort()).to.eql(expected.sort()); - }); - - }); - - it('throws an exception if it tries to strip too many directories', function () { - - return copyReplyFile('strip_test.zip') - .then(() => { - return extractFiles(settings.tempArchiveFile, settings.workingPath, 2); - }) - .then(shouldReject, (err) => { - expect(err.message).to.match(/You cannot strip more levels than there are directories/i); - }); - - }); - - it('applies the filter before applying the strip directories logic', function () { - - return copyReplyFile('strip_test.zip') - .then(() => { - const filter = { - paths: [ - 'test-plugin' - ] - }; - - return extractFiles(settings.tempArchiveFile, settings.workingPath, 2, filter); - }) - .then(() => { - const files = glob.sync('**/*', { cwd: testWorkingPath }); - const expected = [ - '2 levels deep.txt', - 'public', - 'public/3 levels deep.txt', - 'archive.part' - ]; - expect(files.sort()).to.eql(expected.sort()); - }); - - }); - + describe('extractArchive', () => { + it('extracts files using the extractPath filter', async () => { + const archive = path.resolve(repliesPath, 'test_plugin_many.zip'); + + await extractArchive(archive, tempPath, 'kibana/test-plugin'); + const files = await glob.sync('**/*', { cwd: tempPath }); + + const expected = [ + 'extra file only in zip.txt', + 'index.js', + 'package.json', + 'public', + 'public/app.js', + 'README.md' + ]; + expect(files.sort()).to.eql(expected.sort()); }); + }); - it('extracts files using the files filter', function () { - return copyReplyFile('test_plugin_many.zip') - .then(() => { - const filter = { - files: [ - 'kibana/funger-plugin/extra file only in zip.txt', - 'kibana/funger-plugin/index.js', - 'kibana\\funger-plugin\\package.json' - ] - }; - - return extractFiles(settings.tempArchiveFile, settings.workingPath, 0, filter); - }) - .then(() => { - const files = 
glob.sync('**/*', { cwd: testWorkingPath }); - const expected = [ - 'kibana', - 'kibana/funger-plugin', - 'kibana/funger-plugin/extra file only in zip.txt', - 'kibana/funger-plugin/index.js', - 'kibana/funger-plugin/package.json', - 'archive.part' - ]; - expect(files.sort()).to.eql(expected.sort()); - }); - }); + it('handles a corrupt zip archive', async () => { + try { + await extractArchive(path.resolve(repliesPath, 'corrupt.zip')); + } catch (e) { + return; + } + throw new Error('This should have failed'); + }); + }); - it('extracts files using the paths filter', function () { - return copyReplyFile('test_plugin_many.zip') - .then(() => { - const filter = { - paths: [ - 'kibana/funger-plugin', - 'kibana/test-plugin/public' - ] - }; + describe('_isDirectory', () => { + it('should check for a forward slash', () => { + expect(_isDirectory('/foo/bar/')).to.be(true); + }); - return extractFiles(settings.tempArchiveFile, settings.workingPath, 0, filter); - }) - .then(() => { - const files = glob.sync('**/*', { cwd: testWorkingPath }); - const expected = [ - 'archive.part', - 'kibana', - 'kibana/funger-plugin', - 'kibana/funger-plugin/README.md', - 'kibana/funger-plugin/extra file only in zip.txt', - 'kibana/funger-plugin/index.js', - 'kibana/funger-plugin/package.json', - 'kibana/funger-plugin/public', - 'kibana/funger-plugin/public/app.js', - 'kibana/test-plugin', - 'kibana/test-plugin/public', - 'kibana/test-plugin/public/app.js' - ]; - expect(files.sort()).to.eql(expected.sort()); - }); - }); + it('should check for a backslash', () => { + expect(_isDirectory('\\foo\\bar\\')).to.be(true); }); + it('should return false for files', () => { + expect(_isDirectory('foo.txt')).to.be(false); + expect(_isDirectory('\\path\\to\\foo.txt')).to.be(false); + expect(_isDirectory('/path/to/foo.txt')).to.be(false); + }); }); }); diff --git a/src/cli_plugin/install/downloaders/http.js b/src/cli_plugin/install/downloaders/http.js index f8c28d65b7733..1c261be5643e5 100644 --- a/src/cli_plugin/install/downloaders/http.js +++ b/src/cli_plugin/install/downloaders/http.js @@ -2,11 +2,31 @@ import Wreck from 'wreck'; import Progress from '../progress'; import { fromNode as fn } from 'bluebird'; import { createWriteStream } from 'fs'; +import HttpProxyAgent from 'http-proxy-agent'; +import { getProxyForUrl } from 'proxy-from-env'; -function sendRequest({ sourceUrl, timeout }) { +function getProxyAgent(sourceUrl, logger) { + const proxy = getProxyForUrl(sourceUrl); + + if (!proxy) { + return null; + } + + logger.log(`Picked up proxy ${proxy} from environment variable.`); + return new HttpProxyAgent(proxy); +} + +function sendRequest({ sourceUrl, timeout }, logger) { const maxRedirects = 11; //Because this one goes to 11.
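getProxyAgent either returns an HttpProxyAgent or null, so the only change a caller needs is to attach the agent to its request options when one exists. A rough, self-contained sketch of that wiring, assuming Wreck's standard agent option (the wrapper function is illustrative; it mirrors the sendRequest change rather than adding anything new):

import Wreck from 'wreck';

// a trimmed-down caller; getProxyAgent is the helper defined just above
function requestWithOptionalProxy(sourceUrl, timeout, logger) {
  const reqOptions = { timeout, redirects: 11 };
  const proxyAgent = getProxyAgent(sourceUrl, logger);

  if (proxyAgent) {
    // route the transfer through the proxy picked up from the environment
    reqOptions.agent = proxyAgent;
  }

  return Wreck.request('GET', sourceUrl, reqOptions, () => {});
}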
return fn(cb => { - const req = Wreck.request('GET', sourceUrl, { timeout, redirects: maxRedirects }, (err, resp) => { + const reqOptions = { timeout, redirects: maxRedirects }; + const proxyAgent = getProxyAgent(sourceUrl, logger); + + if (proxyAgent) { + reqOptions.agent = proxyAgent; + } + + const req = Wreck.request('GET', sourceUrl, reqOptions, (err, resp) => { if (err) { if (err.code === 'ECONNREFUSED') { err = new Error('ENOTFOUND'); @@ -50,7 +70,7 @@ Responsible for managing http transfers */ export default async function downloadUrl(logger, sourceUrl, targetPath, timeout) { try { - const { req, resp } = await sendRequest({ sourceUrl, timeout }); + const { req, resp } = await sendRequest({ sourceUrl, timeout }, logger); try { const totalSize = parseFloat(resp.headers['content-length']) || 0; diff --git a/src/cli_plugin/install/index.js b/src/cli_plugin/install/index.js index d34160ff82c2f..b9a20012d8e94 100644 --- a/src/cli_plugin/install/index.js +++ b/src/cli_plugin/install/index.js @@ -1,7 +1,6 @@ -import { fromRoot } from '../../utils'; +import { fromRoot, pkg } from '../../utils'; import install from './install'; import Logger from '../lib/logger'; -import pkg from '../../utils/package_json'; import { getConfig } from '../../server/path'; import { parse, parseMilliseconds } from './settings'; import logWarnings from '../lib/log_warnings'; diff --git a/src/cli_plugin/install/install.js b/src/cli_plugin/install/install.js index 32bbbe79eb450..0c79f2f57c6e9 100644 --- a/src/cli_plugin/install/install.js +++ b/src/cli_plugin/install/install.js @@ -1,5 +1,6 @@ import { download } from './download'; import Promise from 'bluebird'; +import path from 'path'; import { cleanPrevious, cleanArtifacts } from './cleanup'; import { extract, getPackData } from './pack'; import { renamePlugin } from './rename'; @@ -27,7 +28,7 @@ export default async function install(settings, logger) { assertVersion(settings); - await renamePlugin(settings.workingPath, settings.plugins[0].path); + await renamePlugin(settings.workingPath, path.join(settings.pluginDir, settings.plugins[0].name)); await rebuildCache(settings, logger); diff --git a/src/cli_plugin/install/kibana.js b/src/cli_plugin/install/kibana.js index 11738a53277b3..5adcfc5f3ed17 100644 --- a/src/cli_plugin/install/kibana.js +++ b/src/cli_plugin/install/kibana.js @@ -1,4 +1,5 @@ import _ from 'lodash'; +import path from 'path'; import { fromRoot } from '../../utils'; import KbnServer from '../../server/kbn_server'; import readYamlConfig from '../../cli/serve/read_yaml_config'; @@ -7,7 +8,7 @@ import { statSync } from 'fs'; export function existingInstall(settings, logger) { try { - statSync(settings.plugins[0].path); + statSync(path.join(settings.pluginDir, settings.plugins[0].name)); logger.error(`Plugin ${settings.plugins[0].name} already exists, please remove before installing a new version`); process.exit(70); // eslint-disable-line no-process-exit diff --git a/src/cli_plugin/install/pack.js b/src/cli_plugin/install/pack.js index 669c4c64bd88f..c4bcad1846eb7 100644 --- a/src/cli_plugin/install/pack.js +++ b/src/cli_plugin/install/pack.js @@ -1,51 +1,10 @@ -import _ from 'lodash'; -import { listFiles, extractFiles } from './zip'; -import { resolve } from 'path'; -import { sync as rimrafSync } from 'rimraf'; +import { analyzeArchive, extractArchive } from './zip'; import validate from 'validate-npm-package-name'; -/** - * Returns an array of package objects. 
There will be one for each of - * package.json files in the archive - * @param {object} settings - a plugin installer settings object - */ -async function listPackages(settings) { - const regExp = new RegExp('(kibana/([^/]+))/package.json', 'i'); - const archiveFiles = await listFiles(settings.tempArchiveFile); - - return _(archiveFiles) - .map(file => file.replace(/\\/g, '/')) - .map(file => file.match(regExp)) - .compact() - .map(([ file, , folder ]) => ({ file, folder })) - .uniq() - .value(); -} - -/** - * Extracts the package.json files into the workingPath - * @param {object} settings - a plugin installer settings object - * @param {array} packages - array of package objects from listPackages() - */ -async function extractPackageFiles(settings, packages) { - const filter = { - files: packages.map((pkg) => pkg.file) - }; - await extractFiles(settings.tempArchiveFile, settings.workingPath, 0, filter); -} - -/** - * Deletes the package.json files created by extractPackageFiles() - * @param {object} settings - a plugin installer settings object - */ -function deletePackageFiles(settings) { - const fullPath = resolve(settings.workingPath, 'kibana'); - rimrafSync(fullPath); -} - /** * Checks the plugin name. Will throw an exception if it does not meet * npm package naming conventions + * * @param {object} plugin - a package object from listPackages() */ function assertValidPackageName(plugin) { @@ -55,89 +14,47 @@ function assertValidPackageName(plugin) { } } -/** - * Examine each package.json file to determine the plugin name, - * version, kibanaVersion, and platform. Mutates the package objects - * in the packages array - * @param {object} settings - a plugin installer settings object - * @param {array} packages - array of package objects from listPackages() - */ -async function mergePackageData(settings, packages) { - return packages.map((pkg) => { - const fullPath = resolve(settings.workingPath, pkg.file); - const packageInfo = require(fullPath); - - pkg.version = _.get(packageInfo, 'version'); - pkg.name = _.get(packageInfo, 'name'); - pkg.path = resolve(settings.pluginDir, pkg.name); - - // Plugins must specify their version, and by default that version should match - // the version of kibana down to the patch level. If these two versions need - // to diverge, they can specify a kibana.version to indicate the version of - // kibana the plugin is intended to work with. - pkg.kibanaVersion = _.get(packageInfo, 'kibana.version', pkg.version); - - const regExp = new RegExp(`${pkg.name}-(.+)`, 'i'); - const matches = pkg.folder.match(regExp); - pkg.platform = (matches) ? matches[1] : undefined; - - return pkg; - }); -} - -/** - * Extracts the first plugin in the archive. - * NOTE: This will need to be changed in later versions of the pack installer - * that allow for the installation of more than one plugin at once. - * @param {object} settings - a plugin installer settings object - */ -async function extractArchive(settings) { - const filter = { - paths: [ `kibana/${settings.plugins[0].folder}` ] - }; - - await extractFiles(settings.tempArchiveFile, settings.workingPath, 2, filter); -} - - /** * Returns the detailed information about each kibana plugin in the pack. * TODO: If there are platform specific folders, determine which one to use. 
+ * + * @param {object} settings - a plugin installer settings object + * @param {object} logger - a plugin installer logger object */ export async function getPackData(settings, logger) { - let packages; + let packages = []; + logger.log('Retrieving metadata from plugin archive'); try { - logger.log('Retrieving metadata from plugin archive'); - - packages = await listPackages(settings); - - await extractPackageFiles(settings, packages); - await mergePackageData(settings, packages); - await deletePackageFiles(settings); + packages = await analyzeArchive(settings.tempArchiveFile); } catch (err) { - logger.error(err); + logger.error(err.stack); throw new Error('Error retrieving metadata from plugin archive'); } if (packages.length === 0) { throw new Error('No kibana plugins found in archive'); } - packages.forEach(assertValidPackageName); + packages.forEach(assertValidPackageName); settings.plugins = packages; } +/** + * Extracts the plugin archive contents into the installer's working path + * + * @param {object} settings - a plugin installer settings object + * @param {object} logger - a plugin installer logger object + */ export async function extract(settings, logger) { try { - logger.log('Extracting plugin archive'); - - await extractArchive(settings); + const plugin = settings.plugins[0]; + logger.log('Extracting plugin archive'); + await extractArchive(settings.tempArchiveFile, settings.workingPath, plugin.archivePath); logger.log('Extraction complete'); } catch (err) { - logger.error(err); + logger.error(err.stack); throw new Error('Error extracting plugin archive'); } } diff --git a/src/cli_plugin/install/zip.js b/src/cli_plugin/install/zip.js index 84ab58fbf18da..a9445748dc205 100644 --- a/src/cli_plugin/install/zip.js +++ b/src/cli_plugin/install/zip.js @@ -1,98 +1,126 @@ -import _ from 'lodash'; -import DecompressZip from '@bigfunger/decompress-zip'; - -const SYMBOLIC_LINK = 'SymbolicLink'; +import yauzl from 'yauzl'; +import path from 'path'; +import mkdirp from 'mkdirp'; +import { createWriteStream } from 'fs'; +import { get } from 'lodash'; /** - * Creates a filter function to be consumed by extractFiles that filters by - * an array of files - * @param {array} files - an array of full file paths to extract. Should match - * exactly a value from listFiles + * Returns an array of package objects. There will be one for each of + * package.json files in the archive + * + * @param {string} archive - path to plugin archive zip file */ -function extractFilterFromFiles(files) { - const filterFiles = files.map((file) => file.replace(/\\/g, '/')); - return function filterByFiles(file) { - if (file.type === SYMBOLIC_LINK) return false; - - const path = file.path.replace(/\\/g, '/'); - return _.includes(filterFiles, path); - }; -} -/** - * Creates a filter function to be consumed by extractFiles that filters by - * an array of root paths - * @param {array} paths - an array of root paths from the archive. All files and - * folders will be extracted recursively using these paths as roots.
- */ -function extractFilterFromPaths(paths) { - return function filterByRootPath(file) { - if (file.type === SYMBOLIC_LINK) return false; +export function analyzeArchive(archive) { + const plugins = []; + const regExp = new RegExp('(kibana[\\\\/][^\\\\/]+)[\\\\/]package\\.json', 'i'); + + return new Promise((resolve, reject) => { + yauzl.open(archive, { lazyEntries: true }, function (err, zipfile) { + if (err) { + return reject(err); + } + + zipfile.readEntry(); + zipfile.on('entry', function (entry) { + const match = entry.fileName.match(regExp); + + if (!match) { + return zipfile.readEntry(); + } + + zipfile.openReadStream(entry, function (err, readable) { + const chunks = []; + + if (err) { + return reject(err); + } + + readable.on('data', chunk => chunks.push(chunk)); + + readable.on('end', function () { + const contents = Buffer.concat(chunks).toString(); + const pkg = JSON.parse(contents); + + plugins.push(Object.assign(pkg, { + archivePath: match[1], + archive: archive, - return paths.some(path => { - const regex = new RegExp(`${path}($|/)`, 'i'); - return file.parent.match(regex); + // Plugins must specify their version, and by default that version should match + // the version of kibana down to the patch level. If these two versions need + // to diverge, they can specify a kibana.version to indicate the version of + // kibana the plugin is intended to work with. + kibanaVersion: get(pkg, 'kibana.version', pkg.version) + })); + + zipfile.readEntry(); + }); + }); + }); + + zipfile.on('close', () => { + resolve(plugins); + }); }); - }; + }); } -/** - * Creates a filter function to be consumed by extractFiles - * @param {object} filter - an object with either a files or paths property. - */ -function extractFilter(filter) { - if (filter.files) return extractFilterFromFiles(filter.files); - if (filter.paths) return extractFilterFromPaths(filter.paths); - return _.noop; +const isDirectoryRegex = /(\/|\\)$/; +export function _isDirectory(filename) { + return isDirectoryRegex.test(filename); } -/** - * Extracts files from a zip archive to a file path using a filter function - * @param {string} zipPath - file path to a zip archive - * @param {string} targetPath - directory path to where the files should - * extracted - * @param {integer} strip - Number of nested directories within the archive - * that should be ignored when determining the target path of an archived file.
- * @param {function} filter - A function that accepts a single parameter 'file' - * and returns true if the file should be extracted from the archive - */ -export async function extractFiles(zipPath, targetPath, strip, filter) { - await new Promise((resolve, reject) => { - const unzipper = new DecompressZip(zipPath); +export function extractArchive(archive, targetDir, extractPath) { + return new Promise((resolve, reject) => { + yauzl.open(archive, { lazyEntries: true }, function (err, zipfile) { + if (err) { + return reject(err); + } - unzipper.on('error', reject); + zipfile.readEntry(); + zipfile.on('close', resolve); + zipfile.on('entry', function (entry) { + let fileName = entry.fileName; - const options = { - path: targetPath, - strip: strip - }; - if (filter) { - options.filter = extractFilter(filter); - } + if (extractPath && fileName.startsWith(extractPath)) { + fileName = fileName.substring(extractPath.length); + } else { + return zipfile.readEntry(); + } - unzipper.extract(options); + if (targetDir) { + fileName = path.join(targetDir, fileName); + } - unzipper.on('extract', resolve); - }); -} + if (_isDirectory(fileName)) { + mkdirp(fileName, function (err) { + if (err) { + return reject(err); + } -/** - * Returns all files within an archive - * @param {string} zipPath - file path to a zip archive - * @returns {array} all files within an archive with their relative paths - */ -export async function listFiles(zipPath) { - return await new Promise((resolve, reject) => { - const unzipper = new DecompressZip(zipPath); + zipfile.readEntry(); + }); + } else { + // file entry + zipfile.openReadStream(entry, function (err, readStream) { + if (err) { + return reject(err); + } - unzipper.on('error', reject); + // ensure parent directory exists + mkdirp(path.dirname(fileName), function (err) { + if (err) { + return reject(err); + } - unzipper.on('list', (files) => { - files = files.map((file) => file.replace(/\\/g, '/')); - resolve(files); + readStream.pipe(createWriteStream(fileName)); + readStream.on('end', function () { + zipfile.readEntry(); + }); + }); + }); + } + }); }); - - unzipper.list(); }); } diff --git a/src/cli_plugin/list/__tests__/settings.js b/src/cli_plugin/list/__tests__/settings.js index 433037c12114b..d2b127e166b21 100644 --- a/src/cli_plugin/list/__tests__/settings.js +++ b/src/cli_plugin/list/__tests__/settings.js @@ -1,5 +1,5 @@ import expect from 'expect.js'; -import fromRoot from '../../../utils/from_root'; +import { fromRoot } from '../../../utils'; import { parse } from '../settings'; describe('kibana cli', function () { diff --git a/src/cli_plugin/remove/__tests__/settings.js b/src/cli_plugin/remove/__tests__/settings.js index 0d031f820829f..012f54af2e003 100644 --- a/src/cli_plugin/remove/__tests__/settings.js +++ b/src/cli_plugin/remove/__tests__/settings.js @@ -1,5 +1,5 @@ import expect from 'expect.js'; -import fromRoot from '../../../utils/from_root'; +import { fromRoot } from '../../../utils'; import { parse } from '../settings'; describe('kibana cli', function () { diff --git a/src/core_plugins/console/__tests__/index.js b/src/core_plugins/console/__tests__/index.js index 4cc0a69d96659..8489a7a532e70 100644 --- a/src/core_plugins/console/__tests__/index.js +++ b/src/core_plugins/console/__tests__/index.js @@ -21,7 +21,7 @@ describe('plugins/console', function () { }; }); - context('proxyConfig', function () { + describe('proxyConfig', function () { it('leaves the proxyConfig settings', function () { const proxyConfigOne = {}; const proxyConfigTwo = {}; 
diff --git a/src/core_plugins/console/api_server/api.js b/src/core_plugins/console/api_server/api.js index 7b1f4f2acd36e..671d8c7a13982 100644 --- a/src/core_plugins/console/api_server/api.js +++ b/src/core_plugins/console/api_server/api.js @@ -42,4 +42,4 @@ function Api(name) { }(Api.prototype)); -module.exports = Api; +export default Api; diff --git a/src/core_plugins/console/api_server/es_5_0.js b/src/core_plugins/console/api_server/es_5_0.js index e1f5ae817b475..8443c2813cd42 100644 --- a/src/core_plugins/console/api_server/es_5_0.js +++ b/src/core_plugins/console/api_server/es_5_0.js @@ -16,6 +16,7 @@ let parts = [ require('./es_5_0/mappings'), require('./es_5_0/percolator'), require('./es_5_0/query'), + require('./es_5_0/reindex'), require('./es_5_0/snapshot_restore'), require('./es_5_0/search'), require('./es_5_0/settings'), @@ -52,4 +53,6 @@ ES_5_0.prototype = _.create(Api.prototype, { 'constructor': ES_5_0 }); }; })(ES_5_0.prototype); -module.exports = new ES_5_0(); +const instance = new ES_5_0(); + +export default instance; \ No newline at end of file diff --git a/src/core_plugins/console/api_server/es_5_0/aggregations.js b/src/core_plugins/console/api_server/es_5_0/aggregations.js index aa653f1beec75..fff4655c56a27 100644 --- a/src/core_plugins/console/api_server/es_5_0/aggregations.js +++ b/src/core_plugins/console/api_server/es_5_0/aggregations.js @@ -435,8 +435,9 @@ var rules = { } } }; -module.exports = function (api) { + +export default function (api) { api.addGlobalAutocompleteRules('aggregations', rules); api.addGlobalAutocompleteRules('aggs', rules); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/aliases.js b/src/core_plugins/console/api_server/es_5_0/aliases.js index c17dee4aa0bd7..675c35c1b33fe 100644 --- a/src/core_plugins/console/api_server/es_5_0/aliases.js +++ b/src/core_plugins/console/api_server/es_5_0/aliases.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_post_aliases', { methods: ['POST'], patterns: [ @@ -68,4 +68,4 @@ module.exports = function (api) { api.addGlobalAutocompleteRules('aliases', { '*': aliasRules }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/cat.js b/src/core_plugins/console/api_server/es_5_0/cat.js index 0da08cd066e0c..2411bb7db2458 100644 --- a/src/core_plugins/console/api_server/es_5_0/cat.js +++ b/src/core_plugins/console/api_server/es_5_0/cat.js @@ -32,7 +32,7 @@ function addNodeattrsCat(api) { }); } -module.exports = function (api) { +export default function (api) { addSimpleCat('_cat/aliases', api); addSimpleCat('_cat/allocation', api, null, ['_cat/allocation', '_cat/allocation/{nodes}']); addSimpleCat('_cat/count', api); @@ -53,4 +53,4 @@ module.exports = function (api) { addSimpleCat('_cat/plugins', api); addSimpleCat('_cat/segments', api); addNodeattrsCat(api); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/cluster.js b/src/core_plugins/console/api_server/es_5_0/cluster.js index a29e45c6e1db2..0dd9f0fe1bce8 100644 --- a/src/core_plugins/console/api_server/es_5_0/cluster.js +++ b/src/core_plugins/console/api_server/es_5_0/cluster.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_cluster/nodes/stats'); api.addEndpointDescription('_cluster/state', { patterns: [ @@ -139,4 +139,4 @@ module.exports = function (api) { dry_run: { __one_of: [true, false] } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/count.js 
b/src/core_plugins/console/api_server/es_5_0/count.js index 6bbcb3a9f6b85..fe44bdef271ad 100644 --- a/src/core_plugins/console/api_server/es_5_0/count.js +++ b/src/core_plugins/console/api_server/es_5_0/count.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_count', { methods: ['GET', 'POST'], priority: 10, // collides with get doc by id @@ -19,4 +19,4 @@ module.exports = function (api) { } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/document.js b/src/core_plugins/console/api_server/es_5_0/document.js index 47762ef5b757e..1c0537a2ca701 100644 --- a/src/core_plugins/console/api_server/es_5_0/document.js +++ b/src/core_plugins/console/api_server/es_5_0/document.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_get_doc', { methods: ['GET'], patterns: [ @@ -230,4 +230,4 @@ module.exports = function (api) { } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/field_stats.js b/src/core_plugins/console/api_server/es_5_0/field_stats.js index 58c35e6ed0eb3..1b8fcc3f7b617 100644 --- a/src/core_plugins/console/api_server/es_5_0/field_stats.js +++ b/src/core_plugins/console/api_server/es_5_0/field_stats.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_field_stats', { methods: ['GET', 'POST'], patterns: [ @@ -44,4 +44,4 @@ module.exports = function (api) { } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/filter.js b/src/core_plugins/console/api_server/es_5_0/filter.js index bcaae01055144..a66a702a1792b 100644 --- a/src/core_plugins/console/api_server/es_5_0/filter.js +++ b/src/core_plugins/console/api_server/es_5_0/filter.js @@ -330,7 +330,7 @@ filters.nested = { _name: '' }; -module.exports = function (api) { +export default function (api) { api.addGlobalAutocompleteRules('filter', filters); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/globals.js b/src/core_plugins/console/api_server/es_5_0/globals.js index e8cd849226151..c0b49f758f215 100644 --- a/src/core_plugins/console/api_server/es_5_0/globals.js +++ b/src/core_plugins/console/api_server/es_5_0/globals.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addGlobalAutocompleteRules('highlight', { pre_tags: {}, post_tags: {}, @@ -21,4 +21,4 @@ module.exports = function (api) { lang: "", params: {} }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/indices.js b/src/core_plugins/console/api_server/es_5_0/indices.js index a8772a86d55e7..9dc7b4f927665 100644 --- a/src/core_plugins/console/api_server/es_5_0/indices.js +++ b/src/core_plugins/console/api_server/es_5_0/indices.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_refresh', { methods: ['POST'], patterns: [ @@ -231,4 +231,4 @@ module.exports = function (api) { "{indices}/_open" ] }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/ingest.js b/src/core_plugins/console/api_server/es_5_0/ingest.js index 6ce1251a7c4d9..989b10361eb1d 100644 --- a/src/core_plugins/console/api_server/es_5_0/ingest.js +++ b/src/core_plugins/console/api_server/es_5_0/ingest.js @@ -329,7 +329,7 @@ const simulateUrlParamsDefinition = { "verbose": "__flag__" }; -module.exports = function (api) { +export default function (api) { // Note: this isn't an actual API endpoint. 
It exists so the forEach processor's "processor" field // may recursively use the autocomplete rules for any processor. @@ -383,4 +383,4 @@ module.exports = function (api) { ] } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/mappings.js b/src/core_plugins/console/api_server/es_5_0/mappings.js index 93709de58657f..a8bbda676b67a 100644 --- a/src/core_plugins/console/api_server/es_5_0/mappings.js +++ b/src/core_plugins/console/api_server/es_5_0/mappings.js @@ -4,7 +4,7 @@ var BOOLEAN = { __one_of: [true, false] }; -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_get_mapping', { methods: ['GET'], priority: 10, // collides with get doc by id @@ -216,4 +216,4 @@ module.exports = function (api) { } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/nodes.js b/src/core_plugins/console/api_server/es_5_0/nodes.js index 889943678fb4b..04ea998b48f55 100644 --- a/src/core_plugins/console/api_server/es_5_0/nodes.js +++ b/src/core_plugins/console/api_server/es_5_0/nodes.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_nodes/hot_threads', { methods: ['GET'], patterns: [ @@ -75,4 +75,4 @@ module.exports = function (api) { ] } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/percolator.js b/src/core_plugins/console/api_server/es_5_0/percolator.js index 100a0889a8ada..ce3b07fcb43ff 100644 --- a/src/core_plugins/console/api_server/es_5_0/percolator.js +++ b/src/core_plugins/console/api_server/es_5_0/percolator.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_put_percolator', { priority: 10, // to override doc methods: ['PUT', 'POST'], @@ -87,4 +87,4 @@ module.exports = function (api) { filter: {} } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/query.js b/src/core_plugins/console/api_server/es_5_0/query.js index db4f55dd384bd..e6769b66eeaa0 100644 --- a/src/core_plugins/console/api_server/es_5_0/query.js +++ b/src/core_plugins/console/api_server/es_5_0/query.js @@ -67,7 +67,7 @@ var DECAY_FUNC_DESC = { } }; -module.exports = function (api) { +export default function (api) { api.addGlobalAutocompleteRules('query', { match: { __template: { @@ -141,7 +141,7 @@ module.exports = function (api) { filter: { __scope_link: 'GLOBAL.filter' }, - minimum_number_should_match: 1, + minimum_should_match: 1, boost: 1.0 }, boosting: { @@ -624,4 +624,4 @@ module.exports = function (api) { }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/reindex.js b/src/core_plugins/console/api_server/es_5_0/reindex.js new file mode 100644 index 0000000000000..6cf57cc54e6dc --- /dev/null +++ b/src/core_plugins/console/api_server/es_5_0/reindex.js @@ -0,0 +1,57 @@ +export default function (api) { + + api.addEndpointDescription('_post_reindex', { + methods: [ 'POST' ], + patterns: [ + '_reindex' + ], + url_params: { + refresh: '__flag__', + wait_for_completion: 'true', + wait_for_active_shards: 1, + timeout: '1m', + requests_per_second: 0, + slices: 1 + }, + data_autocomplete_rules: { + __template: { + 'source': {}, + 'dest': {} + }, + 'source': { + 'index': '', + 'type': '', + 'query': { + __scope_link: 'GLOBAL.query' + }, + 'sort': { + __template: { + 'FIELD': 'desc' + }, + 'FIELD': { __one_of: [ 'asc', 'desc' ] } + }, + 'size': 1000, + 'remote': { + __template: { + 'host': '', + }, + 'host': '', + 'username': '', + 'password': '', + 'socket_timeout': '30s', + 
'connect_timeout': '30s' + } + }, + 'dest': { + 'index': '', + 'version_type': { __one_of: [ 'internal', 'external' ] }, + 'op_type': 'create', + 'routing': { __one_of: [ 'keep', 'discard', '=SOME TEXT'] }, + 'pipeline': '' + }, + 'conflicts': 'proceed', + 'size': 10, + 'script': { __scope_link: 'GLOBAL.script' }, + } + }) +} \ No newline at end of file diff --git a/src/core_plugins/console/api_server/es_5_0/search.js b/src/core_plugins/console/api_server/es_5_0/search.js index c2f1e7f72f6a2..edadb55822e25 100644 --- a/src/core_plugins/console/api_server/es_5_0/search.js +++ b/src/core_plugins/console/api_server/es_5_0/search.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_search', { methods: ['GET', 'POST'], priority: 10, // collides with get doc by id @@ -252,4 +252,4 @@ module.exports = function (api) { local: "__flag__" } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/settings.js b/src/core_plugins/console/api_server/es_5_0/settings.js index 1cfdcad763083..54a546b100598 100644 --- a/src/core_plugins/console/api_server/es_5_0/settings.js +++ b/src/core_plugins/console/api_server/es_5_0/settings.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_get_settings', { patterns: [ @@ -83,4 +83,4 @@ module.exports = function (api) { } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/snapshot_restore.js b/src/core_plugins/console/api_server/es_5_0/snapshot_restore.js index 42e7bc030ed78..040df945a9a72 100644 --- a/src/core_plugins/console/api_server/es_5_0/snapshot_restore.js +++ b/src/core_plugins/console/api_server/es_5_0/snapshot_restore.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('restore_snapshot', { methods: ['POST'], patterns: [ @@ -126,4 +126,4 @@ module.exports = function (api) { } } }); -}; +} diff --git a/src/core_plugins/console/api_server/es_5_0/templates.js b/src/core_plugins/console/api_server/es_5_0/templates.js index 5646be3e4e252..190bf352f1373 100644 --- a/src/core_plugins/console/api_server/es_5_0/templates.js +++ b/src/core_plugins/console/api_server/es_5_0/templates.js @@ -1,4 +1,4 @@ -module.exports = function (api) { +export default function (api) { api.addEndpointDescription('_delete_template', { methods: ['DELETE'], patterns: [ @@ -24,4 +24,4 @@ module.exports = function (api) { settings: { __scope_link: '_put_settings' } } }); -}; +} diff --git a/src/core_plugins/console/api_server/server.js b/src/core_plugins/console/api_server/server.js index ced1cbb38488f..9782af96071fd 100644 --- a/src/core_plugins/console/api_server/server.js +++ b/src/core_plugins/console/api_server/server.js @@ -1,6 +1,6 @@ let _ = require("lodash"); -module.exports.resolveApi = function (sense_version, apis, reply) { +export function resolveApi(sense_version, apis, reply) { let result = {}; _.each(apis, function (name) { { @@ -11,4 +11,4 @@ module.exports.resolveApi = function (sense_version, apis, reply) { }); return reply(result).type("application/json"); -}; +} diff --git a/src/core_plugins/console/index.js b/src/core_plugins/console/index.js index 6813871dd4909..0f4d29e8ffff0 100644 --- a/src/core_plugins/console/index.js +++ b/src/core_plugins/console/index.js @@ -1,8 +1,9 @@ import Boom from 'boom'; -import apiServer from './api_server/server'; +import { resolveApi } from './api_server/server'; import { existsSync } from 'fs'; import { resolve, join, 
sep } from 'path'; -import { has } from 'lodash'; +import { has, isEmpty } from 'lodash'; +import setHeaders from '../elasticsearch/lib/set_headers'; import { ProxyConfigCollection, @@ -83,9 +84,10 @@ export default function (kibana) { pathFilters: proxyPathFilters, getConfigForReq(req, uri) { const whitelist = config.get('elasticsearch.requestHeadersWhitelist'); - const headers = filterHeaders(req.headers, whitelist); + const filteredHeaders = filterHeaders(req.headers, whitelist); + const headers = setHeaders(filteredHeaders, config.get('elasticsearch.customHeaders')); - if (config.has('console.proxyConfig')) { + if (!isEmpty(config.get('console.proxyConfig'))) { return { ...proxyConfigCollection.configForUri(uri), headers, @@ -109,7 +111,7 @@ export default function (kibana) { return; } - return apiServer.resolveApi(sense_version, apis.split(','), reply); + return resolveApi(sense_version, apis.split(','), reply); } }); diff --git a/src/core_plugins/console/public/css/sense.less b/src/core_plugins/console/public/css/sense.less index 20fcebbc7fe7e..e63f82093ec70 100644 --- a/src/core_plugins/console/public/css/sense.less +++ b/src/core_plugins/console/public/css/sense.less @@ -60,16 +60,16 @@ sense-history-viewer, display: flex; flex: 0 0 13px; cursor: ew-resize; - background-color: #e4e4e4; + background-color: @globalColorLightestGray; align-items: center; margin: -10px 0; &:hover { - background-color: darken(#e4e4e4, 10%); + background-color: lighten(@globalColorBlue, 50%); } &.active { - background-color: darken(#e4e4e4, 40%);; + background-color: @globalColorBlue; } } diff --git a/src/core_plugins/console/public/css/sense.light.less b/src/core_plugins/console/public/css/sense.light.less index 5b2ef80a882ed..966198d21a164 100644 --- a/src/core_plugins/console/public/css/sense.light.less +++ b/src/core_plugins/console/public/css/sense.light.less @@ -19,7 +19,7 @@ } .ace_scroller { - border-left: 1px solid #CCC; + border-left: 1px solid @globalColorLightGray; } .ace_multi_string { diff --git a/src/core_plugins/console/public/hacks/register.js b/src/core_plugins/console/public/hacks/register.js index 1476a6d47d1cb..c9f94a64f8f08 100644 --- a/src/core_plugins/console/public/hacks/register.js +++ b/src/core_plugins/console/public/hacks/register.js @@ -1,5 +1,6 @@ -import devTools from 'ui/registry/dev_tools'; -devTools.register(() => ({ +import { DevToolsRegistryProvider } from 'ui/registry/dev_tools'; + +DevToolsRegistryProvider.register(() => ({ order: 1, name: 'console', display: 'Console', diff --git a/src/core_plugins/console/public/index.html b/src/core_plugins/console/public/index.html index b5eb21a4410f0..ceaa070f41095 100644 --- a/src/core_plugins/console/public/index.html +++ b/src/core_plugins/console/public/index.html @@ -12,11 +12,20 @@ - - + + -
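One detail worth spelling out from the console proxy change above: every whitelisted request header now passes through setHeaders, so values from elasticsearch.customHeaders win on conflict. set_headers.js itself is not part of this diff, but the call site only depends on it behaving like a shallow merge; a sketch under that assumption, with a hypothetical header name:

// assumed shape of '../elasticsearch/lib/set_headers': a shallow merge in
// which the configured custom headers override the incoming filtered ones
function setHeaders(originalHeaders, newHeaders) {
  return {
    ...originalHeaders,
    ...newHeaders,
  };
}

// a whitelisted header from the request is replaced by the configured value
setHeaders({ 'x-my-header': 'from-request' }, { 'x-my-header': 'from-config' });
// => { 'x-my-header': 'from-config' }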