diff --git a/.gitignore b/.gitignore index cf7b08c8efe8b..56b859e0c6c38 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ config/kibana.dev.yml coverage selenium .babelcache.json +*.swp +*.swo diff --git a/README.md b/README.md index 6987169fe55c9..a30b031815ebf 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Kibana 4.3.0-snapshot +# Kibana 5.0.0-snapshot [![Build Status](https://travis-ci.org/elastic/kibana.svg?branch=master)](https://travis-ci.org/elastic/kibana?branch=master) @@ -6,7 +6,7 @@ Kibana is an open source ([Apache Licensed](https://github.com/elastic/kibana/bl ## Requirements -- Elasticsearch version 2.0.0 or later +- Elasticsearch version 2.1.0 or later - Kibana binary package ## Installation @@ -21,8 +21,6 @@ You're up and running! Fantastic! Kibana is now running on port 5601, so point y The first screen you arrive at will ask you to configure an **index pattern**. An index pattern describes to Kibana how to access your data. We make the guess that you're working with log data, and we hope (because it's awesome) that you're working with Logstash. By default, we fill in `logstash-*` as your index pattern, thus the only thing you need to do is select which field contains the timestamp you'd like to use. Kibana reads your Elasticsearch mapping to find your time fields - select one from the list and hit *Create*. -**Tip:** there's an optimization in the way of the *Use event times to create index names* option. Since Logstash creates an index every day, Kibana uses that fact to only search indices that could possibly contain data in your selected time range. - Congratulations, you have an index pattern! You should now be looking at a paginated list of the fields in your index or indices, as well as some informative data about them. Kibana has automatically set this new index pattern as your default index pattern. If you'd like to know more about index patterns, pop into to the [Settings](#settings) section of the documentation. **Did you know:** Both *indices* and *indexes* are acceptable plural forms of the word *index*. Knowledge is power. @@ -39,7 +37,7 @@ For the daring, snapshot builds are available. 
These builds are created after ea | platform | | | | --- | --- | --- | -| OSX | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-darwin-x64.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-darwin-x64.zip) | -| Linux x64 | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-linux-x64.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-linux-x64.zip) | -| Linux x86 | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-linux-x86.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-linux-x86.zip) | -| Windows | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-windows.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-4.3.0-snapshot-windows.zip) | +| OSX | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-darwin-x64.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-darwin-x64.zip) | +| Linux x64 | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-linux-x64.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-linux-x64.zip) | +| Linux x86 | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-linux-x86.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-linux-x86.zip) | +| Windows | [tar](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-windows.tar.gz) | [zip](http://download.elastic.co/kibana/kibana-snapshot/kibana-5.0.0-snapshot-windows.zip) | diff --git a/STYLEGUIDE.md b/STYLEGUIDE.md index 2c8af6f1b01f8..010de8e419b5c 100644 --- a/STYLEGUIDE.md +++ b/STYLEGUIDE.md @@ -773,41 +773,36 @@ When creating a utility function, attach it as a lodash mixin. Several already exist, and can be found in `src/kibana/utils/_mixins.js` -## Modules - -Kibana uses AMD modules to organize code, and require.js to load those modules. - -Even Angular code is loaded this way. - -### Module paths +## Filenames -Paths to modules should not be relative (ie. no dot notation). Instead, they should be loaded from one of the defined paths in the require config. +All filenames should use `snake_case` and *can* start with an underscore if the module is not intended to be used outside of it's containing module. *Right:* - -```js -require('some/base/path/my_module'); -require('another/path/another_module'); -``` + - `src/kibana/index_patterns/index_pattern.js` + - `src/kibana/index_patterns/_field.js` *Wrong:* + - `src/kibana/IndexPatterns/IndexPattern.js` + - `src/kibana/IndexPatterns/Field.js` -```js -require('../my_module'); -require('./path/another_module'); -``` +## Modules + +Kibana uses WebPack, which supports many types of module definitions. ### CommonJS Syntax -Module dependencies should be loaded via the CommonJS syntax: +Module dependencies should be written using CommonJS or ES2015 syntax: *Right:* ```js -define(function (require) { - var _ = require('lodash'); - ... -}); +const _ = require('lodash'); +module.exports = ...; +``` + +```js +import _ from 'lodash'; +export default ...; ``` *Wrong:* @@ -824,7 +819,7 @@ Kibana is written in Angular, and uses several utility methods to make using Ang ### Defining modules -Angular modules are defined using a custom require module named `module`. 
It is used as follows: +Angular modules are defined using a custom require module named `ui/modules`. It is used as follows: ```js var app = require('ui/modules').get('app/namespace'); ``` @@ -872,7 +867,7 @@ require('ui/routes') # Html Style Guide -### Multiple attribute values +## Multiple attribute values When a node has multiple attributes that would cause it to exceed the line character limit, each attribute including the first should be on its own line with a single indent. Also, when a node that is styled in this way has child nodes, there should be a blank line between the opening parent tag and the first child tag. @@ -888,6 +883,38 @@ ``` +# Api Style Guide + +## Paths + +API routes must start with the `/api/` path segment, and should be followed by the plugin id if applicable: + +*Right:* `/api/marvel/v1/nodes` +*Wrong:* `/marvel/api/v1/nodes` + +## Versions + +Kibana won't be supporting multiple API versions, so APIs should not define a version. + +*Right:* `/api/kibana/index_patterns` +*Wrong:* `/api/kibana/v1/index_patterns` + +## snake_case + +Kibana uses `snake_case` for the entire API, just like Elasticsearch. All urls, paths, query string parameters, values, and bodies should be `snake_case` formatted. + +*Right:* +``` +POST /api/kibana/index_patterns +{ + "id": "...", + "time_field_name": "...", + "fields": [ + ... + ] +} +``` + # Attribution This JavaScript guide forked from the [node style guide](https://github.com/felixge/node-style-guide) created by [Felix Geisendörfer](http://felixge.de/) and is diff --git a/docs/advanced-settings.asciidoc b/docs/advanced-settings.asciidoc new file mode 100644 index 0000000000000..e649d7dc5d8a8 --- /dev/null +++ b/docs/advanced-settings.asciidoc @@ -0,0 +1,37 @@ +[[kibana-settings-reference]] + +WARNING: Modifying the following settings can significantly affect Kibana's performance and cause problems that are difficult to diagnose. Setting a property's value to a blank field will revert to the default behavior, which may not be compatible with other configuration settings. Deleting a custom setting removes it from Kibana permanently. + +.Kibana Settings Reference +[horizontal] +`query:queryString:options`:: Options for the Lucene query string parser. +`sort:options`:: Options for the Elasticsearch https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-sort.html[sort] parameter. +`dateFormat`:: The format to use for displaying pretty-formatted dates. +`dateFormat:tz`:: The timezone that Kibana uses. The default value of `Browser` uses the timezone detected by the browser. +`dateFormat:scaled`:: These values define the format used to render ordered time-based data. Formatted timestamps must adapt to the interval between measurements. Keys are http://en.wikipedia.org/wiki/ISO_8601#Time_intervals[ISO8601 intervals]. +`defaultIndex`:: Default is `null`. This property specifies the default index. +`metaFields`:: An array of fields outside of `_source`. Kibana merges these fields into the document when displaying the document. +`discover:sampleSize`:: The number of rows to show in the Discover table. +`doc_table:highlight`:: Highlight results in Discover and Saved Searches Dashboard. Highlighting makes requests slow when working on big documents. Set this property to `false` to disable highlighting.
+`courier:maxSegmentCount`:: Kibana splits requests in the Discover app into segments to limit the size of requests sent to the Elasticsearch cluster. This setting constrains the length of the segment list. Long segment lists can significantly increase request processing time. +`fields:popularLimit`:: This setting governs how many of the top most popular fields are shown. +`histogram:barTarget`:: When date histograms use the `auto` interval, Kibana attempts to generate this number of bars. +`histogram:maxBars`:: Date histograms are not generated with more bars than the value of this property, scaling values when necessary. +`visualization:tileMap:maxPrecision`:: The maximum geoHash precision displayed on tile maps: 7 is high, 10 is very high, 12 is the maximum. http://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[Explanation of cell dimensions]. +`visualization:tileMap:WMSdefaults`:: Default properties for the WMS map server support in the tile map. +`visualization:colorMapping`:: Maps values to specified colors within visualizations. +`visualization:loadingDelay`:: Time to wait before dimming visualizations during query. +`csv:separator`:: A string that serves as the separator for exported values. +`csv:quoteValues`:: Set this property to `true` to quote exported values. +`history:limit`:: In fields that have history, such as query inputs, the value of this property limits how many recent values are shown. +`shortDots:enable`:: Set this property to `true` to shorten long field names in visualizations. For example, instead of `foo.bar.baz`, show `f.b.baz`. +`truncate:maxHeight`:: This property specifies the maximum height that a cell occupies in a table. A value of 0 disables truncation. +`indexPattern:fieldMapping:lookBack`:: The value of this property sets the number of recent matching patterns to query the field mapping for index patterns with names that contain timestamps. +`format:defaultTypeMap`:: A map of the default format name for each field type. Field types that are not explicitly mentioned use "_default_". +`format:number:defaultPattern`:: Default numeral format for the "number" format. +`format:bytes:defaultPattern`:: Default numeral format for the "bytes" format. +`format:percent:defaultPattern`:: Default numeral format for the "percent" format. +`format:currency:defaultPattern`:: Default numeral format for the "currency" format. +`timepicker:timeDefaults`:: The default time filter selection. +`timepicker:refreshIntervalDefaults`:: The time filter's default refresh interval. +`dashboard:defaultDarkTheme`:: Set this property to `true` to make new dashboards use the dark theme by default. \ No newline at end of file diff --git a/docs/area.asciidoc b/docs/area.asciidoc index f18adf55043d2..799f6794ade3e 100644 --- a/docs/area.asciidoc +++ b/docs/area.asciidoc @@ -68,3 +68,9 @@ values. this box to change both upper and lower bounds to match the values returned in the data. *Show Tooltip*:: Check this box to enable the display of tooltips. *Show Legend*:: Check this box to enable the display of a legend next to the chart. 
+ +[float] +[[area-viewing-detailed-information]] +==== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/autorefresh.asciidoc b/docs/autorefresh.asciidoc index a35982f0bdbc3..a97e49346f60c 100644 --- a/docs/autorefresh.asciidoc +++ b/docs/autorefresh.asciidoc @@ -6,15 +6,14 @@ When a refresh interval is set, it is displayed to the left of the Time Filter i To set the refresh interval: -. Click the *Time Filter* image:images/TimeFilter.jpg[Time -Filter] in the upper right corner of the menu bar. +. Click the *Time Filter* image:images/TimeFilter.jpg[Time Filter] in the upper right corner of the menu bar. . Click the *Refresh Interval* tab. . Choose a refresh interval from the list. To automatically refresh the data, click the image:images/autorefresh.png[] *Auto-refresh* button and select an autorefresh interval: -image::images/autorefresh-intervals.png +image::images/autorefresh-intervals.png[] When auto-refresh is enabled, Kibana's top bar displays a pause button and the auto-refresh interval: image:images/autorefresh-pause.png[]. Click the *Pause* button to pause auto-refresh. diff --git a/docs/color-formatter.asciidoc b/docs/color-formatter.asciidoc new file mode 100644 index 0000000000000..b03a0d59d07f9 --- /dev/null +++ b/docs/color-formatter.asciidoc @@ -0,0 +1,7 @@ +The `Color` field formatter enables you to specify colors for specific ranges of values in a numeric field. + +When you select the `Color` field formatter, Kibana displays the *Range*, *Font Color*, *Background Color*, and *Example* fields. + +Click the *Add Color* button to add a range of values to associate with a particular color. You can click in the *Font Color* and *Background Color* fields to display a color picker. You can also enter a specific hex code value in the field. The effect of your current color choices is displayed in the *Example* field. + +image::images/colorformatter.png[] \ No newline at end of file diff --git a/docs/dashboard.asciidoc b/docs/dashboard.asciidoc index 8fb9525d31830..f2c603345e47a 100644 --- a/docs/dashboard.asciidoc +++ b/docs/dashboard.asciidoc @@ -102,8 +102,6 @@ Move the cursor to the bottom right corner of the container until the cursor cha cursor changes, click and drag the corner of the container to change the container's size. Release the mouse button to confirm the new container size. -// enhancement request: a way to specify specific dimensions for a container in pixels, or at least display that info? - [float] [[removing-containers]] ==== Removing Containers @@ -137,6 +135,10 @@ grid includes the query duration, the request duration, the total number of reco index pattern used to make the query. image:images/NYCTA-Statistics.jpg[] +To export the raw data behind the visualization as a comma-separated-values (CSV) file, click on either the +*Raw* or *Formatted* links at the bottom of any of the detailed information tabs. A raw export contains the data as it +is stored in Elasticsearch. A formatted export contains the results of any applicable Kibana field formatters. + [float] [[changing-the-visualization]] === Changing the Visualization diff --git a/docs/datatable.asciidoc b/docs/datatable.asciidoc index 6e936052be2e3..b80cb8ccd812d 100644 --- a/docs/datatable.asciidoc +++ b/docs/datatable.asciidoc @@ -72,3 +72,9 @@ Checkboxes are available to enable and disable the following behaviors: *Show partial rows*:: Check this box to display a row even when there is no result.
NOTE: Enabling these behaviors may have a substantial effect on performance. + +[float] +[[datatable-viewing-detailed-information]] +=== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/filter-pinning.asciidoc b/docs/filter-pinning.asciidoc index 95ca0b0e5a74a..090b009f63e3a 100644 --- a/docs/filter-pinning.asciidoc +++ b/docs/filter-pinning.asciidoc @@ -21,6 +21,78 @@ Toggle Filter image:images/filter-toggle.png[]:: Click this icon to _toggle_ a f filters, and display in green. Only elements that match the filter are displayed. To change this to an exclusion filters, displaying only elements that _don't_ match, toggle the filter. Exclusion filters display in red. Remove Filter image:images/filter-delete.png[]:: Click this icon to remove a filter entirely. +Custom Filter image:images/filter-custom.png[]:: Click this icon to display a text field where you can customize the JSON +representation of the filter and specify an alias to use for the filter name: ++ +image::images/filter-custom-json.png[] ++ +You can use JSON filter representation to implement predicate logic, with `should` for OR, `must` for AND, and `must_not` +for NOT: ++ +.OR Example +========== +[source,json] +{ + "bool": { + "should": [ + { + "term": { + "geoip.country_name.raw": "Canada" + } + }, + { + "term": { + "geoip.country_name.raw": "China" + } + } + ] + } +} +========== ++ +.AND Example +========== +[source,json] +{ + "bool": { + "must": [ + { + "term": { + "geoip.country_name.raw": "United States" + } + }, + { + "term": { + "geoip.city_name.raw": "New York" + } + } + ] + } +} + +========== ++ +.NOT Example +========== +[source,json] +{ + "bool": { + "must_not": [ + { + "term": { + "geoip.country_name.raw": "United States" + } + }, + { + "term": { + "geoip.country_name.raw": "Canada" + } + } + ] + } +} +========== +Click the *Done* button to update the filter with your changes. To apply any of the filter actions to all the filters currently in place, click the image:images/filter-actions.png[] *Global Filter Actions* button and select an action. diff --git a/docs/getting-started.asciidoc b/docs/getting-started.asciidoc index 178789b562522..0d787a66dbd9a 100644 --- a/docs/getting-started.asciidoc +++ b/docs/getting-started.asciidoc @@ -12,6 +12,13 @@ key Kibana functionality. By the end of this tutorial, you will have: The material in this section assumes you have a working Kibana install connected to a working Elasticsearch install. +Video tutorials are also available: + +* https://www.elastic.co/blog/kibana-4-video-tutorials-part-1[High-level Kibana 4 introduction, pie charts] +* https://www.elastic.co/blog/kibana-4-video-tutorials-part-2[Data discovery, bar charts, and line charts] +* https://www.elastic.co/blog/kibana-4-video-tutorials-part-3[Tile maps] +* https://www.elastic.co/blog/kibana-4-video-tutorials-part-4[Embedding Kibana 4 visualizations] + [float] [[tutorial-load-dataset]] === Before You Start: Loading Sample Data @@ -23,7 +30,7 @@ The tutorials in this section rely on the following data sets: * A set of fictitious accounts with randomly generated data. Download this data set by clicking here: https://github.com/bly2k/files/blob/master/accounts.zip?raw=true[accounts.zip] * A set of randomly generated log files. 
Download this data set by clicking here: - https://download.elastic.co/demos/kibana/gettingstarted/logs.jsonl.gz[logstash.jsonl.gz] + https://download.elastic.co/demos/kibana/gettingstarted/logs.jsonl.gz[logs.jsonl.gz] Two of the data sets are compressed. Use the following commands to extract the files: @@ -96,15 +103,77 @@ This mapping specifies the following qualities for the data set: * The _speaker_ field is a string that isn't analyzed. The string in this field is treated as a single unit, even if there are multiple words in the field. * The same applies to the _play_name_ field. -* The line_id and speech_number fields are integers. +* The _line_id_ and _speech_number_ fields are integers. + +The logs data set requires a mapping to label the latitude/longitude pairs in the logs as geographic locations by +applying the `geo_point` type to those fields. -The accounts and logstash data sets don't require any mappings, so at this point we're ready to load the data sets into -Elasticsearch with the following commands: +Use the following commands to establish `geo_point` mapping for the logs: + +[source,shell] +curl -XPUT http://localhost:9200/logstash-2015.05.18 -d ' +{ + "mappings": { + "log": { + "properties": { + "geo": { + "properties": { + "coordinates": { + "type": "geo_point" + } + } + } + } + } + } +} +'; [source,shell] -curl -XPOST 'localhost:9200/bank/_bulk?pretty' --data-binary @accounts.json +curl -XPUT http://localhost:9200/logstash-2015.05.19 -d ' +{ + "mappings": { + "log": { + "properties": { + "geo": { + "properties": { + "coordinates": { + "type": "geo_point" + } + } + } + } + } + } +} +'; + +[source,shell] +curl -XPUT http://localhost:9200/logstash-2015.05.20 -d ' +{ + "mappings": { + "log": { + "properties": { + "geo": { + "properties": { + "coordinates": { + "type": "geo_point" + } + } + } + } + } + } +} +'; + +The accounts data set doesn't require any mappings, so at this point we're ready to use the Elasticsearch +{ref}/docs-bulk.html[`bulk`] API to load the data sets with the following commands: + +[source,shell] +curl -XPOST 'localhost:9200/bank/account/_bulk?pretty' --data-binary @accounts.json curl -XPOST 'localhost:9200/shakespeare/_bulk?pretty' --data-binary @shakespeare.json -curl -XPOST 'localhost:9200/_bulk?pretty' --data-binary @logstash.json +curl -XPOST 'localhost:9200/_bulk?pretty' --data-binary @logs.jsonl These commands may take some time to execute, depending on the computing resources available. @@ -126,16 +195,21 @@ yellow open logstash-2015.05.20 5 1 4750 0 16.4mb [[tutorial-define-index]] === Defining Your Index Patterns -Each set of data loaded to Elasticsearch has an https://www.elastic.co/guide/en/kibana/current/settings.html#settings-create-pattern[index pattern]. In the previous section, the Shakespeare data set has an index named `shakespeare`, and the accounts +Each set of data loaded to Elasticsearch has an <>. In the previous section, the Shakespeare data set has an index named `shakespeare`, and the accounts data set has an index named `bank`. An _index pattern_ is a string with optional wildcards that can match multiple indices. For example, in the common logging use case, a typical index name contains the date in MM-DD-YYYY format, and an index pattern for May would look something like `logstash-2015.05*`. -For this tutorial, any pattern that matches either of the two indices we've loaded will work. Open a browser and +For this tutorial, any pattern that matches the name of an index we've loaded will work. 
Open a browser and navigate to `localhost:5601`. Click the *Settings* tab, then the *Indices* tab. Click *Add New* to define a new index -pattern. Since these data sets don't contain time-series data, make sure the *Index contains time-based events* box is -unchecked. Specify `shakes*` as the index pattern for the Shakespeare data set and click *Create* to define the index -pattern, then define a second index pattern named `ba*`. +pattern. Two of the sample data sets, the Shakespeare plays and the financial accounts, don't contain time-series data. +Make sure the *Index contains time-based events* box is unchecked when you create index patterns for these data sets. +Specify `shakes*` as the index pattern for the Shakespeare data set and click *Create* to define the index pattern, then +define a second index pattern named `ba*`. + +The Logstash data set does contain time-series data, so after clicking *Add New* to define the index for this data +set, make sure the *Index contains time-based events* box is checked and select the `@timestamp` field from the +*Time-field name* drop-down. [float] [[tutorial-discovering]] @@ -199,14 +273,14 @@ selector. Select the *balance* field from the *Field* drop-down, then click on * total number of ranges to six. Enter the following ranges: [source,text] -0 1000 -1000 3000 -3000 7000 -7000 15000 -15000 31000 +0 999 +1000 2999 +3000 6999 +7000 14999 +15000 30999 31000 50000 -Click the green *Apply changes* to display the chart: +Click the green *Apply changes* button image:images/apply-changes-button.png[] to display the chart: image::images/tutorial-visualize-pie-2.png[] @@ -214,8 +288,8 @@ This shows you what proportion of the 1000 accounts fall in these balance ranges we're going to add another bucket aggregation. We can break down each of the balance ranges further by the account holder's age. -Click *Add sub-buckets* at the bottom, then select the *Terms* aggregation and the *age* field from the drop-downs. -Click the green *Apply changes* button to add an external ring with the new results. +Click *Add sub-buckets* at the bottom, then select *Split Slices*. Choose the *Terms* aggregation and the *age* field from the drop-downs. +Click the green *Apply changes* button image:images/apply-changes-button.png[] to add an external ring with the new results. image::images/tutorial-visualize-pie-3.png[] @@ -230,9 +304,9 @@ image::images/tutorial-visualize-bar-1.png[] For the Y-axis metrics aggregation, select *Unique Count*, with *speaker* as the field. For Shakespeare plays, it might be useful to know which plays have the lowest number of distinct speaking parts, if your theater company is short on actors. For the X-Axis buckets, select the *Terms* aggregation with the *play_name* field. For the *Order*, select -*Bottom*, leaving the *Size* at 5. +*Ascending*, leaving the *Size* at 5. -Leave the other elements at their default values and click the green *Apply changes* button. Your chart should now look +Leave the other elements at their default values and click the green *Apply changes* button image:images/apply-changes-button.png[]. Your chart should now look like this: image::images/tutorial-visualize-bar-2.png[] @@ -247,7 +321,7 @@ as well as change many other options for your visualizations, by clicking the *O Now that you have a list of the smallest casts for Shakespeare plays, you might also be curious to see which of these plays makes the greatest demands on an individual actor by showing the maximum number of speeches for a given part. 
Add a Y-axis aggregation with the *Add metrics* button, then choose the *Max* aggregation for the *speech_number* field. In -the *Options* tab, change the *Bar Mode* drop-down to *grouped*, then click the green *Apply changes* button. Your +the *Options* tab, change the *Bar Mode* drop-down to *grouped*, then click the green *Apply changes* button image:images/apply-changes-button.png[]. Your chart should now look like this: image::images/tutorial-visualize-bar-3.png[] @@ -258,9 +332,9 @@ might therefore make more demands on an actor's memory. Save this chart with the name _Bar Example_. Next, we're going to make a tile map chart to visualize some geographic data. Click on *New Visualization*, then -*Tile map*. Select *From a new search* and the `logstash-*` index pattern. Define the time window for the events we're -exploring by clicking the time selector at the top right of the Kibana interface. Click on *Absolute*, then set the -end time for the range to May 20, 2015 and the start time to May 18, 2015: +*Tile map*. Select *From a new search* and the `logstash-*` index pattern. Define the time window for the events +we're exploring by clicking the time selector at the top right of the Kibana interface. Click on *Absolute*, then set +the start time to May 18, 2015 and the end time for the range to May 20, 2015: image::images/tutorial-timepicker.png[] @@ -269,7 +343,7 @@ at the bottom. You'll see a map of the world, since we haven't defined any bucke image::images/tutorial-visualize-map-1.png[] -Select *Geo Coordinates* as the bucket, then click the green *Apply changes* button. Your chart should now look like +Select *Geo Coordinates* as the bucket, then click the green *Apply changes* button image:images/apply-changes-button.png[]. Your chart should now look like this: image::images/tutorial-visualize-map-2.png[] @@ -297,7 +371,7 @@ Write the following text in the field: The Markdown widget uses **markdown** syntax. > Blockquotes in Markdown use the > character. 
-Click the green *Apply changes* button to display the rendered Markdown in the preview pane: +Click the green *Apply changes* button image:images/apply-changes-button.png[] to display the rendered Markdown in the preview pane: image::images/tutorial-visualize-md-2.png[] diff --git a/docs/images/K4Refresh.png b/docs/images/K4Refresh.png index 953577c79e338..278a23acddb1e 100644 Binary files a/docs/images/K4Refresh.png and b/docs/images/K4Refresh.png differ diff --git a/docs/images/Start-Page.jpg b/docs/images/Start-Page.jpg deleted file mode 100644 index fb690aa2f37b3..0000000000000 Binary files a/docs/images/Start-Page.jpg and /dev/null differ diff --git a/docs/images/Start-Page.png b/docs/images/Start-Page.png new file mode 100644 index 0000000000000..756b6b5bdbd2d Binary files /dev/null and b/docs/images/Start-Page.png differ diff --git a/docs/images/apply-changes-button.png b/docs/images/apply-changes-button.png new file mode 100644 index 0000000000000..c45723877a51c Binary files /dev/null and b/docs/images/apply-changes-button.png differ diff --git a/docs/images/colorformatter.png b/docs/images/colorformatter.png new file mode 100644 index 0000000000000..df5dc34dd31e5 Binary files /dev/null and b/docs/images/colorformatter.png differ diff --git a/docs/images/filter-allbuttons.png b/docs/images/filter-allbuttons.png index d2759a652ed2c..4ed33b7f711f3 100644 Binary files a/docs/images/filter-allbuttons.png and b/docs/images/filter-allbuttons.png differ diff --git a/docs/images/filter-custom-json.png b/docs/images/filter-custom-json.png new file mode 100644 index 0000000000000..7c1426b804649 Binary files /dev/null and b/docs/images/filter-custom-json.png differ diff --git a/docs/images/filter-custom.png b/docs/images/filter-custom.png new file mode 100644 index 0000000000000..6671ddcdc6052 Binary files /dev/null and b/docs/images/filter-custom.png differ diff --git a/docs/images/tutorial-visualize-bar-2.png b/docs/images/tutorial-visualize-bar-2.png index e07527fa612a8..471922df2ac8a 100644 Binary files a/docs/images/tutorial-visualize-bar-2.png and b/docs/images/tutorial-visualize-bar-2.png differ diff --git a/docs/index.asciidoc b/docs/index.asciidoc index ed10554a0968e..ca50db3926ab8 100644 --- a/docs/index.asciidoc +++ b/docs/index.asciidoc @@ -1,9 +1,10 @@ [[kibana-guide]] = Kibana User Guide -:ref: http://www.elastic.co/guide/en/elasticsearch/reference/2.0/ -:shield: https://www.elastic.co/guide/en/shield/2.0 +:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current/ +:shield: https://www.elastic.co/guide/en/shield/current :k4issue: https://github.com/elastic/kibana/issues/ +:k4pull: https://github.com/elastic/kibana/pull/ include::introduction.asciidoc[] diff --git a/docs/introduction.asciidoc b/docs/introduction.asciidoc index 7defd9245c59e..f4e336104360c 100644 --- a/docs/introduction.asciidoc +++ b/docs/introduction.asciidoc @@ -13,10 +13,8 @@ dashboards that display changes to Elasticsearch queries in real time. Setting up Kibana is a snap. You can install Kibana and start exploring your Elasticsearch indices in minutes -- no code, no additional infrastructure required. -NOTE: This guide describes how to use Kibana 4.2. For information about what's new -in Kibana 4.2, see the <>. For earlier versions of Kibana 4, see the -http://www.elastic.co/guide/en/kibana/4.1/index.html[Kibana 4.1 User Guide]. For information about Kibana 3, see the -http://www.elastic.co/guide/en/kibana/3.0/index.html[Kibana 3 User Guide]. +NOTE: This guide describes how to use Kibana 4.3. 
For information about what's new +in Kibana 4.3, see the <>. [float] [[data-discovery]] diff --git a/docs/line.asciidoc b/docs/line.asciidoc index 90f67cf37dafb..3ab261c43260c 100644 --- a/docs/line.asciidoc +++ b/docs/line.asciidoc @@ -53,10 +53,16 @@ changes* button to keep your visualization in its current state. [float] [[bubble-chart]] -=== Bubble Charts +==== Bubble Charts You can convert a line chart visualization to a bubble chart by performing the following steps: . Click *Add Metrics* for the visualization's Y axis, then select *Dot Size*. . Select a metric aggregation from the drop-down list. . In the *Options* tab, uncheck the *Show Connecting Lines* box. . Click the *Apply changes* button. + +[float] +[[line-viewing-detailed-information]] +==== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/metric.asciidoc b/docs/metric.asciidoc index 3170a3b2961a2..8a813f7dba866 100644 --- a/docs/metric.asciidoc +++ b/docs/metric.asciidoc @@ -18,3 +18,9 @@ NOTE: In Elasticsearch releases 1.4.3 and later, this functionality requires you The availability of these options varies depending on the aggregation you choose. Click the *Options* tab to change the font used to display the metrics. + +[float] +[[metric-viewing-detailed-information]] +==== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/pie.asciidoc b/docs/pie.asciidoc index 145da7a5082da..5ad500f4ee435 100644 --- a/docs/pie.asciidoc +++ b/docs/pie.asciidoc @@ -80,3 +80,9 @@ Select the *Options* tab to change the following aspects of the table: After changing options, click the green *Apply changes* button to update your visualization, or the grey *Discard changes* button to keep your visualization in its current state. + +[float] +[[pie-viewing-detailed-information]] +==== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/plugins.asciidoc b/docs/plugins.asciidoc index 8352a03822401..47e2ad15b0656 100644 --- a/docs/plugins.asciidoc +++ b/docs/plugins.asciidoc @@ -121,6 +121,7 @@ path to that configuration file each time you use the `bin/kibana plugin` comman 74:: I/O error 70:: Other error +[float] [[plugin-switcher]] == Switching Plugin Functionality diff --git a/docs/production.asciidoc b/docs/production.asciidoc index 2fb4576e4ae07..13eaffc29ec1b 100644 --- a/docs/production.asciidoc +++ b/docs/production.asciidoc @@ -54,14 +54,14 @@ the dynamic mapping feature in Elasticsearch. Kibana supports SSL encryption for both client requests and the requests the Kibana server sends to Elasticsearch. -To encrypt communications between the browser and the Kibana server, you configure the `ssl_key_file `and +To encrypt communications between the browser and the Kibana server, you configure the `ssl_key_file` and `ssl_cert_file` properties in `kibana.yml`: [source,text] ---- # SSL for outgoing requests from the Kibana Server (PEM formatted) -ssl_key_file: /path/to/your/server.key -ssl_cert_file: /path/to/your/server.crt +server.ssl.key: /path/to/your/server.key +server.ssl.cert: /path/to/your/server.crt ---- If you are using Shield or a proxy that provides an HTTPS endpoint for Elasticsearch, @@ -97,7 +97,7 @@ visualization. To configure access to Kibana using Shield, you create Shield roles for Kibana using the `kibana4` default role as a starting point. For more -information, see {shield}/kibana.html#using-kibana4-with-shield[Shield with Kibana 4][Using Shield with Kibana 4]. 
+information, see {shield}/kibana.html#using-kibana4-with-shield[Using Kibana 4 with Shield]. [float] [[load-balancing]] diff --git a/docs/releasenotes.asciidoc b/docs/releasenotes.asciidoc index 348d9f9f4675d..0e7065d0c749f 100644 --- a/docs/releasenotes.asciidoc +++ b/docs/releasenotes.asciidoc @@ -1,35 +1,27 @@ [[releasenotes]] -== Kibana 4.2 Release Notes +== Kibana 4.3 Release Notes -* Starting with the 2.0 release of Elasticsearch, you can https://github.com/elastic/elasticsearch/pull/9670[no longer] -reference fields by leafnode. For example, the `geoip.country_code` field can no longer support searches such as -`country_code:US`. Instead, use the entire field name. This change affects saved searches, visualizations, and dashboards -from previous versions of Kibana. +The 4.3 release of Kibana requires Elasticsearch 2.1 or later. + +Using event times to create index names is *deprecated* in this release of Kibana. Support for this functionality will be +removed entirely in the next major Kibana release. Elasticsearch 2.1 includes sophisticated date parsing APIs that Kibana +uses to determine date information, removing the need to specify dates in the index pattern name. [float] -[[enhancementss]] +[[enhancements]] == Enhancements -* {k4issue}2855[Issue 2855]: Configurable Kibana log levels. -* {k4issue}4204[Issue 4204]: Plugin manager for Kibana server. -* {k4issue}4218[Issue 4218]: New interface for switching between plugins. -* {k4issue}3270[Issue 3270]: Server status page. -* {k4issue}2245[Issue 2245]: Customizable visualization descriptions. -* {k4issue}2906[Issue 2906]: Dark theme added to dashboards. -* {k4issue}1902[Issue 1902]: Tile maps support configurable WMS-compliant map servers. -* {k4issue}2245[Issue 2245]: Customize legends for the Filter aggregation. -* {k4issue}4817[Issue 4817]: Advanced Settings option maps values to colors. -* {k4issue}2874[Issue 2874]: Advanced Settings option to change default timepicker value. -* {k4issue}2760[Issue 2760]: Adds ability to filter by clicking on a legend. -* {k4issue}485[Issue 485]: Adds color rationalization across visualizations. +* {k4issue}5109[Issue 5109]: Adds custom JSON and filter alias naming for filters. +* {k4issue}1726[Issue 1726]: Adds a color field formatter for value ranges in numeric fields. +* {k4issue}4342[Issue 4342]: Increased performance for wildcard indices. +* {k4issue}1600[Issue 1600]: Support for global time zones. +* {k4pull}5275[Pull Request 5275]: Highlighting values in Discover can now be disabled. +* {k4issue}5212[Issue 5212]: Adds support for multiple certificate authorities. +* {k4issue}2716[Issue 2716]: The open/closed position of the spy panel now persists across UI state changes. [float] [[bugfixes]] == Bug Fixes -* {k4issue}4902[Issue 4902]: Fixes a problem with object export logic. -* {k4issue}3694[Issue 3694]: Improves timezone handling in visualizations as a result of Olson timezone support in ES. -* {k4issue}4261[Issue 4261]: Improves highlighting on embedded searches in dashboards. -* {k4issue}4816[Issue 4816]: Improves history limit setting to clear past history. -* {k4issue}4670[Issue 4670]: Fixes issue with range aggregation. -* {k4issue}4244[Issue 4244]: Fixes issue with pie chart labels. \ No newline at end of file +* {k4issue}5165[Issue 5165]: Resolves a display error in embedded views. +* {k4issue}5021[Issue 5021]: Improves visualization dimming for dashboards with auto-refresh. 
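To tie the SSL changes above together, here is a minimal `kibana.yml` sketch combining the renamed `server.ssl.*` properties with the multiple-certificate-authority support called out in these release notes. The property names come from this guide; the certificate paths are placeholders, and the YAML list syntax shown for `elasticsearch.ssl.ca` is an assumption rather than a definitive configuration.

[source,yaml]
----
# SSL for browser-to-Kibana traffic (replaces ssl_key_file / ssl_cert_file).
server.ssl.key: /path/to/your/server.key
server.ssl.cert: /path/to/your/server.crt

# Verify the Elasticsearch certificate against one or more CAs
# (paths below are placeholders).
elasticsearch.ssl.ca:
  - /path/to/your/first-ca.pem
  - /path/to/your/second-ca.pem
elasticsearch.ssl.verify: true
----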
diff --git a/docs/settings.asciidoc b/docs/settings.asciidoc index 786bb8c71b09c..4a89fc38fa514 100644 --- a/docs/settings.asciidoc +++ b/docs/settings.asciidoc @@ -18,14 +18,6 @@ index names that match the specified pattern. An asterisk (*) in the pattern matches zero or more characters. For example, the pattern `myindex-*` matches all indices whose names start with `myindex-`, such as `myindex-1` and `myindex-2`. -If you use event times to create index names (for example, if you're pushing data into Elasticsearch from Logstash), -the index pattern can also contain a date format. -In this case, the static text in the pattern must be enclosed in brackets, and you specify the date format using the -tokens described in <>. - -For example, `[logstash-]YYYY.MM.DD` matches all indices whose names have a timestamp of the form `YYYY.MM.DD` appended -to the prefix `logstash-`, such as `logstash-2015.01.31` and `logstash-2015-02-01`. - An index pattern can also simply be the name of a single index. To create an index pattern to connect to Elasticsearch: @@ -43,16 +35,17 @@ list. contains time-based events* option and select the index field that contains the timestamp. Kibana reads the index mapping to list all of the fields that contain a timestamp. -. If new indices are generated periodically and have a timestamp appended to the name, select the *Use event times to -create index names* option and select the *Index pattern interval*. This enables Kibana to search only those indices -that could possibly contain data in the time range you specify. This is primarily applicable if you are using Logstash -to feed data into Elasticsearch. - . Click *Create* to add the index pattern. . To designate the new pattern as the default pattern to load when you view the Discover tab, click the *favorite* button. +To use an event time in an index name, enclose the static text in the pattern and specify the date format using the +tokens described in the following table. + +For example, `[logstash-]YYYY.MM.DD` matches all indices whose names have a timestamp of the form `YYYY.MM.DD` appended +to the prefix `logstash-`, such as `logstash-2015.01.31` and `logstash-2015-02-01`. + [float] [[date-format-tokens]] .Date Format Tokens @@ -155,6 +148,46 @@ the field's format from the *Format* drop-down. Format options vary based on the You can also set the field's popularity value in the *Popularity* text entry box to any desired value. Click the *Update Field* button to confirm your changes or *Cancel* to return to the list of fields. +Kibana has https://www.elastic.co/blog/kibana-4-1-field-formatters[field formatters] for the following field types: + +==== String Field Formatters + +String fields support the `String` and `Url` formatters. + +include::string-formatter.asciidoc[] + +include::url-formatter.asciidoc[] + +==== Date Field Formatters + +Date fields support the `Date`, `Url`, and `String` formatters. + +The `Date` formatter enables you to choose the display format of date stamps using the http://moment.js[moment.js] +standard format definitions. + +include::string-formatter.asciidoc[] + +include::url-formatter.asciidoc[] + +==== Geographic Point Field Formatters + +Geographic point fields support the `String` formatter. + +include::string-formatter.asciidoc[] + +==== Numeric Field Formatters + +Numeric fields support the `Url`, `String`, `Bytes`, `Number`, `Percentage`, and `Color` formatters. 
+ +include::string-formatter.asciidoc[] + +include::url-formatter.asciidoc[] + +include::color-formatter.asciidoc[] + +The `Bytes`, `Number`, and `Percentage` formatters enable you to choose the display formats of numbers in this field using +the https://adamwdraper.github.io/Numeral-js/[numeral.js] standard format definitions. + [float] [[create-scripted-field]] === Creating a Scripted Field @@ -215,13 +248,10 @@ To delete a scripted field: [[advanced-options]] === Setting Advanced Options -The Advanced Settings page enables you to directly edit settings that control the behavior of the Kibana application. +The *Advanced Settings* page enables you to directly edit settings that control the behavior of the Kibana application. For example, you can change the format used to display dates, specify the default index pattern, and set the precision for displayed decimal values. -WARNING: Changing advanced settings can have unintended consequences. If you aren't sure what you're doing, it's best -to leave these settings as-is. - To set advanced options: . Go to *Settings > Advanced*. @@ -229,62 +259,7 @@ To set advanced options: . Enter a new value for the option. . Click the *Save* button. -[float] -[[managing-saved-objects]] -=== Managing Saved Searches, Visualizations, and Dashboards - -You can view, edit, and delete saved searches, visualizations, and dashboards from *Settings > Objects*. You can also -export or import sets of searches, visualizations, and dashboards. - -Viewing a saved object displays the selected item in the *Discover*, *Visualize*, or *Dashboard* page. To view a saved -object: - -. Go to *Settings > Objects*. -. Select the object you want to view. -. Click the *View* button. - -Editing a saved object enables you to directly modify the object definition. You can change the name of the object, add -a description, and modify the JSON that defines the object's properties. - -If you attempt to access an object whose index has been deleted, Kibana displays its Edit Object page. You can: - -* Recreate the index so you can continue using the object. -* Delete the object and recreate it using a different index. -* Change the index name referenced in the object's `kibanaSavedObjectMeta.searchSourceJSON` to point to an existing -index pattern. This is useful if the index you were working with has been renamed. - -WARNING: No validation is performed for object properties. Submitting invalid changes will render the object unusable. -Generally, you should use the *Discover*, *Visualize*, or *Dashboard* pages to create new objects instead of directly -editing existing ones. - -To edit a saved object: - -. Go to *Settings > Objects*. -. Select the object you want to edit. -. Click the *Edit* button. -. Make your changes to the object definition. -. Click the *Save Object* button. - -To delete a saved object: - -. Go to *Settings > Objects*. -. Select the object you want to delete. -. Click the *Delete* button. -. Confirm that you really want to delete the object. - -To export a set of objects: - -. Go to *Settings > Objects*. -. Select the type of object you want to export. You can export a set of dashboards, searches, or visualizations. -. Click the selection box for the objects you want to export, or click the *Select All* box. -. Click *Export* to select a location to write the exported JSON. - -To import a set of objects: - -. Go to *Settings > Objects*. -. Click *Import* to navigate to the JSON file representing the set of objects to import. -. 
Click *Open* after selecting the JSON file. -. If any objects in the set would overwrite objects already present in Kibana, confirm the overwrite. +include::advanced-settings.asciidoc[] [[kibana-server-properties]] === Setting Kibana Server Properties @@ -296,7 +271,7 @@ you'll need to update your `kibana.yml` file. You can also enable SSL and set a deprecated[4.2, The names of several Kibana server properties changed in the 4.2 release of Kibana. The previous names remain as functional aliases, but are now deprecated and will be removed in a future release of Kibana] [horizontal] -.Kibana Server Properties +.Kibana Server Properties Changed in the 4.2 Release `server.port` added[4.2]:: The port that the Kibana server runs on. + *alias*: `port` deprecated[4.2] @@ -331,7 +306,7 @@ deprecated[4.2, The names of several Kibana server properties changed in the 4.2 `elasticsearch.password` added[4.2]:: This parameter specifies the password for Elasticsearch instances that use HTTP basic authentication. Kibana users still need to authenticate with Elasticsearch, which is proxied through the Kibana server. + -*alias*: `kibana_elasticsearch_password` deprecated [4.2] +*alias*: `kibana_elasticsearch_password` deprecated[4.2] `elasticsearch.username` added[4.2]:: This parameter specifies the username for Elasticsearch instances that use HTTP basic authentication. Kibana users still need to authenticate with Elasticsearch, which is proxied through the Kibana server. + @@ -400,7 +375,8 @@ deprecated[4.2, The names of several Kibana server properties changed in the 4.2 + *default*: `true` -`elasticsearch.ssl.ca` added[4.2]:: The path to the CA certificate for your Elasticsearch instance. Specify if you are using a self-signed certificate so the certificate can be verified. Disable `elasticsearch.ssl.verify` otherwise. +`elasticsearch.ssl.ca`:: An array of paths to the CA certificates for your Elasticsearch instance. Specify if +you are using a self-signed certificate so the certificate can be verified. Disable `elasticsearch.ssl.verify` otherwise. + *alias*: `ca` deprecated[4.2] @@ -421,3 +397,60 @@ deprecated[4.2, The names of several Kibana server properties changed in the 4.2 `logging.dest` added[4.2]:: The location where you want to store the Kibana's log output. If not specified, log output is written to standard output and not stored. Specifying a log file suppresses log writes to standard output. + *alias*: `log_file` deprecated[4.2] + +[[managing-saved-objects]] +=== Managing Saved Searches, Visualizations, and Dashboards + +You can view, edit, and delete saved searches, visualizations, and dashboards from *Settings > Objects*. You can also +export or import sets of searches, visualizations, and dashboards. + +Viewing a saved object displays the selected item in the *Discover*, *Visualize*, or *Dashboard* page. To view a saved +object: + +. Go to *Settings > Objects*. +. Select the object you want to view. +. Click the *View* button. + +Editing a saved object enables you to directly modify the object definition. You can change the name of the object, add +a description, and modify the JSON that defines the object's properties. + +If you attempt to access an object whose index has been deleted, Kibana displays its Edit Object page. You can: + +* Recreate the index so you can continue using the object. +* Delete the object and recreate it using a different index. +* Change the index name referenced in the object's `kibanaSavedObjectMeta.searchSourceJSON` to point to an existing +index pattern. 
This is useful if the index you were working with has been renamed. + +WARNING: No validation is performed for object properties. Submitting invalid changes will render the object unusable. +Generally, you should use the *Discover*, *Visualize*, or *Dashboard* pages to create new objects instead of directly +editing existing ones. + +To edit a saved object: + +. Go to *Settings > Objects*. +. Select the object you want to edit. +. Click the *Edit* button. +. Make your changes to the object definition. +. Click the *Save Object* button. + +To delete a saved object: + +. Go to *Settings > Objects*. +. Select the object you want to delete. +. Click the *Delete* button. +. Confirm that you really want to delete the object. + +To export a set of objects: + +. Go to *Settings > Objects*. +. Select the type of object you want to export. You can export a set of dashboards, searches, or visualizations. +. Click the selection box for the objects you want to export, or click the *Select All* box. +. Click *Export* to select a location to write the exported JSON. + +To import a set of objects: + +. Go to *Settings > Objects*. +. Click *Import* to navigate to the JSON file representing the set of objects to import. +. Click *Open* after selecting the JSON file. +. If any objects in the set would overwrite objects already present in Kibana, confirm the overwrite. + diff --git a/docs/setup.asciidoc b/docs/setup.asciidoc index 80f8bbbec53ca..60a3124640bc9 100644 --- a/docs/setup.asciidoc +++ b/docs/setup.asciidoc @@ -3,7 +3,7 @@ You can set up Kibana and start exploring your Elasticsearch indices in minutes. All you need is: -* Elasticsearch 1.4.4 or later +* Elasticsearch 2.1 or later * A modern web browser - http://www.elastic.co/subscriptions/matrix#matrix_browsers[Supported Browsers]. * Information about your Elasticsearch installation: ** URL of the Elasticsearch instance you want to connect to. @@ -71,20 +71,21 @@ To configure the Elasticsearch indices you want to access with Kibana: . Point your browser at port 5601 to access the Kibana UI. For example, `localhost:5601` or `http://YOURDOMAIN.com:5601`. + -image:images/Start-Page.jpg[Kibana start page] +image:images/Start-Page.png[Kibana start page] + . Specify an index pattern that matches the name of one or more of your Elasticsearch indices. By default, Kibana guesses that you're working with data being fed into Elasticsearch by Logstash. If that's the case, you can use the default `logstash-*` as your index pattern. The asterisk (*) matches zero or more characters in an index's name. If -your Elasticsearch indices follow some other naming convention, enter an appropriate pattern. The "pattern" can also +your Elasticsearch indices follow some other naming convention, enter an appropriate pattern. The "pattern" can also simply be the name of a single index. . Select the index field that contains the timestamp that you want to use to perform time-based comparisons. Kibana reads the index mapping to list all of the fields that contain a timestamp. If your index doesn't have time-based data, disable the *Index contains time-based events* option. -. If new indices are generated periodically and have a timestamp appended to the name, select the *Use event times to -create index names* option and select the *Index pattern interval*. This improves search performance by enabling Kibana -to search only those indices that could contain data in the time range you specify. 
This is primarily applicable if you -are using Logstash to feed data into Elasticsearch. ++ +WARNING: Using event times to create index names is *deprecated* in this release of Kibana. Support for this functionality +will be removed entirely in the next major Kibana release. Elasticsearch 2.1 includes sophisticated date parsing APIs that +Kibana uses to determine date information, removing the need to specify dates in the index pattern name. ++ . Click *Create* to add the index pattern. This first pattern is automatically configured as the default. When you have more than one index pattern, you can designate which one to use as the default from *Settings > Indices*. diff --git a/docs/string-formatter.asciidoc b/docs/string-formatter.asciidoc new file mode 100644 index 0000000000000..0cde079122ba4 --- /dev/null +++ b/docs/string-formatter.asciidoc @@ -0,0 +1,10 @@ +The `String` field formatter can apply the following transformations to the field's contents: + +* Convert to lowercase +* Convert to uppercase +* Apply the short dots transformation, which replaces the content before a `.` character with the first character of +that content, as in the following example: + +[horizontal] +*Original*:: *Becomes* +`com.organizations.project.ClassName`:: `c.o.p.ClassName` \ No newline at end of file diff --git a/docs/tilemap.asciidoc b/docs/tilemap.asciidoc index 62a0283124fdb..804abea1219b6 100644 --- a/docs/tilemap.asciidoc +++ b/docs/tilemap.asciidoc @@ -26,7 +26,7 @@ Coordinates* on a single chart. A multiple chart split must run before any other Tile maps use the *Geohash* aggregation as their initial aggregation. Select a field, typically coordinates, from the drop-down. The *Precision* slider determines the granularity of the results displayed on the map. See the documentation -for the {ref}search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[geohash grid] +for the {ref}/search-aggregations-bucket-geohashgrid-aggregation.html#_cell_dimensions_at_the_equator[geohash grid] aggregation for details on the area specified by each precision level. Kibana supports a maximum geohash length of 7. NOTE: Higher precisions increase memory usage for the browser displaying Kibana as well as for the underlying @@ -134,3 +134,9 @@ across the map to zoom in on the selection. geohash buckets that have at least one result. * Click the *Latitude/Longitude Filter* image:images/viz-lat-long-filter.png[] button, then drag a bounding box across the map, to create a filter for the box coordinates. + +[float] +[[tilemap-viewing-detailed-information]] +==== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/url-formatter.asciidoc b/docs/url-formatter.asciidoc new file mode 100644 index 0000000000000..819523c6cbf53 --- /dev/null +++ b/docs/url-formatter.asciidoc @@ -0,0 +1,28 @@ +The `Url` field formatter can take on the following types: + +* The *Link* type turns the contents of the field into a URL. +* The *Image* type can be used to specify an image directory where a specified image is located. + +You can customize either type of URL field format with templates. A _URL template_ enables you to add specific values +to a partial URL. Use the string `{{value}}` to add the contents of the field to a fixed URL.
+ +For example, when: + +* A field contains a user ID +* That field uses the `Url` field formatter +* The URI template is `http://company.net/profiles?user_id={­{value}­}` + +The resulting URL replaces `{{value}}` with the user ID from the field. + +The `{{value}}` template string URL-encodes the contents of the field. When a field encoded into a URL contains +non-ASCII characters, these characters are replaced with a `%` character and the appropriate hexadecimal code. For +example, field contents `users/admin` result in the URL template adding `users%2Fadmin`. + +When the formatter type is set to *Image*, the `{{value}}` template string specifies the name of an image at the +specified URI. + +In order to pass unescaped values directly to the URL, use the `{{rawValue}}` string. + +A _Label Template_ enables you to specify a text string that displays instead of the raw URL. You can use the +`{{value}}` template string normally in label templates. You can also use the `{{url}}` template string to display +the formatted URL. diff --git a/docs/vertbar.asciidoc b/docs/vertbar.asciidoc index 08687d85802db..d14b31fbff99d 100644 --- a/docs/vertbar.asciidoc +++ b/docs/vertbar.asciidoc @@ -3,7 +3,28 @@ This chart's Y axis is the _metrics_ axis. The following aggregations are available for this axis: -include::y-axis-aggs.asciidoc[] +*Count*:: The {ref}/search-aggregations-metrics-valuecount-aggregation.html[_count_] aggregation returns a raw count of +the elements in the selected index pattern. +*Average*:: This aggregation returns the {ref}/search-aggregations-metrics-avg-aggregation.html[_average_] of a numeric +field. Select a field from the drop-down. +*Sum*:: The {ref}/search-aggregations-metrics-sum-aggregation.html[_sum_] aggregation returns the total sum of a numeric +field. Select a field from the drop-down. +*Min*:: The {ref}/search-aggregations-metrics-min-aggregation.html[_min_] aggregation returns the minimum value of a +numeric field. Select a field from the drop-down. +*Max*:: The {ref}/search-aggregations-metrics-max-aggregation.html[_max_] aggregation returns the maximum value of a +numeric field. Select a field from the drop-down. +*Unique Count*:: The {ref}/search-aggregations-metrics-cardinality-aggregation.html[_cardinality_] aggregation returns +the number of unique values in a field. Select a field from the drop-down. +*Percentiles*:: The {ref}/search-aggregations-metrics-percentile-aggregation.html[_percentile_] aggregation divides the +values in a numeric field into percentile bands that you specify. Select a field from the drop-down, then specify one +or more ranges in the *Percentiles* fields. Click the *X* to remove a percentile field. Click *+ Add* to add a +percentile field. +*Percentile Rank*:: The {ref}/search-aggregations-metrics-percentile-rank-aggregation.html[_percentile ranks_] +aggregation returns the percentile rankings for the values in the numeric field you specify. Select a numeric field +from the drop-down, then specify one or more percentile rank values in the *Values* fields. Click the *X* to remove a +values field. Click *+Add* to add a values field. + +You can add an aggregation by clicking the *+ Add Aggregation* button. The _buckets_ aggregations determine what information is being retrieved from your data set. @@ -45,3 +66,9 @@ Checkboxes are available to enable and disable the following behaviors: *Show Legend*:: Check this box to enable the display of a legend next to the chart. 
*Scale Y-Axis to Data Bounds*:: The default Y axis bounds are zero and the maximum value returned in the data. Check this box to change both upper and lower bounds to match the values returned in the data. + +[float] +[[vertbar-viewing-detailed-information]] +==== Viewing Detailed Information + +include::visualization-raw-data.asciidoc[] diff --git a/docs/visualization-raw-data.asciidoc b/docs/visualization-raw-data.asciidoc new file mode 100644 index 0000000000000..0c882a6854ea2 --- /dev/null +++ b/docs/visualization-raw-data.asciidoc @@ -0,0 +1,22 @@ +To display the raw data behind the visualization, click the bar at the bottom of the container. Tabs with detailed +information about the raw data replace the visualization: + +.Table +A representation of the underlying data, presented as a paginated data grid. You can sort the items +in the table by clicking on the table headers at the top of each column. + +.Request +The raw request used to query the server, presented in JSON format. + +.Response +The raw response from the server, presented in JSON format. + +.Statistics +A summary of the statistics related to the request and the response, presented as a data grid. The data +grid includes the query duration, the request duration, the total number of records found on the server, and the +index pattern used to make the query. + +To export the raw data behind the visualization as a comma-separated-values (CSV) file, click on either the +*Raw* or *Formatted* links at the bottom of any of the detailed information tabs. A raw export contains the data as it +is stored in Elasticsearch. A formatted export contains the results of any applicable Kibana +<>. diff --git a/docs/visualize.asciidoc b/docs/visualize.asciidoc index f1c571352f60c..780e182b5bf52 100644 --- a/docs/visualize.asciidoc +++ b/docs/visualize.asciidoc @@ -98,12 +98,12 @@ colors, and row/column splits. For pie charts, use the metric for the slice size slices. Choose the metric aggregation for your visualization's Y axis, such as -{ref}search-aggregations-metrics-valuecount-aggregation.html[count], -{ref}search-aggregations-metrics-avg-aggregation.html[average], -{ref}search-aggregations-metrics-sum-aggregation.html[sum], -{ref}search-aggregations-metrics-min-aggregation.html[min], -{ref}search-aggregations-metrics-max-aggregation.html[max], or -{ref}search-aggregations-metrics-cardinality-aggregation.html[cardinality] +{ref}/search-aggregations-metrics-valuecount-aggregation.html[count], +{ref}/search-aggregations-metrics-avg-aggregation.html[average], +{ref}/search-aggregations-metrics-sum-aggregation.html[sum], +{ref}/search-aggregations-metrics-min-aggregation.html[min], +{ref}/search-aggregations-metrics-max-aggregation.html[max], or +{ref}/search-aggregations-metrics-cardinality-aggregation.html[cardinality] (unique count). Use bucket aggregations for the visualization's X axis, color slices, and row/column splits. Common bucket aggregations include date histogram, range, terms, filters, and significant terms. @@ -131,6 +131,9 @@ NOTE: Remember, each subsequent bucket slices the data from the previous bucket. To render the visualization on the _preview canvas_, click the green *Apply Changes* button at the top right of the Aggregation Builder. +You can learn more about aggregation and how altering the order of aggregations affects your visualizations +https://www.elastic.co/blog/kibana-aggregation-execution-order-and-you[here]. 
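The metric/bucket split described in `docs/visualize.asciidoc` above maps onto nested Elasticsearch aggregations: bucket aggregations partition the documents, and each metric is computed per bucket. The following is a rough, hypothetical sketch of the kind of request a "date histogram bucket with an average metric" configuration corresponds to, not the exact request Kibana builds; the index name, the `@timestamp` and `bytes` fields, and the interval are placeholders.

```js
// Hypothetical example: a date histogram bucket (X axis) with an average
// metric (Y axis), expressed directly against Elasticsearch. Index and field
// names are placeholders, not values taken from the documentation.
var elasticsearch = require('elasticsearch');
var client = new elasticsearch.Client({ host: 'http://localhost:9200' });

var body = {
  size: 0,
  aggs: {
    per_hour: {
      date_histogram: { field: '@timestamp', interval: '1h' },
      aggs: {
        avg_bytes: { avg: { field: 'bytes' } }
      }
    }
  }
};

client.search({ index: 'logstash-*', body: body }).then(function (resp) {
  // Each bucket carries its own copy of the metric sub-aggregation.
  resp.aggregations.per_hour.buckets.forEach(function (bucket) {
    console.log(bucket.key_as_string, bucket.avg_bytes.value);
  });
});
```

Reversing the nesting, for example wrapping the date histogram inside a terms bucket instead of the other way around, changes which documents each metric is computed over; that is the execution-order effect discussed in the blog post linked above.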
+ [float] [[visualize-filters]] include::filter-pinning.asciidoc[] diff --git a/docs/x-axis-aggs.asciidoc b/docs/x-axis-aggs.asciidoc index 71aac994f98e3..a39eb8a48a20b 100644 --- a/docs/x-axis-aggs.asciidoc +++ b/docs/x-axis-aggs.asciidoc @@ -14,7 +14,7 @@ down to one second. *Histogram*:: A standard {ref}search-aggregations-bucket-histogram-aggregation.html[_histogram_] is built from a numeric field. Specify an integer interval for this field. Select the *Show empty buckets* checkbox to include empty intervals in the histogram. -*Range*:: With a {ref}search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges +*Range*:: With a {ref}/search-aggregations-bucket-range-aggregation.html[_range_] aggregation, you can specify ranges of values for a numeric field. Click *Add Range* to add a set of range endpoints. Click the red *(x)* symbol to remove a range. *Date Range*:: A {ref}search-aggregations-bucket-daterange-aggregation.html[_date range_] aggregation reports values @@ -26,12 +26,12 @@ specify ranges of IPv4 addresses. Click *Add Range* to add a set of range endpoi remove a range. *Terms*:: A {ref}search-aggregations-bucket-terms-aggregation.html[_terms_] aggregation enables you to specify the top or bottom _n_ elements of a given field to display, ordered by count or a custom metric. -*Filters*:: You can specify a set of {ref}search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. +*Filters*:: You can specify a set of {ref}/search-aggregations-bucket-filters-aggregation.html[_filters_] for the data. You can specify a filter as a query string or in JSON format, just as in the Discover search bar. Click *Add Filter* to add another filter. Click the images:labelbutton.png[] *label* button to open the label field, where you can type in a name to display on the visualization. *Significant Terms*:: Displays the results of the experimental -{ref}search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. +{ref}/search-aggregations-bucket-significantterms-aggregation.html[_significant terms_] aggregation. Once you've specified an X axis aggregation, you can define sub-aggregations to refine the visualization. 
Click *+ Add Sub Aggregation* to define a sub-aggregation, then choose *Split Area* or *Split Chart*, then select a sub-aggregation diff --git a/package.json b/package.json index 73be03bd812ff..9a71d138a483c 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,7 @@ "dashboarding" ], "private": false, - "version": "4.3.0-snapshot", + "version": "5.0.0-snapshot", "build": { "number": 8467, "sha": "6cb7fec4e154faa0a4a3fee4b33dfef91b9870d9" diff --git a/src/plugins/elasticsearch/index.js b/src/plugins/elasticsearch/index.js index ba917218677c5..00b1cf0b4b4ad 100644 --- a/src/plugins/elasticsearch/index.js +++ b/src/plugins/elasticsearch/index.js @@ -24,7 +24,7 @@ module.exports = function (kibana) { key: Joi.string() }).default(), apiVersion: Joi.string().default('2.0'), - minimumVerison: Joi.string().default('2.0.0') + minimumVersion: Joi.string().default('2.1.0') }).default(); }, diff --git a/src/plugins/elasticsearch/lib/__tests__/check_es_version.js b/src/plugins/elasticsearch/lib/__tests__/check_es_version.js index 25d529f77d86a..8e9b4e92bb8ba 100644 --- a/src/plugins/elasticsearch/lib/__tests__/check_es_version.js +++ b/src/plugins/elasticsearch/lib/__tests__/check_es_version.js @@ -10,7 +10,7 @@ describe('plugins/elasticsearch', function () { var plugin; beforeEach(function () { - var get = sinon.stub().withArgs('elasticserach.minimumVerison').returns('1.4.3'); + var get = sinon.stub().withArgs('elasticserach.minimumVersion').returns('1.4.3'); var config = function () { return { get: get }; }; server = { log: _.noop, @@ -22,7 +22,8 @@ describe('plugins/elasticsearch', function () { }, status: { red: sinon.stub() - } + }, + url: 'http://localhost:9210' } } }; diff --git a/src/plugins/elasticsearch/lib/__tests__/health_check.js b/src/plugins/elasticsearch/lib/__tests__/health_check.js index 4394ebda2da10..70e8e9449e17d 100644 --- a/src/plugins/elasticsearch/lib/__tests__/health_check.js +++ b/src/plugins/elasticsearch/lib/__tests__/health_check.js @@ -39,7 +39,7 @@ describe('plugins/elasticsearch', function () { nodes: { 'node-01': { version: '1.5.0', - http_address: 'inet[/127.0.0.1:9200]', + http_address: 'inet[/127.0.0.1:9210]', ip: '127.0.0.1' } } @@ -55,7 +55,7 @@ describe('plugins/elasticsearch', function () { }); it('should set the cluster green if everything is ready', function () { - get.withArgs('elasticsearch.minimumVerison').returns('1.4.4'); + get.withArgs('elasticsearch.minimumVersion').returns('1.4.4'); get.withArgs('kibana.index').returns('.my-kibana'); client.ping.returns(Promise.resolve()); client.cluster.health.returns(Promise.resolve({ timed_out: false, status: 'green' })); @@ -73,8 +73,8 @@ describe('plugins/elasticsearch', function () { it('should set the cluster red if the ping fails, then to green', function () { - get.withArgs('elasticsearch.url').returns('http://localhost:9200'); - get.withArgs('elasticsearch.minimumVerison').returns('1.4.4'); + get.withArgs('elasticsearch.url').returns('http://localhost:9210'); + get.withArgs('elasticsearch.minimumVersion').returns('1.4.4'); get.withArgs('kibana.index').returns('.my-kibana'); client.ping.onCall(0).returns(Promise.reject(new NoConnections())); client.ping.onCall(1).returns(Promise.resolve()); @@ -85,7 +85,7 @@ describe('plugins/elasticsearch', function () { expect(plugin.status.yellow.args[0][0]).to.be('Waiting for Elasticsearch'); sinon.assert.calledOnce(plugin.status.red); expect(plugin.status.red.args[0][0]).to.be( - 'Unable to connect to Elasticsearch at http://localhost:9200. Retrying in 2.5 seconds.' 
+ 'Unable to connect to Elasticsearch at http://localhost:9210. Retrying in 2.5 seconds.' ); sinon.assert.calledTwice(client.ping); sinon.assert.calledOnce(client.nodes.info); @@ -97,8 +97,8 @@ describe('plugins/elasticsearch', function () { }); it('should set the cluster red if the health check status is red, then to green', function () { - get.withArgs('elasticsearch.url').returns('http://localhost:9200'); - get.withArgs('elasticsearch.minimumVerison').returns('1.4.4'); + get.withArgs('elasticsearch.url').returns('http://localhost:9210'); + get.withArgs('elasticsearch.minimumVersion').returns('1.4.4'); get.withArgs('kibana.index').returns('.my-kibana'); client.ping.returns(Promise.resolve()); client.cluster.health.onCall(0).returns(Promise.resolve({ timed_out: false, status: 'red' })); @@ -120,8 +120,8 @@ describe('plugins/elasticsearch', function () { }); it('should set the cluster yellow if the health check timed_out and create index', function () { - get.withArgs('elasticsearch.url').returns('http://localhost:9200'); - get.withArgs('elasticsearch.minimumVerison').returns('1.4.4'); + get.withArgs('elasticsearch.url').returns('http://localhost:9210'); + get.withArgs('elasticsearch.minimumVersion').returns('1.4.4'); get.withArgs('kibana.index').returns('.my-kibana'); client.ping.returns(Promise.resolve()); client.cluster.health.onCall(0).returns(Promise.resolve({ timed_out: true, status: 'red' })); diff --git a/src/plugins/elasticsearch/lib/__tests__/routes.js b/src/plugins/elasticsearch/lib/__tests__/routes.js index 204ff1603a4ee..5c27598419e9d 100644 --- a/src/plugins/elasticsearch/lib/__tests__/routes.js +++ b/src/plugins/elasticsearch/lib/__tests__/routes.js @@ -27,6 +27,9 @@ describe('plugins/elasticsearch', function () { }, optimize: { enabled: false + }, + elasticsearch: { + url: 'http://localhost:9210' } }); diff --git a/src/plugins/elasticsearch/lib/__tests__/validate.js b/src/plugins/elasticsearch/lib/__tests__/validate.js index e2cb6e2b6ca09..397fa234f895c 100644 --- a/src/plugins/elasticsearch/lib/__tests__/validate.js +++ b/src/plugins/elasticsearch/lib/__tests__/validate.js @@ -17,7 +17,10 @@ describe('plugins/elasticsearch', function () { server: { autoListen: false }, plugins: { scanDirs: [ fromRoot('src/plugins') ] }, logging: { quiet: true }, - optimize: { enabled: false } + optimize: { enabled: false }, + elasticsearch: { + url: 'http://localhost:9210' + } }); return kbnServer.ready() diff --git a/src/plugins/elasticsearch/lib/check_es_version.js b/src/plugins/elasticsearch/lib/check_es_version.js index 969ac62c535d2..ad0d4b478c9aa 100644 --- a/src/plugins/elasticsearch/lib/check_es_version.js +++ b/src/plugins/elasticsearch/lib/check_es_version.js @@ -7,7 +7,7 @@ module.exports = function (server) { server.log(['plugin', 'debug'], 'Checking Elasticsearch version'); var client = server.plugins.elasticsearch.client; - var minimumElasticsearchVersion = server.config().get('elasticsearch.minimumVerison'); + var minimumElasticsearchVersion = server.config().get('elasticsearch.minimumVersion'); return client.nodes.info() .then(function (info) { diff --git a/src/plugins/kibana/public/discover/controllers/discover.js b/src/plugins/kibana/public/discover/controllers/discover.js index 2b325101cbe5d..db10c46952754 100644 --- a/src/plugins/kibana/public/discover/controllers/discover.js +++ b/src/plugins/kibana/public/discover/controllers/discover.js @@ -193,6 +193,9 @@ define(function (require) { }); $scope.$watch('vis.aggs', function () { + // no timefield, no vis, nothing to 
update + if (!$scope.opts.timefield) return; + var buckets = $scope.vis.aggs.bySchemaGroup.buckets; if (buckets && buckets.length === 1) { diff --git a/src/server/__tests__/basePath.js b/src/server/__tests__/basePath.js index e9b362df40be9..3c3dff771b723 100644 --- a/src/server/__tests__/basePath.js +++ b/src/server/__tests__/basePath.js @@ -18,6 +18,9 @@ describe('Server basePath config', function () { plugins: { scanDirs: [src('plugins')] }, logging: { quiet: true }, optimize: { enabled: false }, + elasticsearch: { + url: 'http://localhost:9210' + } }); await kbnServer.ready(); return kbnServer; diff --git a/src/server/http/__tests__/index.js b/src/server/http/__tests__/index.js new file mode 100644 index 0000000000000..2b89e2f7172c4 --- /dev/null +++ b/src/server/http/__tests__/index.js @@ -0,0 +1,39 @@ +import expect from 'expect.js'; +import KbnServer from '../../KbnServer'; + +describe('cookie validation', function () { + let kbnServer; + beforeEach(function () { + kbnServer = new KbnServer(); + return kbnServer.ready(); + }); + afterEach(function () { + return kbnServer.close(); + }); + + it('allows non-strict cookies', function (done) { + kbnServer.server.inject({ + method: 'GET', + url: '/', + headers: { + cookie: 'test:80=value;test_80=value' + } + }, (res) => { + expect(res.payload).not.to.contain('Invalid cookie header'); + done(); + }); + }); + + it('returns an error if the cookie can\'t be parsed', function (done) { + kbnServer.server.inject({ + method: 'GET', + url: '/', + headers: { + cookie: 'a' + } + }, (res) => { + expect(res.payload).to.contain('Invalid cookie header'); + done(); + }); + }); +}); diff --git a/src/server/http/__tests__/xsrf.js b/src/server/http/__tests__/xsrf.js index a38ca767edce4..39d069c5113c3 100644 --- a/src/server/http/__tests__/xsrf.js +++ b/src/server/http/__tests__/xsrf.js @@ -23,6 +23,9 @@ describe('xsrf request filter', function () { plugins: { scanDirs: [src('plugins')] }, logging: { quiet: true }, optimize: { enabled: false }, + elasticsearch: { + url: 'http://localhost:9210' + } }); await kbnServer.ready(); diff --git a/src/server/http/index.js b/src/server/http/index.js index 26ecf8ed41767..22f66ce0a5a3a 100644 --- a/src/server/http/index.js +++ b/src/server/http/index.js @@ -14,6 +14,9 @@ module.exports = function (kbnServer, server, config) { var connectionOptions = { host: config.get('server.host'), port: config.get('server.port'), + state: { + strictHeader: false + }, routes: { cors: config.get('server.cors') } @@ -23,7 +26,36 @@ module.exports = function (kbnServer, server, config) { if (config.get('server.ssl.key') && config.get('server.ssl.cert')) { connectionOptions.tls = { key: fs.readFileSync(config.get('server.ssl.key')), - cert: fs.readFileSync(config.get('server.ssl.cert')) + cert: fs.readFileSync(config.get('server.ssl.cert')), + // The default ciphers in node 0.12.x include insecure ciphers, so until + // we enforce a more recent version of node, we craft our own list + // @see https://github.com/nodejs/node/blob/master/src/node_constants.h#L8-L28 + ciphers: [ + 'ECDHE-RSA-AES128-GCM-SHA256', + 'ECDHE-ECDSA-AES128-GCM-SHA256', + 'ECDHE-RSA-AES256-GCM-SHA384', + 'ECDHE-ECDSA-AES256-GCM-SHA384', + 'DHE-RSA-AES128-GCM-SHA256', + 'ECDHE-RSA-AES128-SHA256', + 'DHE-RSA-AES128-SHA256', + 'ECDHE-RSA-AES256-SHA384', + 'DHE-RSA-AES256-SHA384', + 'ECDHE-RSA-AES256-SHA256', + 'DHE-RSA-AES256-SHA256', + 'HIGH', + '!aNULL', + '!eNULL', + '!EXPORT', + '!DES', + '!RC4', + '!MD5', + '!PSK', + '!SRP', + '!CAMELLIA' + ].join(':'), + // We use 
the server's cipher order rather than the client's to prevent + // the BEAST attack + honorCipherOrder: true }; } diff --git a/src/ui/UiExports.js b/src/ui/UiExports.js index 06150249076ef..a8a3f9bd203ed 100644 --- a/src/ui/UiExports.js +++ b/src/ui/UiExports.js @@ -85,11 +85,7 @@ class UiExports { return _.chain(patterns) .map(function (pattern) { - var matches = names.filter(matcher(pattern)); - if (!matches.length) { - throw new Error('Unable to find uiExports for pattern ' + pattern); - } - return matches; + return names.filter(matcher(pattern)); }) .flattenDeep() .reduce(function (found, name) { diff --git a/src/ui/__tests__/ui_exports.js b/src/ui/__tests__/ui_exports.js new file mode 100644 index 0000000000000..7113adffd95a1 --- /dev/null +++ b/src/ui/__tests__/ui_exports.js @@ -0,0 +1,26 @@ +import expect from 'expect.js'; + +import UiExports from '../UiExports'; + +describe('UiExports', function () { + describe('#find()', function () { + it('finds exports based on the passed export names', function () { + var uiExports = new UiExports({}); + uiExports.aliases.foo = ['a', 'b', 'c']; + uiExports.aliases.bar = ['d', 'e', 'f']; + + expect(uiExports.find(['foo'])).to.eql(['a', 'b', 'c']); + expect(uiExports.find(['bar'])).to.eql(['d', 'e', 'f']); + expect(uiExports.find(['foo', 'bar'])).to.eql(['a', 'b', 'c', 'd', 'e', 'f']); + }); + + it('allows query types that match nothing', function () { + var uiExports = new UiExports({}); + uiExports.aliases.foo = ['a', 'b', 'c']; + + expect(uiExports.find(['foo'])).to.eql(['a', 'b', 'c']); + expect(uiExports.find(['bar'])).to.eql([]); + expect(uiExports.find(['foo', 'bar'])).to.eql(['a', 'b', 'c']); + }); + }); +}); diff --git a/src/ui/public/courier/__tests__/requestQueue.js b/src/ui/public/courier/__tests__/requestQueue.js new file mode 100644 index 0000000000000..723f0f016ccd4 --- /dev/null +++ b/src/ui/public/courier/__tests__/requestQueue.js @@ -0,0 +1,92 @@ +import ngMock from 'ngMock'; +import expect from 'expect.js'; +import sinon from 'auto-release-sinon'; + +import RequestQueueProv from '../_request_queue'; +import SearchStrategyProv from '../fetch/strategy/search'; +import DocStrategyProv from '../fetch/strategy/doc'; + +describe('Courier Request Queue', function () { + let docStrategy; + let requestQueue; + let searchStrategy; + + beforeEach(ngMock.module('kibana')); + beforeEach(ngMock.inject(function (Private) { + docStrategy = Private(DocStrategyProv); + requestQueue = Private(RequestQueueProv); + searchStrategy = Private(SearchStrategyProv); + })); + + class MockReq { + constructor(strategy, startable = true) { + this.strategy = strategy; + this.source = {}; + this.canStart = sinon.stub().returns(startable); + } + } + + describe('#getStartable(strategy)', function () { + it('only returns requests that match one of the passed strategies', function () { + requestQueue.push( + new MockReq(docStrategy), + new MockReq(searchStrategy), + new MockReq(searchStrategy), + new MockReq(searchStrategy) + ); + + expect(requestQueue.getStartable(docStrategy)).to.have.length(1); + expect(requestQueue.getStartable(searchStrategy)).to.have.length(3); + }); + + it('returns all requests when no strategy passed', function () { + requestQueue.push( + new MockReq(docStrategy), + new MockReq(searchStrategy) + ); + + expect(requestQueue.getStartable()).to.have.length(2); + }); + + it('returns only startable requests', function () { + requestQueue.push( + new MockReq(docStrategy, true), + new MockReq(searchStrategy, false) + ); + + 
expect(requestQueue.getStartable()).to.have.length(1); + }); + }); + + describe('#get(strategy)', function () { + it('only returns requests that match one of the passed strategies', function () { + requestQueue.push( + new MockReq(docStrategy), + new MockReq(searchStrategy), + new MockReq(searchStrategy), + new MockReq(searchStrategy) + ); + + expect(requestQueue.get(docStrategy)).to.have.length(1); + expect(requestQueue.get(searchStrategy)).to.have.length(3); + }); + + it('returns all requests when no strategy passed', function () { + requestQueue.push( + new MockReq(docStrategy), + new MockReq(searchStrategy) + ); + + expect(requestQueue.get()).to.have.length(2); + }); + + it('returns startable and not-startable requests', function () { + requestQueue.push( + new MockReq(docStrategy, true), + new MockReq(searchStrategy, false) + ); + + expect(requestQueue.get()).to.have.length(2); + }); + }); +}); diff --git a/src/ui/public/courier/_request_queue.js b/src/ui/public/courier/_request_queue.js index ca8ce4ad793b5..e6dcf78957c66 100644 --- a/src/ui/public/courier/_request_queue.js +++ b/src/ui/public/courier/_request_queue.js @@ -16,8 +16,11 @@ define(function (require) { }); }; - queue.get = function (/* strategies.. */) { - var strategies = _.toArray(arguments); + queue.getStartable = function (...strategies) { + return queue.get(...strategies).filter(req => req.canStart()); + }; + + queue.get = function (...strategies) { return queue.filter(function (req) { var strategyMatch = !strategies.length; if (!strategyMatch) { @@ -26,7 +29,7 @@ define(function (require) { }); } - return strategyMatch && req.canStart(); + return strategyMatch; }); }; diff --git a/src/ui/public/courier/data_source/__tests__/DocSource.js b/src/ui/public/courier/data_source/__tests__/DocSource.js new file mode 100644 index 0000000000000..6ff86a2c80c01 --- /dev/null +++ b/src/ui/public/courier/data_source/__tests__/DocSource.js @@ -0,0 +1,59 @@ +import ngMock from 'ngMock'; +import expect from 'expect.js'; +import sinon from 'auto-release-sinon'; + +import RequestQueueProv from '../../_request_queue'; +import DocSourceProv from '../doc_source'; + +describe('DocSource', function () { + require('testUtils/noDigestPromises').activateForSuite(); + + let requestQueue; + let DocSource; + + beforeEach(ngMock.module('kibana')); + beforeEach(ngMock.inject(function (Private) { + requestQueue = Private(RequestQueueProv); + DocSource = Private(DocSourceProv); + })); + + describe('#onUpdate()', function () { + it('adds a request to the requestQueue', function () { + const source = new DocSource(); + + expect(requestQueue).to.have.length(0); + source.onUpdate(); + expect(requestQueue).to.have.length(1); + }); + + it('returns a promise that is resolved with the results', function () { + const source = new DocSource(); + const fakeResults = {}; + + const promise = source.onUpdate().then((results) => { + expect(results).to.be(fakeResults); + }); + + requestQueue[0].defer.resolve(fakeResults); + return promise; + }); + }); + + describe('#destroy()', function () { + it('aborts all startable requests', function () { + const source = new DocSource(); + source.onUpdate(); + sinon.stub(requestQueue[0], 'canStart').returns(true); + source.destroy(); + expect(requestQueue).to.have.length(0); + }); + + it('aborts all non-startable requests', function () { + const source = new DocSource(); + source.onUpdate(); + sinon.stub(requestQueue[0], 'canStart').returns(false); + source.destroy(); + expect(requestQueue).to.have.length(0); + }); + }); +}); 
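The `_request_queue.js` change above splits the old `get()` behavior in two: `get(...strategies)` now returns every queued request for the matching strategies (so callers such as `cancelQueued` can reach requests that cannot start yet), while `getStartable(...strategies)` keeps the `req.canStart()` filter that the fetch path relies on. Below is a stripped-down sketch of that distinction, independent of Angular and the courier internals; the `MockRequest` class and the strategy objects are placeholders.

```js
// Standalone illustration of the get()/getStartable() split introduced above.
// MockRequest and the strategy objects are placeholders, not courier code.
class MockRequest {
  constructor(strategy, startable) {
    this.strategy = strategy;
    this.startable = startable;
  }
  canStart() { return this.startable; }
}

const queue = [];
queue.get = (...strategies) =>
  queue.filter(req => !strategies.length || strategies.includes(req.strategy));
queue.getStartable = (...strategies) =>
  queue.get(...strategies).filter(req => req.canStart());

const searchStrategy = {};
const docStrategy = {};
queue.push(
  new MockRequest(searchStrategy, true),
  new MockRequest(searchStrategy, false),
  new MockRequest(docStrategy, true)
);

console.log(queue.get(searchStrategy).length);          // 2 - includes the blocked request
console.log(queue.getStartable(searchStrategy).length); // 1 - only requests that can start now
```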
diff --git a/src/ui/public/courier/data_source/__tests__/SearchSource.js b/src/ui/public/courier/data_source/__tests__/SearchSource.js new file mode 100644 index 0000000000000..02f4f73d2aa29 --- /dev/null +++ b/src/ui/public/courier/data_source/__tests__/SearchSource.js @@ -0,0 +1,59 @@ +import ngMock from 'ngMock'; +import expect from 'expect.js'; +import sinon from 'auto-release-sinon'; + +import RequestQueueProv from '../../_request_queue'; +import SearchSourceProv from '../search_source'; + +describe('SearchSource', function () { + require('testUtils/noDigestPromises').activateForSuite(); + + let requestQueue; + let SearchSource; + + beforeEach(ngMock.module('kibana')); + beforeEach(ngMock.inject(function (Private) { + requestQueue = Private(RequestQueueProv); + SearchSource = Private(SearchSourceProv); + })); + + describe('#onResults()', function () { + it('adds a request to the requestQueue', function () { + const source = new SearchSource(); + + expect(requestQueue).to.have.length(0); + source.onResults(); + expect(requestQueue).to.have.length(1); + }); + + it('returns a promise that is resolved with the results', function () { + const source = new SearchSource(); + const fakeResults = {}; + + const promise = source.onResults().then((results) => { + expect(results).to.be(fakeResults); + }); + + requestQueue[0].defer.resolve(fakeResults); + return promise; + }); + }); + + describe('#destroy()', function () { + it('aborts all startable requests', function () { + const source = new SearchSource(); + source.onResults(); + sinon.stub(requestQueue[0], 'canStart').returns(true); + source.destroy(); + expect(requestQueue).to.have.length(0); + }); + + it('aborts all non-startable requests', function () { + const source = new SearchSource(); + source.onResults(); + sinon.stub(requestQueue[0], 'canStart').returns(false); + source.destroy(); + expect(requestQueue).to.have.length(0); + }); + }); +}); diff --git a/src/ui/public/courier/data_source/_abstract.js b/src/ui/public/courier/data_source/_abstract.js index 2196ce07f2abb..f66996048e1c4 100644 --- a/src/ui/public/courier/data_source/_abstract.js +++ b/src/ui/public/courier/data_source/_abstract.js @@ -121,7 +121,10 @@ define(function (require) { SourceAbstract.prototype.onResults = function (handler) { var self = this; - return new PromiseEmitter(function (resolve, reject, defer) { + return new PromiseEmitter(function (resolve, reject) { + const defer = Promise.defer(); + defer.promise.then(resolve, reject); + self._createRequest(defer); }, handler); }; @@ -142,7 +145,10 @@ define(function (require) { SourceAbstract.prototype.onError = function (handler) { var self = this; - return new PromiseEmitter(function (resolve, reject, defer) { + return new PromiseEmitter(function (resolve, reject) { + const defer = Promise.defer(); + defer.promise.then(resolve, reject); + errorHandlers.push({ source: self, defer: defer @@ -161,7 +167,7 @@ define(function (require) { */ SourceAbstract.prototype.fetch = function () { var self = this; - var req = _.first(self._myQueued()); + var req = _.first(self._myStartableQueued()); if (!req) { req = self._createRequest(); @@ -177,7 +183,7 @@ define(function (require) { * @async */ SourceAbstract.prototype.fetchQueued = function () { - return courierFetch.these(this._myQueued()); + return courierFetch.these(this._myStartableQueued()); }; /** @@ -185,7 +191,10 @@ define(function (require) { * @return {undefined} */ SourceAbstract.prototype.cancelQueued = function () { - _.invoke(this._myQueued(), 'abort'); + 
requestQueue + .get(this._fetchStrategy) + .filter(req => req.source === this) + .forEach(req => req.abort()); }; /** @@ -200,9 +209,10 @@ define(function (require) { * PRIVATE API *****/ - SourceAbstract.prototype._myQueued = function () { - var reqs = requestQueue.get(this._fetchStrategy); - return _.where(reqs, { source: this }); + SourceAbstract.prototype._myStartableQueued = function () { + return requestQueue + .getStartable(this._fetchStrategy) + .filter(req => req.source === this); }; SourceAbstract.prototype._createRequest = function () { diff --git a/src/ui/public/courier/fetch/fetch.js b/src/ui/public/courier/fetch/fetch.js index 03ce8199aa431..adf1871bb4253 100644 --- a/src/ui/public/courier/fetch/fetch.js +++ b/src/ui/public/courier/fetch/fetch.js @@ -9,7 +9,7 @@ define(function (require) { var INCOMPLETE = Private(require('ui/courier/fetch/_req_status')).INCOMPLETE; function fetchQueued(strategy) { - var requests = requestQueue.get(strategy); + var requests = requestQueue.getStartable(strategy); if (!requests.length) return Promise.resolve(); else return fetchThese(requests); } diff --git a/src/ui/public/courier/fetch/request/segmented.js b/src/ui/public/courier/fetch/request/segmented.js index 8289c16d1b97d..a2291b3b99c6b 100644 --- a/src/ui/public/courier/fetch/request/segmented.js +++ b/src/ui/public/courier/fetch/request/segmented.js @@ -2,7 +2,6 @@ define(function (require) { return function CourierSegmentedReqProvider(es, Private, Promise, Notifier, timefilter, config) { var _ = require('lodash'); var SearchReq = Private(require('ui/courier/fetch/request/search')); - var requestQueue = Private(require('ui/courier/_request_queue')); var SegmentedHandle = Private(require('ui/courier/fetch/request/_segmented_handle')); var notify = new Notifier({ diff --git a/src/ui/public/directives/__tests__/truncate.js b/src/ui/public/directives/__tests__/truncate.js index 0e255ff3d60ff..071a5743fe539 100644 --- a/src/ui/public/directives/__tests__/truncate.js +++ b/src/ui/public/directives/__tests__/truncate.js @@ -23,7 +23,7 @@ var init = function (text) { // Create the element $elem = angular.element( - '' + '' ); // And compile it diff --git a/src/ui/public/index_patterns/__tests__/calculate_indices.js b/src/ui/public/index_patterns/__tests__/calculate_indices.js index 9405eb03c6dcb..d5c44ec2d00cb 100644 --- a/src/ui/public/index_patterns/__tests__/calculate_indices.js +++ b/src/ui/public/index_patterns/__tests__/calculate_indices.js @@ -12,11 +12,13 @@ describe('ui/index_patterns/_calculate_indices', () => { let response; let config; let constraints; + let indices; beforeEach(ngMock.module('kibana', ($provide) => { response = { indices: { - 'mock-*': 'irrelevant, is ignored' + 'mock-*': { fields: { '@something': {} } }, + 'ignore-*': { fields: {} } } }; @@ -37,7 +39,9 @@ describe('ui/index_patterns/_calculate_indices', () => { })); function run({ start = undefined, stop = undefined } = {}) { - calculateIndices('wat-*-no', '@something', start, stop); + calculateIndices('wat-*-no', '@something', start, stop).then(value => { + indices = value; + }); $rootScope.$apply(); config = _.first(es.fieldStats.lastCall.args); constraints = config.body.index_constraints; @@ -72,6 +76,9 @@ describe('ui/index_patterns/_calculate_indices', () => { it('max_value is set to original if not a moment object', () => { expect(constraints['@something'].max_value.gte).to.equal('1234567890'); }); + it('max_value format is set to epoch_millis', () => { + 
expect(constraints['@something'].max_value.format).to.equal('epoch_millis'); + }); it('max_value is set to moment.valueOf if given a moment object', () => { const start = moment(); run({ start }); @@ -90,6 +97,9 @@ describe('ui/index_patterns/_calculate_indices', () => { it('min_value is set to original if not a moment object', () => { expect(constraints['@something'].min_value.lte).to.equal('1234567890'); }); + it('min_value format is set to epoch_millis', () => { + expect(constraints['@something'].min_value.format).to.equal('epoch_millis'); + }); it('max_value is set to moment.valueOf if given a moment object', () => { const stop = moment(); run({ stop }); @@ -98,6 +108,14 @@ describe('ui/index_patterns/_calculate_indices', () => { }); }); + describe('response filtering', () => { + it('filters out any indices that have empty fields', () => { + run(); + expect(_.includes(indices, 'mock-*')).to.be(true); + expect(_.includes(indices, 'ignore-*')).to.be(false); + }); + }); + describe('response sorting', function () { require('testUtils/noDigestPromises').activateForSuite(); diff --git a/src/ui/public/index_patterns/_calculate_indices.js b/src/ui/public/index_patterns/_calculate_indices.js index bbe1b0f51cd85..9401e1d923ecf 100644 --- a/src/ui/public/index_patterns/_calculate_indices.js +++ b/src/ui/public/index_patterns/_calculate_indices.js @@ -7,6 +7,23 @@ define(function (require) { return moment.isMoment(val) ? val.valueOf() : val; } + // returns a properly formatted millisecond timestamp index constraint + function msConstraint(comparison, value) { + return { + [comparison]: timeValue(value), + format: 'epoch_millis' + }; + } + + // returns a new object with any indexes removed that do not include the + // time field + // + // fixme: this really seems like a bug that needs to be fixed in + // elasticsearch itself, but this workaround will do for now + function omitIndicesWithoutTimeField(indices, timeFieldName) { + return _.pick(indices, index => index.fields[timeFieldName]); + } + return function CalculateIndicesFactory(Promise, es) { // Uses the field stats api to determine the names of indices that need to @@ -14,7 +31,9 @@ define(function (require) { // given time range function calculateIndices(pattern, timeFieldName, start, stop, sortDirection) { return getFieldStats(pattern, timeFieldName, start, stop) - .then(resp => sortIndexStats(resp, timeFieldName, sortDirection)); + .then(resp => resp.indices) + .then(indices => omitIndicesWithoutTimeField(indices, timeFieldName)) + .then(indices => sortIndexStats(indices, timeFieldName, sortDirection)); }; // creates the configuration hash that must be passed to the elasticsearch @@ -22,10 +41,10 @@ define(function (require) { function getFieldStats(pattern, timeFieldName, start, stop) { const constraints = {}; if (start) { - constraints.max_value = { gte: timeValue(start) }; + constraints.max_value = msConstraint('gte', start); } if (stop) { - constraints.min_value = { lte: timeValue(stop) }; + constraints.min_value = msConstraint('lte', stop); } return es.fieldStats({ @@ -40,14 +59,14 @@ define(function (require) { }); } - function sortIndexStats(resp, timeFieldName, sortDirection) { - if (!sortDirection) return _.keys(resp.indices); + function sortIndexStats(indices, timeFieldName, sortDirection) { + if (!sortDirection) return _.keys(indices); // FIXME: Once https://github.com/elastic/elasticsearch/issues/14404 is closed // this should be sorting based on the sortable value of a field. const edgeKey = sortDirection === 'desc' ? 
'max_value' : 'min_value'; - return _(resp.indices) + return _(indices) .map((stats, index) => ( { index, edge: stats.fields[timeFieldName][edgeKey] } )) diff --git a/src/ui/public/promises/__tests__/promises.js b/src/ui/public/promises/__tests__/promises.js index 874ae2946748a..b9617ee3e2cf8 100644 --- a/src/ui/public/promises/__tests__/promises.js +++ b/src/ui/public/promises/__tests__/promises.js @@ -12,6 +12,16 @@ describe('Promise service', function () { $rootScope = $injector.get('$rootScope'); })); + describe('Constructor', function () { + it('provides resolve and reject function', function () { + new Promise(function (resolve, reject) { + expect(resolve).to.be.a('function'); + expect(reject).to.be.a('function'); + expect(arguments).to.have.length(2); + }); + }); + }); + describe('Promise.fromNode', function () { it('creates a callback that controls a promise', function () { let callback; diff --git a/src/ui/public/promises/promises.js b/src/ui/public/promises/promises.js index bb9a32df40616..8189f7445945e 100644 --- a/src/ui/public/promises/promises.js +++ b/src/ui/public/promises/promises.js @@ -11,7 +11,7 @@ define(function (require) { var defer = $q.defer(); try { - fn(defer.resolve, defer.reject, defer); + fn(defer.resolve, defer.reject); } catch (e) { defer.reject(e); } diff --git a/tasks/config/esvm.js b/tasks/config/esvm.js index 571ca7e12388a..1362582e0cfcd 100644 --- a/tasks/config/esvm.js +++ b/tasks/config/esvm.js @@ -28,6 +28,9 @@ module.exports = function (grunt) { config: { path: { data: dataDir + }, + cluster: { + name: 'esvm-dev' } } } @@ -35,7 +38,15 @@ module.exports = function (grunt) { test: { options: { directory: resolve(directory, 'test'), - purge: true + purge: true, + config: { + http: { + port: 9210 + }, + cluster: { + name: 'esvm-test' + } + } } }, ui: { @@ -45,6 +56,9 @@ module.exports = function (grunt) { config: { http: { port: uiConfig.servers.elasticsearch.port + }, + cluster: { + name: 'esvm-ui' } } } diff --git a/test/fixtures/scenarioManager.js b/test/fixtures/scenarioManager.js index 84376c0f70caa..08856286c7228 100644 --- a/test/fixtures/scenarioManager.js +++ b/test/fixtures/scenarioManager.js @@ -7,7 +7,8 @@ function ScenarioManager(server) { if (!server) throw new Error('No server defined'); this.client = new elasticsearch.Client({ - host: server + host: server, + requestTimeout: 300000 }); } diff --git a/test/functional/apps/discover/_discover.js b/test/functional/apps/discover/_discover.js index 44320025b7a17..b17af315284d5 100644 --- a/test/functional/apps/discover/_discover.js +++ b/test/functional/apps/discover/_discover.js @@ -12,7 +12,6 @@ define(function (require) { var settingsPage; var discoverPage; var remote; - this.timeout = 60000; bdd.before(function () { common = new Common(this.remote); @@ -71,7 +70,6 @@ define(function (require) { bdd.it('save query should show toast message and display query name', function () { var expectedSavedQueryMessage = 'Discover: Saved Data Source "' + queryName1 + '"'; - this.timeout = 60000; return discoverPage.saveSearch(queryName1) .then(function () { return headerPage.getToastMessage(); @@ -94,12 +92,10 @@ define(function (require) { bdd.it('load query should show query name', function () { return discoverPage.loadSavedSearch(queryName1) .then(function () { - return common.tryForTime(15000, function () { - return discoverPage.getCurrentQueryName() - .then(function (actualQueryNameString) { - expect(actualQueryNameString).to.be(queryName1); - }); - }); + return discoverPage.getCurrentQueryName(); 
+ }) + .then(function (actualQueryNameString) { + expect(actualQueryNameString).to.be(queryName1); }) .catch(common.handleError(this)); }); diff --git a/test/functional/apps/discover/index.js b/test/functional/apps/discover/index.js index 2e1df80ac7ca1..28d1e78bf3bdb 100644 --- a/test/functional/apps/discover/index.js +++ b/test/functional/apps/discover/index.js @@ -13,6 +13,7 @@ define(function (require) { var scenarioManager; var remote; var scenarioManager = new ScenarioManager(url.format(config.servers.elasticsearch)); + this.timeout = 120000; // on setup, we create an settingsPage instance // that we will use for all the tests diff --git a/test/functional/apps/settings/_index_pattern_results_sort.js b/test/functional/apps/settings/_index_pattern_results_sort.js index 169402667cf9e..72d7474356fed 100644 --- a/test/functional/apps/settings/_index_pattern_results_sort.js +++ b/test/functional/apps/settings/_index_pattern_results_sort.js @@ -1,4 +1,5 @@ define(function (require) { + var config = require('intern').config; var Common = require('../../../support/pages/Common'); var SettingsPage = require('../../../support/pages/SettingsPage'); var expect = require('intern/dojo/node!expect.js'); @@ -9,6 +10,7 @@ define(function (require) { var common; var settingsPage; var remote; + var defaultTimeout = config.timeouts.default; bdd.before(function () { common = new Common(this.remote); @@ -93,9 +95,11 @@ define(function (require) { }); bdd.it('makelogs data should have expected number of fields', function () { - return settingsPage.getFieldsTabCount() - .then(function (tabCount) { - expect(tabCount).to.be('' + expectedFieldCount); + return common.tryForTime(defaultTimeout, function () { + return settingsPage.getFieldsTabCount() + .then(function (tabCount) { + expect(tabCount).to.be('' + expectedFieldCount); + }); }) .catch(common.handleError(this)); }); diff --git a/test/functional/apps/settings/index.js b/test/functional/apps/settings/index.js index df0753df07ce3..90fbed52f13c6 100644 --- a/test/functional/apps/settings/index.js +++ b/test/functional/apps/settings/index.js @@ -12,6 +12,7 @@ define(function (require) { bdd.describe('settings app', function () { var scenarioManager = new ScenarioManager(url.format(config.servers.elasticsearch)); + this.timeout = 120000; // on setup, we create an settingsPage instance // that we will use for all the tests diff --git a/test/intern.js b/test/intern.js index a1f3ec28e0f49..b46ee03e1e2b3 100644 --- a/test/intern.js +++ b/test/intern.js @@ -3,10 +3,10 @@ define(function (require) { var _ = require('intern/dojo/node!lodash'); return _.assign({ - debug: false, + debug: true, capabilities: { 'selenium-version': '2.47.1', - 'idle-timeout': 30 + 'idle-timeout': 99 }, environments: [{ browserName: 'firefox' @@ -23,6 +23,9 @@ define(function (require) { 'bluebird': './node_modules/bluebird/js/browser/bluebird.js', 'moment': './node_modules/moment/moment.js' } - } + }, + timeouts: { + default: 90000 + }, }, serverConfig); }); diff --git a/test/support/pages/Common.js b/test/support/pages/Common.js index 567590f35851b..a531c4c177efa 100644 --- a/test/support/pages/Common.js +++ b/test/support/pages/Common.js @@ -11,7 +11,7 @@ define(function (require) { this.remote = remote; } - var defaultTimeout = 60000; + var defaultTimeout = config.timeouts.default; Common.prototype = { constructor: Common, @@ -19,6 +19,7 @@ define(function (require) { navigateToApp: function (appName, testStatusPage) { var self = this; var appUrl = getUrl(config.servers.kibana, 
config.apps[appName]); + self.debug('navigating to ' + appName + ' url: ' + appUrl); var doNavigation = function (url) { return self.tryForTime(defaultTimeout, function () { @@ -31,7 +32,11 @@ define(function (require) { if (testStatusPage !== false) { return self.checkForKibanaApp() .then(function (kibanaLoaded) { - if (!kibanaLoaded) throw new Error('Kibana is not loaded, retrying'); + if (!kibanaLoaded) { + var msg = 'Kibana is not loaded, retrying'; + self.debug(msg); + throw new Error(msg); + } }); } }) @@ -40,15 +45,20 @@ define(function (require) { }) .then(function (currentUrl) { var navSuccessful = new RegExp(appUrl).test(currentUrl); - if (!navSuccessful) throw new Error('App failed to load: ' + appName); + if (!navSuccessful) { + var msg = 'App failed to load: ' + appName + + ' in ' + defaultTimeout + 'ms' + + ' currentUrl = ' + currentUrl; + self.debug(msg); + throw new Error(msg); + } + + return currentUrl; }); }); }; return doNavigation(appUrl) - .then(function () { - return self.remote.getCurrentUrl(); - }) .then(function (currentUrl) { var lastUrl = currentUrl; return self.tryForTime(defaultTimeout, function () { @@ -138,12 +148,8 @@ define(function (require) { return Promise .try(block) - .then(function tryForTimeSuccess() { - self.debug('tryForTime success in about ' + (lastTry - start) + ' ms'); - return (lastTry - start); - }) .catch(function tryForTimeCatch(err) { - self.debug('tryForTime failure, retry in ' + retryDelay + 'ms - ' + err.message); + self.debug('tryForTime failure: ' + err.message); tempMessage = err.message; return Promise.delay(retryDelay).then(attempt); }); diff --git a/test/support/pages/DiscoverPage.js b/test/support/pages/DiscoverPage.js index de373f4d176b3..a3dc9ac6c1848 100644 --- a/test/support/pages/DiscoverPage.js +++ b/test/support/pages/DiscoverPage.js @@ -1,11 +1,9 @@ // in test/support/pages/DiscoverPage.js define(function (require) { - // the page object is created as a constructor - // so we can provide the remote Command object - // at runtime + var config = require('intern').config; var Common = require('./Common'); - var defaultTimeout = 20000; + var defaultTimeout = config.timeouts.default; var common; var thisTime; @@ -76,10 +74,11 @@ define(function (require) { }, getCurrentQueryName: function getCurrentQueryName() { - return thisTime - .findByCssSelector('span.discover-info-title') - // .findByCssSelector('span[bo-bind="opts.savedSearch.title"]') - .getVisibleText(); + return common.tryForTime(defaultTimeout, function () { + return thisTime + .findByCssSelector('span.discover-info-title') + .getVisibleText(); + }); }, getBarChartData: function getBarChartData() { diff --git a/test/support/pages/HeaderPage.js b/test/support/pages/HeaderPage.js index 303b2cd02147b..40353ade7421a 100644 --- a/test/support/pages/HeaderPage.js +++ b/test/support/pages/HeaderPage.js @@ -1,6 +1,6 @@ // in test/support/pages/HeaderPage.js define(function (require) { - + var config = require('intern').config; var Common = require('./Common'); var common; @@ -13,14 +13,14 @@ define(function (require) { common = new Common(this.remote); } - var defaultTimeout = 5000; + var defaultTimeout = config.timeouts.default; HeaderPage.prototype = { constructor: HeaderPage, clickSelector: function (selector) { var self = this.remote; - return common.tryForTime(5000, function () { + return common.tryForTime(defaultTimeout, function () { return self.setFindTimeout(defaultTimeout) .findByCssSelector(selector) .then(function (tab) { @@ -111,16 +111,21 @@ define(function 
(require) { waitForToastMessageGone: function waitForToastMessageGone() { var self = this; - return common.tryForTime(defaultTimeout * 5, function tryingForTime() { - return self.remote.setFindTimeout(1000) - .findAllByCssSelector('kbn-truncated.toast-message.ng-isolate-scope') + return common.tryForTime(defaultTimeout, function () { + return self.remote.setFindTimeout(500) + .findAllByCssSelector('kbn-truncated.toast-message') .then(function toastMessage(messages) { if (messages.length > 0) { + common.debug('toast message found, waiting...'); throw new Error('waiting for toast message to clear'); } else { - common.debug('now messages = 0 "' + messages + '"'); + common.debug('toast message clear'); return messages; } + }) + .catch(function () { + common.debug('toast message not found'); + return; }); }); } diff --git a/test/support/pages/SettingsPage.js b/test/support/pages/SettingsPage.js index 3a2abc6f805d0..dbba239f40f6e 100644 --- a/test/support/pages/SettingsPage.js +++ b/test/support/pages/SettingsPage.js @@ -1,13 +1,10 @@ // in test/support/pages/SettingsPage.js define(function (require) { - // the page object is created as a constructor - // so we can provide the remote Command object - // at runtime - + var config = require('intern').config; var Promise = require('bluebird'); var Common = require('./Common'); - var defaultTimeout = 60000; + var defaultTimeout = config.timeouts.default; var common; function SettingsPage(remote) { @@ -47,12 +44,22 @@ define(function (require) { selectTimeFieldOption: function (selection) { var self = this; + // open dropdown return self.getTimeFieldNameField().click() .then(function () { + // close dropdown, keep focus return self.getTimeFieldNameField().click(); }) .then(function () { - return self.getTimeFieldOption(selection); + return common.tryForTime(defaultTimeout, function () { + return self.getTimeFieldOption(selection).click() + .then(function () { + return self.getTimeFieldOption(selection).isSelected(); + }) + .then(function (selected) { + if (!selected) throw new Error('option not selected: ' + selected); + }); + }); }); }, @@ -126,11 +133,13 @@ define(function (require) { var self = this; var selector = 'li.kbn-settings-tab.active a small'; - return self.remote.setFindTimeout(defaultTimeout) - .findByCssSelector(selector).getVisibleText() - .then(function (theText) { - // the value has () around it, remove them - return theText.replace(/\((.*)\)/, '$1'); + return common.tryForTime(defaultTimeout, function () { + return self.remote.setFindTimeout(defaultTimeout / 10) + .findByCssSelector(selector).getVisibleText() + .then(function (theText) { + // the value has () around it, remove them + return theText.replace(/\((.*)\)/, '$1'); + }); }); },