diff --git a/gatsby-config.js b/gatsby-config.js
index 0a75c04183..02f55285bc 100644
--- a/gatsby-config.js
+++ b/gatsby-config.js
@@ -174,6 +174,8 @@ if (process.env.BUCKET_NAME) {
options: {
bucketName: process.env.BUCKET_NAME,
region: process.env.BUCKET_REGION,
+ protocol: 'https',
+ hostname: isProduction ? 'k6.io' : 'staging.k6.io'
},
});
}
diff --git a/gatsby-node.js b/gatsby-node.js
index 1c2833c221..e8986b3b3d 100644
--- a/gatsby-node.js
+++ b/gatsby-node.js
@@ -330,12 +330,55 @@ async function createDocPages({ graphql, actions, pathPrefix }) {
}
const createRedirects = ({ actions, pathPrefix }) => {
- actions.createRedirect({
+ const { createRedirect } = actions;
+
+ createRedirect({
fromPath: `${pathPrefix}/getting-started/welcome`,
toPath: pathPrefix ? pathPrefix : `/`,
redirectInBrowser: true,
isPermanent: true,
});
+
+ createRedirect({
+ fromPath: '/getting-started/results-output/apache-kafka',
+ toPath: '/results-visualization/apache-kafka',
+ isPermanent: true
+ });
+
+ createRedirect({
+ fromPath: '/getting-started/results-output/cloud',
+ toPath: '/results-visualization/cloud',
+ isPermanent: true
+ });
+ createRedirect({
+ fromPath: '/results-visualization/k6-cloud-test-results',
+ toPath: '/results-visualization/cloud',
+ isPermanent: true
+ });
+
+ createRedirect({
+ fromPath: '/getting-started/results-output/datadog',
+ toPath: '/results-visualization/datadog',
+ isPermanent: true
+ });
+ createRedirect({
+ fromPath: '/getting-started/results-output/influxdb',
+ toPath: '/results-visualization/influxdb-+-grafana',
+ isPermanent: true
+ });
+
+ createRedirect({
+ fromPath: '/getting-started/results-output/json',
+ toPath: '/results-visualization/json',
+ isPermanent: true
+ });
+
+ createRedirect({
+ fromPath: '/getting-started/results-output/statsd',
+ toPath: '/results-visualization/statsd',
+ isPermanent: true
+ });
+
};
exports.createPages = async (options) => {
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md
index 93bbfd880e..1245351fef 100755
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md
+++ b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output.md
@@ -1,216 +1,143 @@
---
-title: 'Results Output'
+title: 'Results output'
excerpt: ''
---
-## Overview
+By default, the `k6 run` command prints runtime information and general results to `stdout`.
-By default, k6 will print runtime information and general results to `stdout` while the test is running, and a summary after the test has ended.
+## Standard output
-## Runtime output to stdout
+![k6 results - console/stdout output](images/k6-results-stdout.png)
-![Output to stdout img](images/output-to-stdout.png)
+When k6 displays the results to `stdout`, it will show the k6 logo and the following test information:
-By default, k6 sends its output to stdout only. When started up, it will display a very tasteful ASCII splash screen with the k6 logo and version information, plus details about the test and options active. We will go through the things one by one here:
+- Test details: general test information and load options.
+- Progress bar: test status and how much time has passed.
+- Test summary: the test results (after test completion).
-- `execution: local` k6 is not being used to control another k6 instance (distributed execution).
-- `output: -` Output is sent to stdout only.
-- `script: group.js (js)` Shows what script we are running. The `(js)` at the end indicates that k6 thinks this file contains JavaScript code (that should be executed by the VUs).
-- `duration: 0s, iterations: 1` The VUs in the test will only perform one single script iteration (calling the default function once) each, and there is no time limit set.
-- `vus: 1, max: 1` Simulate 1 VU (virtual user), allocate resources for a "max" of 1 VU (meaning we can't scale up the load level in this case).
-- `done [==============] 800ms / 800ms` This is the progress bar that updates while the test is running, to indicate how far the test has come and how much time has passed.
-- `█ my user scenario` is the name of a group we have created in our JS script.
-- `█ front page` is the name of a sub-group that was created inside the previously mentioned group ("my user scenario").
-- `✓ 100.00% - status code is 200` is the result from a check() that was executed inside the "front page" group. Note how this check result is indented, to indicate that it belongs to the "front page" group. The "front page" group name, in turn, is indented to indicate it belongs to its parent group ("my user scenario").
-- `█ features page` is another group that belongs to the parent group "my user scenario".
-- `✓ 100.00% - status code is 200` and `✓ 100.00% - h1 message is correct` are two more checks that belong to the "features page" group.
-- `checks................: 100.00%` tells us the percentage of our checks that passed.
+### Test details
-And then comes the HTTP timing information. There are several metrics being reported here, and percentiles etc. for each of them:
+
-- `http_req_blocked` The time VUs spent waiting to be allocated a TCP connection from the connection pool.
-- `http_req_connecting` The time VUs spent performing TCP handshakes (setting up TCP connections to the remote host).
-- `http_req_looking_up` The time spent performing DNS lookups.
-- `http_req_sending` The time spent transmitting HTTP requests to the remote host.
-- `http_req_waiting` The time spent waiting for a response to come back from the remote host (after having sent a request).
-- `http_req_receiving` The time spent receiving a reply from the remote host.
-- `http_req_duration` Total time for the request. It's equal to `http_req_sending + http_req_waiting + http_req_receiving` (i.e. how long did the remote server take to process the request and respond, without the initial DNS lookup/connection times).
+```shell
+execution: local
+ output: -
+ script: script.js
+
+duration: 1m0s, iterations: -
+ vus: 100, max: 100
+```
+
+
+
+- `execution: local` the k6 execution mode (local or cloud).
+- `output: -` the output of the test results. The default is `stdout`.
+- `script: script.js` shows the name of the script that is being executed.
+- `duration: 1m0s` the test run [duration](/using-k6/options#duration).
+- `iterations: -` the total number of VU [iterations](https://k6.io/docs/using-k6/options#iterations).
+- `vus: 100` the initial number of VUs that the test will start running with.
+- `max: 100` the maximum number of VUs that the test can scale up to.
+
+### Test summary
+
+The test summary provides a general overview of your test result. The summary prints to `stdout` the status of:
+
+- [Built-in metrics](/using-k6/metrics#built-in-metrics) and [custom metrics](/using-k6/metrics#custom-metrics).
+- [Checks](/using-k6/checks) and [thresholds](/using-k6/thresholds).
+- [Groups](/using-k6/tags-and-groups#groups) and [tags](/using-k6/tags-and-groups#tags).
-All of these are metrics of the Trend type, which means you can extract max, min, [percentile](https://en.wikipedia.org/wiki/Percentile), average values from them. On stdout they are printed like this:
-
-After the HTTP timing metrics, there will be a few final lines of output:
+> To learn more about the metrics k6 collects and reports, read the [Metrics guide](/using-k6/metrics).
-- `http_reqs........: 2` The total number of HTTP requests made during the whole load test.
-- `iterations........: 1` The total number of times all VUs in the test managed to run through the default() function.
-- `vus.................: 1` How many VUs the test was configured to simulate.
-- `vus_max........: 1` The number of pre-allocated VU slots the test was configured for (vus_max allows you to scale up the number of VUs in the test to max that number).
+**Output of trend metrics**
-## Output result data
+[Trend metrics](/using-k6/metrics#metric-types) collect trend statistics (min/max/avg/percentiles) for a series of values. On stdout they are printed like this:
-k6 may also output more granular result data using special output plugins. Currently, there are a few plugins that can output data:
+
+
+You could use the [summary-trend-stats](/using-k6/options#summary-trend-stats) option to change the stats reported for Trend metrics.
+
+
+
+
+```shell
+$ k6 run --summary-trend-stats="avg,p(99)" script.js
+```
+
+
+
+
+
+## Output plugins
+
+k6 can send more granular result data to different outputs to integrate and visualize k6 metrics on other platforms.
+
+The list of output plugins is:
+
+| Plugin | Usage |
+|-|-|
+| [Apache Kafka](/results-visualization/apache-kafka) | `k6 run --out kafka` |
+| [Cloud](/results-visualization/cloud) | `k6 run --out cloud` |
+| [Datadog](/results-visualization/datadog) | `k6 run --out datadog` |
+| [InfluxDB](/results-visualization/influxdb-+-grafana) | `k6 run --out influxdb` |
+| [JSON](/results-visualization/json) | `k6 run --out json` |
+| [StatsD](/results-visualization/statsd) | `k6 run --out statsd` |
-- [JSON](/getting-started/results-output/json) plugin that writes data in JSON format to a file
-- Plugins that push the metrics to:
- - [Apache Kafka](/getting-started/results-output/apache-kafka)
- - [Datadog](/getting-started/results-output/datadog)
- - [InfluxDB](/getting-started/results-output/influxdb)
- - [StatsD](/getting-started/results-output/statsd)
-- [Cloud](/getting-started/results-output/cloud) plugin that streams your test results to the k6 Cloud platform
-To know more about the metrics k6 collects, please see the [Metrics management page](/using-k6/metrics)
## Multiple outputs
-You can simultaneously send the emitted metrics to several outputs by using the CLI `--out` flag multiple times, for example:
+You can simultaneously send metrics to several outputs by using the CLI `--out` flag multiple times, for example:
-
+
```shell
-$ k6 run --out json=test.json --out influxdb=http://localhost:8086/k6
+$ k6 run \
+ --out json=test.json \
+ --out influxdb=http://localhost:8086/k6
```
## Summary export
-> _New in v0.26.0_
-
-It's also possible to export the end-of-test summary report to a JSON
-file that includes data for all test metrics, checks and thresholds.
-This is useful to get the aggregated test results in a
-machine-readable format, for integration with dashboards, external
-alerts, etc.
-
-### Example
-
-Running `k6 run --summary-export=export.json github.com/loadimpact/k6/samples/stages.js`
-would generate the following file:
-
-
+
+```shell
+$ k6 run --summary-export=export.json script.js
```
+
+> Read more about the summary in the [JSON plugin documentation](/results-visualization/json#summary-export)
\ No newline at end of file
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/Cloud.md b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/Cloud.md
deleted file mode 100755
index 4ef7ea704e..0000000000
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/Cloud.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 'Cloud'
-excerpt: ''
----
-
-You can also stream your test results in real time to the k6 Cloud.
-
-[k6 Cloud](/cloud) provides support to automatically interpret and visualize your results.
-
-
-
-> ### `K6CLOUD_TOKEN` is now `K6_CLOUD_TOKEN`
->
-> Starting with v0.18.0 `K6CLOUD_TOKEN` has been renamed `K6_CLOUD_TOKEN`. The old spelling will still work in v0.18.0 but a deprecation message will be printed to the terminal.
-
-
-
-After running the command, the console shows the URL to access your test results.
-
-![cloud output img](images/cloud-output.png)
-
-You can read more about [k6 Cloud](/cloud)
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/InfluxDB.md b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/InfluxDB.md
deleted file mode 100755
index 3633f7e1a8..0000000000
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/InfluxDB.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: "InfluxDB"
-excerpt: ""
----
-
-Detailed statistics can also be sent directly to an [InfluxDB](https://github.com/influxdata/influxdb) instance:
-
-
-
-
-```shell
-k6 run --out influxdb=http://localhost:8086/k6 script.js
-````
-
-
-
-The above will make k6 connect to an InfluxDB instance listening to port 8086 on localhost, and insert all test results data into a database named "k6" (which will be created if it doesn't exist).
-
-Then you can use some other tool like [Grafana](https://grafana.com/) to visualize the data.
-
-Read more in this [Tutorial about using k6 with InfluxDB and Grafana](https://k6.io/blog/k6-loves-grafana/).
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/JSON.md b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/JSON.md
deleted file mode 100755
index 3d9726738a..0000000000
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/JSON.md
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'JSON'
-excerpt: ''
----
-
-You can also make k6 output detailed statistics in JSON format by using the `--out/-o` option for `k6 run`, like this:
-
-
-
-```shell
-k6 run --out json=my_test_result.json script.js
-```
-
-
-
-The JSON file will contain lines like these:
-
-
-
-Each line will either contain information about a metric, or log a data point (sample) for a metric. Lines consist of three items:
-
-- `type` - can have the values [Metric](#metric) or [Point](#point) where `Metric` means the line is declaring a metric, and `Point` is an actual data point (sample) for a metric.
-- `data` - is a dictionary that contains lots of stuff, varying depending on the `"type"` above.
-- `metric` - the name of the metric.
-
-## Metric
-
-This line contains information about the nature of a metric. Here, `"data"` will contain the following:
-
-- `"type"` - the metric type ("gauge", "rate", "counter" or "trend")
-- `"contains"` - information on the type of data collected (can e.g. be "time" for timing metrics)
-- `"tainted"` - has this metric caused a threshold to fail?
-- `"threshold"` - are there any thresholds attached to this metric?
-- `"submetrics"` - any derived metrics created as a result of adding a threshold using tags.
-
-## Point
-
-This line contains actual data samples. Here, `"data"` will contain these fields:
-
-- `"time"` - timestamp when the sample was collected
-- `"value"` - the actual data sample; time values are in milliseconds
-- `"tags"` - dictionary with tagname-tagvalue pairs that can be used when filtering results data
-
-## Processing JSON output
-
-We recommend using [jq][jq_url] to process the k6 JSON output. [jq][jq_url] is a lightweight and flexible command-line JSON processor.
-
-You can quickly create [filters][jq_filters_url] to return a particular metric of the JSON file:
-
-
-
-```shell
-jq '. | select(.type=="Point" and .metric == "http_req_duration" and .data.tags.status >= "200")' myscript-output.json
-```
-
-
-
-And calculate an aggregated value of any metric:
-
-
-
-```shell
-jq '. | select(.type=="Point" and .metric == "http_req_duration" and .data.tags.status >= "200") | .data.value' myscript-output.json | jq -s min
-```
-
-
-
-
-
-```shell
-jq '. | select(.type=="Point" and .metric == "http_req_duration" and .data.tags.status >= "200") | .data.value' myscript-output.json | jq -s max
-```
-
-
-
-For more advanced cases, check out the [jq Manual][jq_manual_url]
-
-[jq_url]: https://stedolan.github.io/jq/ 'jq_url'
-[jq_filters_url]: https://stedolan.github.io/jq/manual/#Basicfilters 'jq_filters_url'
-[jq_manual_url]: https://stedolan.github.io/jq/manual/ 'jq_manual_url'
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/cloud-output.png b/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/cloud-output.png
deleted file mode 100644
index a4643b25a9..0000000000
Binary files a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/cloud-output.png and /dev/null differ
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/images/k6-results-stdout.png b/src/data/markdown/docs/01 guides/01 Getting started/images/k6-results-stdout.png
new file mode 100644
index 0000000000..7875f1df5c
Binary files /dev/null and b/src/data/markdown/docs/01 guides/01 Getting started/images/k6-results-stdout.png differ
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/Apache Kafka.md b/src/data/markdown/docs/01 guides/03 Results visualization/Apache Kafka.md
similarity index 53%
rename from src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/Apache Kafka.md
rename to src/data/markdown/docs/01 guides/03 Results visualization/Apache Kafka.md
index 188b694c6d..8043292b9c 100755
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/Apache Kafka.md
+++ b/src/data/markdown/docs/01 guides/03 Results visualization/Apache Kafka.md
@@ -3,9 +3,15 @@ title: "Apache Kafka"
excerpt: ""
---
-You can also push the emitted metrics to [Apache Kafka](https://kafka.apache.org). You can configure the broker (or multiple ones), topic and message format directly from the command line parameter like this:
+[Apache Kafka](https://kafka.apache.org) is a stream-processing platform for handling real-time data.
-
+When running a test, k6 can send the metrics in real-time to Kafka.
+
+## Instructions
+
+You can configure the broker (or multiple ones), topic and message format directly from the command line parameter like this:
+
+
```shell
$ k6 run --out kafka=brokers=broker_host:8000,topic=k6
@@ -15,7 +21,7 @@ $ k6 run --out kafka=brokers=broker_host:8000,topic=k6
or if you want multiple brokers:
-
+
```shell
$ k6 --out kafka=brokers={broker1,broker2},topic=k6,format=json
@@ -26,7 +32,7 @@ $ k6 --out kafka=brokers={broker1,broker2},topic=k6,format=json
You can also specify the message `format` k6 will use. By default, it will be the same as the JSON output, but you can also use the InfluxDB line protocol for direct "consumption" by InfluxDB:
-
+
```shell
$ k6 --out kafka=brokers=someBroker,topic=someTopic,format=influxdb
@@ -36,10 +42,14 @@ $ k6 --out kafka=brokers=someBroker,topic=someTopic,format=influxdb
You can even modify some of the `format` settings such as `tagsAsFields`:
-
+
+## See also
+
+- [Integrating k6 with Apache Kafka](https://k6.io/blog/integrating-k6-with-apache-kafka)
\ No newline at end of file
diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/Cloud.md b/src/data/markdown/docs/01 guides/03 Results visualization/Cloud.md
new file mode 100755
index 0000000000..602c79d321
--- /dev/null
+++ b/src/data/markdown/docs/01 guides/03 Results visualization/Cloud.md
@@ -0,0 +1,76 @@
+---
+title: 'Cloud'
+excerpt: ''
+---
+Besides [running cloud tests](/cloud/creating-and-running-a-test/cloud-tests-from-the-cli), you can also stream your test results in real-time to the [k6 Cloud](/cloud).
+
+The k6 Cloud will pre-process your data, and you can visualize and analyze the results on the web app.
+
+## Instructions
+
+**1 (Optional) - Log in to the k6 Cloud**
+
+Assuming you have installed k6, the first step is to log in to k6 Cloud. You can use your [API token](https://app.k6.io/account/api-token) or username and password:
+
+
+
+**2 - Run the tests and upload the results**
+
+Now, k6 will authenticate you against the k6 Cloud, and you can use the `--out` option to send the k6 results to the k6 Cloud as:
+
+
+
+Alternatively, you could skip the `k6 login` command when using your [API token](https://app.k6.io/account/api-token) with the `k6 run` command as:
+
+
+
+![k6 Cloud Test Results](/images/k6-cloud-results.png)
+
+> **Analyzing results**
+>
+>
+>
+> If you want to know how the k6 Cloud analyzes the results and what visualization features it offers, read [Analyzing results on the k6 Cloud](/cloud/analyzing-results/overview).
+
+
+## See also
+
+- [Analyzing results on the k6 Cloud](/cloud/analyzing-results/overview)
+- [Running tests under a different project than your default one](/using-k6/cloud-execution#running-tests-under-a-different-project-than-your-default-one)
+- [Running cloud tests](/cloud/creating-and-running-a-test/cloud-tests-from-the-cli)
+- [Cloud test run status - Uploading results](/cloud/cloud-faq/test-status-codes#uploading-results)
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/DataDog.md b/src/data/markdown/docs/01 guides/03 Results visualization/DataDog.md
similarity index 82%
rename from src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/DataDog.md
rename to src/data/markdown/docs/01 guides/03 Results visualization/DataDog.md
index 1697d242b8..7f4d35b4f5 100755
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/DataDog.md
+++ b/src/data/markdown/docs/01 guides/03 Results visualization/DataDog.md
@@ -36,7 +36,7 @@ docker run -d \
-**Note**: Replace `` with your [Datadog API key](https://app.datadoghq.com/account/settings#api).
+Replace `` with your [Datadog API key](https://app.datadoghq.com/account/settings#api).
If your account is registered with Datadog EU, change the value of `DD_SITE` to `datadoghq.eu`.
@@ -48,7 +48,7 @@ For additional information, read the
```shell
-k6 run --out datadog script.js
+$ k6 run --out datadog script.js
```
@@ -89,7 +89,7 @@ To learn more about all the types of k6 metrics, read the [k6 Metrics guide](/us
-**Note**: the first time Datadog detects the `k6.http_reqs` metric, the k6 integration tile is installed automatically, and the default k6 dashboard is added to your dashboard list.
+The first time Datadog detects the `k6.http_reqs` metric, the k6 integration tile is installed automatically, and the default k6 dashboard is added to your dashboard list.
![k6 Datadog Dashboard](images/k6-datadog-dashboard.png)
@@ -100,6 +100,3 @@ Optionally, you can install the k6 integration tile following these instructions
3. Search for `k6`, then select the `k6` integration.
4. Click on the `Configuration` tab option.
5. Scroll down and click on the `Install integration` button.
-
-
-
diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/InfluxDB - Grafana.md b/src/data/markdown/docs/01 guides/03 Results visualization/InfluxDB - Grafana.md
index a9597f4962..089f220ecd 100644
--- a/src/data/markdown/docs/01 guides/03 Results visualization/InfluxDB - Grafana.md
+++ b/src/data/markdown/docs/01 guides/03 Results visualization/InfluxDB - Grafana.md
@@ -3,8 +3,9 @@ title: 'InfluxDB + Grafana'
excerpt: ''
---
-Want some graphs? It's simpler than you think, using InfluxDB for data storage and Grafana
-for visualization.
+You can use [Grafana](https://grafana.com/grafana/) for visualization of your k6 metrics.
+
+To use Grafana, you have to set up k6 to send the test result metrics to an [InfluxDB](https://github.com/influxdata/influxdb) instance and configure Grafana to query the [k6 metrics](/using-k6/metrics) from InfluxDB.
![Grafana Visualization](images/grafana-visualization.png)
@@ -45,7 +46,8 @@ $ brew install grafana
_After this, you should have an InfluxDB server running on localhost, listening on port 8086,
and a Grafana server on `http://localhost:3000_`
-## Using InfluxDB to store results
+## Run the test and upload the results to InfluxDB
+
k6 has built-in support for outputting results data directly to an InfluxDB database using
the `--out` (`-o`) switch:
@@ -84,7 +86,23 @@ create results visualizations.
statement to edit the metric:
![Edit metric](images/grafana-edit-metric.png)
-## Using our docker-compose setup
+## Preconfigured Grafana dashboards
+
+Here we will list premade Grafana dashboard configurations contributed by users, for use with k6.
+
+- [dcadwallader](https://grafana.com/grafana/dashboards/2587)
+- [Stian Øvrevåge](https://grafana.com/grafana/dashboards/4411)
+- [cyaiox](https://grafana.com/grafana/dashboards/8156)
+- [smockvavelsky](https://grafana.com/grafana/dashboards/10553)
+- [k m](https://grafana.com/grafana/dashboards/10660)
+
+Enabling a contributed Grafana dashboard is simple: choose to "import" a dashboard in the Grafana UI and then specify the ID number of the dashboard you want. See http://docs.grafana.org/reference/export_import/ for more details.
+
+
+![](images/grafana-dave.png)
+
+
+### Using our docker-compose setup
To make all the above even simpler, we have created a docker-compose setup that will:
@@ -110,12 +128,6 @@ $ docker-compose run -v \
Now you should be able to connect to localhost on port 3000 with your browser and access the
Grafana installation in the Docker container.
-## Preconfigured Grafana dashboards
-
-Here we will list premade Grafana dashboard configurations contributed by users, for use
-with k6. To enable a contributed Grafana dashboard is simple: you just choose to "import"
-a dashboard in the Grafana UI and then specify the ID number of the dashboard you want,
-see http://docs.grafana.org/reference/export_import/ for more details.
-
-![ID 2587, by Dave Cadwallader](images/grafana-dave.png)
-_https://grafana.com/dashboards/2587_
+## See also
+- [Tutorial about using k6 with InfluxDB and Grafana](https://k6.io/blog/k6-loves-grafana/)
+- [Comparison of k6 test result visualizations](https://k6.io/blog/comparison-of-k6-test-result-visualizations)
diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/JSON.md b/src/data/markdown/docs/01 guides/03 Results visualization/JSON.md
new file mode 100755
index 0000000000..4ffe3ec63f
--- /dev/null
+++ b/src/data/markdown/docs/01 guides/03 Results visualization/JSON.md
@@ -0,0 +1,247 @@
+---
+title: 'JSON'
+excerpt: ''
+---
+
+You can also make k6 output detailed statistics in JSON format by using the `--out/-o` option for `k6 run`, like this:
+
+
+
+Each line will either contain information about a metric, or log a data point (sample) for a metric. Lines consist of three items:
+
+- `type` - can have the values [Metric](#metric) or [Point](#point) where `Metric` means the line is declaring a metric, and `Point` is an actual data point (sample) for a metric.
+- `data` - is a dictionary that contains lots of stuff, varying depending on the `"type"` above.
+- `metric` - the name of the metric.
+
+### Metric
+
+This line contains information about the nature of a metric. Here, `"data"` will contain the following:
+
+- `"type"` - the metric type ("gauge", "rate", "counter" or "trend")
+- `"contains"` - information on the type of data collected (can e.g. be "time" for timing metrics)
+- `"tainted"` - has this metric caused a threshold to fail?
+- `"threshold"` - are there any thresholds attached to this metric?
+- `"submetrics"` - any derived metrics created as a result of adding a threshold using tags.
+
+### Point
+
+This line contains actual data samples. Here, `"data"` will contain these fields:
+
+- `"time"` - timestamp when the sample was collected
+- `"value"` - the actual data sample; time values are in milliseconds
+- `"tags"` - dictionary with tagname-tagvalue pairs that can be used when filtering results data
+
+## Processing JSON output
+
+We recommend using [jq][jq_url] to process the k6 JSON output. [jq][jq_url] is a lightweight and flexible command-line JSON processor.
+
+You can quickly create [filters][jq_filters_url] to return a particular metric of the JSON file:
+
+
+
+```shell
+$ jq '. | select(.type=="Point" and .metric == "http_req_duration" and .data.tags.status >= "200")' myscript-output.json
+```
+
+
+
+And calculate an aggregated value of any metric:
+
+
+
+```shell
+$ jq '. | select(.type=="Point" and .metric == "http_req_duration" and .data.tags.status >= "200") | .data.value' myscript-output.json | jq -s min
+```
+
+
+
+
+
+```shell
+$ jq '. | select(.type=="Point" and .metric == "http_req_duration" and .data.tags.status >= "200") | .data.value' myscript-output.json | jq -s max
+```
+
+
+
+For more advanced cases, check out the [jq Manual][jq_manual_url]
+
+[jq_url]: https://stedolan.github.io/jq/ 'jq_url'
+[jq_filters_url]: https://stedolan.github.io/jq/manual/#Basicfilters 'jq_filters_url'
+[jq_manual_url]: https://stedolan.github.io/jq/manual/ 'jq_manual_url'
+
+## Summary export
+
+> _New in v0.26.0_
+
+The `--summary-export` option of the `k6 run` command can export the end-of-test summary report to a JSON file that includes data for all test metrics, checks and thresholds.
+
+This is useful to get the aggregated test results in a machine-readable format, for integration with dashboards, external alerts, etc.
+
+
+
+```shell
+$ k6 run --summary-export=export.json script.js
+```
+
+```shell
+# you can use the `summary-export` option together with other outputs (Kafka, Datadog, Cloud, InfluxDB, JSON...)
+$ k6 run --summary-export=export.json --out datadog script.js
+```
+
+
+
+## See also
+
+- [Metrics](/using-k6/metrics)
\ No newline at end of file
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/StatsD.md b/src/data/markdown/docs/01 guides/03 Results visualization/StatsD.md
similarity index 95%
rename from src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/StatsD.md
rename to src/data/markdown/docs/01 guides/03 Results visualization/StatsD.md
index ff24e6cb8c..08703ed774 100755
--- a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/StatsD.md
+++ b/src/data/markdown/docs/01 guides/03 Results visualization/StatsD.md
@@ -8,7 +8,7 @@ k6 can also push the metrics to a [StatsD](https://github.com/statsd/statsd) ser
```shell
-k6 run --out statsd script.js
+$ k6 run --out statsd script.js
```
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/dashboard-listing-with-k6-dashboard.png b/src/data/markdown/docs/01 guides/03 Results visualization/images/dashboard-listing-with-k6-dashboard.png
similarity index 100%
rename from src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/dashboard-listing-with-k6-dashboard.png
rename to src/data/markdown/docs/01 guides/03 Results visualization/images/dashboard-listing-with-k6-dashboard.png
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/datadog-performance-testing-metrics.png b/src/data/markdown/docs/01 guides/03 Results visualization/images/datadog-performance-testing-metrics.png
similarity index 100%
rename from src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/datadog-performance-testing-metrics.png
rename to src/data/markdown/docs/01 guides/03 Results visualization/images/datadog-performance-testing-metrics.png
diff --git a/src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/k6-datadog-dashboard.png b/src/data/markdown/docs/01 guides/03 Results visualization/images/k6-datadog-dashboard.png
similarity index 100%
rename from src/data/markdown/docs/01 guides/01 Getting started/04 Results Output/images/k6-datadog-dashboard.png
rename to src/data/markdown/docs/01 guides/03 Results visualization/images/k6-datadog-dashboard.png
diff --git a/src/data/markdown/docs/01 guides/03 Results visualization/k6 Cloud Test Results.md b/src/data/markdown/docs/01 guides/03 Results visualization/k6 Cloud Test Results.md
deleted file mode 100644
index f7f13fb3da..0000000000
--- a/src/data/markdown/docs/01 guides/03 Results visualization/k6 Cloud Test Results.md
+++ /dev/null
@@ -1,136 +0,0 @@
----
-title: "k6 Cloud Test Results"
-excerpt: ""
----
-
-## Insights introduction
-
-We offer a SaaS solution for k6 results storage, analysis and trending. It is
-called _[k6 Cloud Test Results](https://k6.io/cloud/)_. Test Results allows you to run
-your load tests locally using k6 while streaming your results to [k6.io](https://k6.io)
-where you can look at the test in real-time and also perform detailed results analysis.
-
-As part of its SaaS solution, k6 Cloud also offers
-[distributed execution from the cloud](/using-k6/cloud-execution), to run larger and/or
-geographically distributed tests.
-
-![k6 Cloud Test Results](/images/k6-cloud-results.png)
-
-This means you can run your own load generator and test internal systems, but benefit from a more
-powerful and convenient SaaS solution when you want to store, share and analyze test results.
-
-You can also utilize the k6.io cloud to *execute* tests (but such tests will require that
-the target system is reachable from the public Internet).
-
-## Getting started with k6 and k6 Cloud Insights
-
-- First you need to [create a k6.io account](https://app.k6.io/account/register)
-
-- Once logged in, click the "CLI" link in the left menu (at the bottom).
-
-![onboarding screen](/images/cli-get-started.png)
-
-- Follow the instructions on screen to get k6 installed and setup for steaming test results.
-
-![CLI Screen](/images/cli-instructions.png)
-
-If you have already installed k6 you only need to make sure you have it configured with access to
-your k6 Cloud API token. You do that by either running `k6 login cloud` and entering your k6 Cloud
-account details or you set the `K6_CLOUD_TOKEN` to [your API token value](https://app.k6.io/account/api-token)
-(you can also use the API token to login from the command-line using
-`k6 login cloud -t `).
-
-Don't forget to add `-o cloud` to specify that you want the test results streamed to Insights!
-
-
-
-
-```shell
-$ K6_CLOUD_TOKEN=c3b391149764640ed7d51476cd34a947f0d0762552a5bae79ee10b07ee84c1f7 k6 run -o cloud script.js
-```
-
-```shell
-$ docker run -i \
- -e "K6CLOUD_TOKEN=c3b391149764640ed7d51476cd34a947f0d0762552a5bae79ee10b07ee84c1f7" \
- loadimpact/k6 \
- run -o cloud -
-
-Now your test should be running, and results should be streamed live to k6.io. k6 will output
-something like this when it starts:
-
-![k6 Cloud Test Results CLI Output](images/k6-cloud-output.png)
-
-As you can see in the screen shot above, k6 will tell you that you can use the URL
-`https://app.k6.io/runs/123` to go directly to the test result/analysis page where
-results will be updated continuously throughout the test.
-
-## Streaming results to a project other than the default one
-
-By default tests and test runs will be created and run under your default project, in your default
-organization. To create and run tests under a different project, whether under your default
-organization or one you've been invited to, you have two options:
-
-1. You can specify the project ID in the script options:
-
-
-
-2. You can set the `K6_CLOUD_PROJECT_ID` environment variable when running the test.
-
- You find the ID of a k6 Cloud project by selecting it in the UI and looking
- in the URL bar of your browser, the `12345` in `https://app.k6.io/projects/12345`
- is the project ID.
-
-## Insight service run states
-
-When you run a k6 test run against k6 Cloud Insights, data will be continuously streamed
-to the cloud. While this happens the test's *run state* will be marked as `Running`. A test run
-that ran its course will be marked `Finished`. The run state has nothing to do with the test
-passing any of its checks, only that the test itself is operating correctly.
-
-If you deliberately abort your test (e.g. by pressing *Ctrl-C*), it will still be considered
-`Finished` by Insights. You can still look and analyze the test data you streamed so far. The test
-will just have run shorter than originally planned.
-
-Another possibility would be if you lose network connection with k6 Cloud Insights while your
-test is running. In that case Insights will patiently wait for you to reconnect. In the meanwhile
-your test's run state will continue to appear as `Running` on the Insights web overview. If no
-reconnection happens,
-
-Insights will time out after two minutes of no data, setting the run state to `Timed out`. You can
-still analyze a timed out test but you'll of course only have access to as much data as was
-streamed before the network issue.
-
-## Aggregation
-
-Since version 0.21.0, k6 supports partial aggregation of metrics streamed to k6 Cloud Test Results
-for reduced bandwidth usage and processing times. To enable this, set the environment
-variable `K6_CLOUD_AGGREGATION_PERIOD` to the aggregation period you want, for example `1s`. If
-there are more than a certain number of HTTP metrics for a period (100 by default, but it can be
-modified by setting `K6_CLOUD_AGGREGATION_MIN_SAMPLES`), they are partially aggregated. It is
-important to note that outlier metrics are automatically detected and sent separately, they are
-never aggregated.
-
-## Cloud/Distributed execution
-
-k6 also offers a commercial cloud service, k6 Cloud, for distributed execution of k6 tests. See
-the [Cloud execution](/cloud) page for more information.
-
-## More information
-
-- [k6 Cloud Test Results at k6.io](https://k6.io/cloud)
-- Detailed information about k6 Cloud Test Results [analysis/results view](/cloud)
diff --git a/src/data/markdown/docs/01 guides/05 Testing Guides/03 Automated Performance testing.md b/src/data/markdown/docs/01 guides/05 Testing Guides/03 Automated Performance testing.md
index df420c073d..1ba2a89fbf 100644
--- a/src/data/markdown/docs/01 guides/05 Testing Guides/03 Automated Performance testing.md
+++ b/src/data/markdown/docs/01 guides/05 Testing Guides/03 Automated Performance testing.md
@@ -271,7 +271,7 @@ If you’re running your tests from k6 Cloud then you can make use of our [Slack
We have written CI tool specific guides following the steps mentioned above:
-
## See also
diff --git a/src/data/markdown/docs/03 cloud/06 Cloud FAQ/11 Test status codes.md b/src/data/markdown/docs/03 cloud/06 Cloud FAQ/11 Test status codes.md
index 580be63b4e..50d273e124 100644
--- a/src/data/markdown/docs/03 cloud/06 Cloud FAQ/11 Test status codes.md
+++ b/src/data/markdown/docs/03 cloud/06 Cloud FAQ/11 Test status codes.md
@@ -72,3 +72,13 @@ A test that has exceeded one or more of the following limits:
- The max VUs is higher than 20,000 VUs (for tests higher than 20k, please contact us)
If your test has too many groups, please reduce their number. If your test has too many metrics, please use URL grouping to combine similar URLs. You should also remove external requests from your test script. Each URL captured will account for 7 individual metrics that we keep track of. External requests can quickly produce a large number of metrics that aren't helpful to the understanding performance of the System Under Test.
+
+## Uploading results
+
+When you send the [k6 results to the k6 Cloud](/results-visualization/cloud), data will be continuously streamed to the cloud. While this happens the state of the test run will be marked as `Running`. A test run that ran its course will be marked `Finished`. The run state has nothing to do with the test passing any thresholds, only that the test itself is operating correctly.
+
+If you deliberately abort your test (e.g. by pressing *Ctrl-C*), it will still be considered `Finished`. You can still look at and analyze the test data you streamed so far. The test will just have run shorter than originally planned.
+
+Another possibility would be if you lose network connection with the k6 Cloud while your test is running. In that case the k6 Cloud will patiently wait for you to reconnect. In the meantime your test's run state will continue to appear as `Running` on the web app.
+
+If no reconnection happens, the k6 Cloud will time out after two minutes of no data, setting the run state to `Timed out`. You can still analyze a timed out test but you'll of course only have access to as much data as was streamed before the network issue.
\ No newline at end of file
diff --git a/src/data/markdown/docs/04 integrations/02 CI tools/04 azure.md b/src/data/markdown/docs/04 integrations/02 CI tools/01 azure.md
similarity index 100%
rename from src/data/markdown/docs/04 integrations/02 CI tools/04 azure.md
rename to src/data/markdown/docs/04 integrations/02 CI tools/01 azure.md
diff --git a/src/data/markdown/docs/04 integrations/02 CI tools/02 circleci.md b/src/data/markdown/docs/04 integrations/02 CI tools/02 circleci.md
index 7860963f99..aadb5d1a22 100644
--- a/src/data/markdown/docs/04 integrations/02 CI tools/02 circleci.md
+++ b/src/data/markdown/docs/04 integrations/02 CI tools/02 circleci.md
@@ -1,4 +1,4 @@
---
-title: 'Circleci'
+title: 'CircleCI'
redirect: 'https://k6.io/blog/integrating-load-testing-with-circleci'
---
diff --git a/src/data/markdown/docs/04 integrations/02 CI tools/06 github.md b/src/data/markdown/docs/04 integrations/02 CI tools/03 github.md
similarity index 100%
rename from src/data/markdown/docs/04 integrations/02 CI tools/06 github.md
rename to src/data/markdown/docs/04 integrations/02 CI tools/03 github.md
diff --git a/src/data/markdown/docs/04 integrations/02 CI tools/03 gitlab.md b/src/data/markdown/docs/04 integrations/02 CI tools/04 gitlab.md
similarity index 82%
rename from src/data/markdown/docs/04 integrations/02 CI tools/03 gitlab.md
rename to src/data/markdown/docs/04 integrations/02 CI tools/04 gitlab.md
index 93a7152738..526c9b7c6f 100644
--- a/src/data/markdown/docs/04 integrations/02 CI tools/03 gitlab.md
+++ b/src/data/markdown/docs/04 integrations/02 CI tools/04 gitlab.md
@@ -1,4 +1,4 @@
---
-title: 'Gitlab'
+title: 'GitLab'
redirect: 'https://k6.io/blog/integrating-load-testing-with-gitlab'
---
diff --git a/src/data/markdown/docs/04 integrations/02 CI tools/01 jenkins.md b/src/data/markdown/docs/04 integrations/02 CI tools/05 jenkins.md
similarity index 100%
rename from src/data/markdown/docs/04 integrations/02 CI tools/01 jenkins.md
rename to src/data/markdown/docs/04 integrations/02 CI tools/05 jenkins.md
diff --git a/src/data/markdown/docs/04 integrations/02 CI tools/05 teamcity.md b/src/data/markdown/docs/04 integrations/02 CI tools/06 teamcity.md
similarity index 100%
rename from src/data/markdown/docs/04 integrations/02 CI tools/05 teamcity.md
rename to src/data/markdown/docs/04 integrations/02 CI tools/06 teamcity.md
diff --git a/src/data/markdown/docs/04 integrations/04 Result Store/01 json.md b/src/data/markdown/docs/04 integrations/04 Result Store/01 json.md
deleted file mode 100644
index 11d4184446..0000000000
--- a/src/data/markdown/docs/04 integrations/04 Result Store/01 json.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: 'JSON'
-redirect: 'https://k6.io/docs/getting-started/results-output/json'
----
diff --git a/src/data/markdown/docs/04 integrations/04 Result Store/02 influxdb-grafana.md b/src/data/markdown/docs/04 integrations/04 Result Store/02 influxdb-grafana.md
deleted file mode 100644
index 68657cad97..0000000000
--- a/src/data/markdown/docs/04 integrations/04 Result Store/02 influxdb-grafana.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: 'InfluxDB/Grafana'
-redirect: 'https://k6.io/docs/getting-started/results-output/influxdb'
----
diff --git a/src/data/markdown/docs/04 integrations/04 Result Store/03 kafka.md b/src/data/markdown/docs/04 integrations/04 Result Store/03 kafka.md
deleted file mode 100644
index 7411be05f4..0000000000
--- a/src/data/markdown/docs/04 integrations/04 Result Store/03 kafka.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: 'Apache Kafka'
-redirect: 'https://k6.io/docs/getting-started/results-output/apache-kafka'
----
diff --git a/src/data/markdown/docs/04 integrations/04 Result Store/04 datadog.md b/src/data/markdown/docs/04 integrations/04 Result Store/04 datadog.md
deleted file mode 100644
index 1f285f360d..0000000000
--- a/src/data/markdown/docs/04 integrations/04 Result Store/04 datadog.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: 'DataDog'
-redirect: 'https://k6.io/docs/getting-started/results-output/datadog'
----
diff --git a/src/data/markdown/docs/04 integrations/04 Result Store/05 cloud-service.md b/src/data/markdown/docs/04 integrations/04 Result Store/05 cloud-service.md
deleted file mode 100644
index bc462e7c1c..0000000000
--- a/src/data/markdown/docs/04 integrations/04 Result Store/05 cloud-service.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: 'k6 Cloud'
-redirect: 'https://k6.io/docs/getting-started/results-output/cloud'
----
diff --git a/src/data/markdown/docs/04 integrations/04 Results visualization/01 kafka.md b/src/data/markdown/docs/04 integrations/04 Results visualization/01 kafka.md
new file mode 100644
index 0000000000..21e9314c0e
--- /dev/null
+++ b/src/data/markdown/docs/04 integrations/04 Results visualization/01 kafka.md
@@ -0,0 +1,4 @@
+---
+title: 'Apache Kafka'
+redirect: 'https://k6.io/docs/results-visualization/apache-kafka'
+---
diff --git a/src/data/markdown/docs/04 integrations/04 Results visualization/02 cloud-service.md b/src/data/markdown/docs/04 integrations/04 Results visualization/02 cloud-service.md
new file mode 100644
index 0000000000..bbec118a89
--- /dev/null
+++ b/src/data/markdown/docs/04 integrations/04 Results visualization/02 cloud-service.md
@@ -0,0 +1,4 @@
+---
+title: 'Cloud'
+redirect: 'https://k6.io/docs/results-visualization/cloud'
+---
diff --git a/src/data/markdown/docs/04 integrations/04 Results visualization/03 datadog.md b/src/data/markdown/docs/04 integrations/04 Results visualization/03 datadog.md
new file mode 100644
index 0000000000..4706ef7230
--- /dev/null
+++ b/src/data/markdown/docs/04 integrations/04 Results visualization/03 datadog.md
@@ -0,0 +1,4 @@
+---
+title: 'Datadog'
+redirect: 'https://k6.io/docs/results-visualization/datadog'
+---
diff --git a/src/data/markdown/docs/04 integrations/04 Results visualization/04 influxdb-grafana.md b/src/data/markdown/docs/04 integrations/04 Results visualization/04 influxdb-grafana.md
new file mode 100644
index 0000000000..da7337d442
--- /dev/null
+++ b/src/data/markdown/docs/04 integrations/04 Results visualization/04 influxdb-grafana.md
@@ -0,0 +1,4 @@
+---
+title: 'InfluxDB + Grafana'
+redirect: 'https://k6.io/docs/results-visualization/influxdb-+-grafana'
+---
diff --git a/src/data/markdown/docs/04 integrations/04 Results visualization/05 json.md b/src/data/markdown/docs/04 integrations/04 Results visualization/05 json.md
new file mode 100644
index 0000000000..8dea41c810
--- /dev/null
+++ b/src/data/markdown/docs/04 integrations/04 Results visualization/05 json.md
@@ -0,0 +1,4 @@
+---
+title: 'JSON'
+redirect: 'https://k6.io/docs/results-visualization/json'
+---
diff --git a/src/data/markdown/docs/04 integrations/04 Results visualization/06 statsd.md b/src/data/markdown/docs/04 integrations/04 Results visualization/06 statsd.md
new file mode 100644
index 0000000000..410a788edd
--- /dev/null
+++ b/src/data/markdown/docs/04 integrations/04 Results visualization/06 statsd.md
@@ -0,0 +1,4 @@
+---
+title: 'StatsD'
+redirect: 'https://k6.io/docs/results-visualization/statsd'
+---
diff --git a/src/svg/kafka.inline.svg b/src/svg/kafka.inline.svg
index 16aee60acb..a603b732f8 100644
--- a/src/svg/kafka.inline.svg
+++ b/src/svg/kafka.inline.svg
@@ -1 +1 @@
-
+
diff --git a/src/svg/statsd.inline.svg b/src/svg/statsd.inline.svg
new file mode 100644
index 0000000000..687022b3a8
--- /dev/null
+++ b/src/svg/statsd.inline.svg
@@ -0,0 +1,79 @@
+
+
+
\ No newline at end of file
diff --git a/src/templates/docs/integrations.js b/src/templates/docs/integrations.js
index 37cea7c460..930223e7b3 100644
--- a/src/templates/docs/integrations.js
+++ b/src/templates/docs/integrations.js
@@ -26,6 +26,7 @@ import Grafana from 'svg/grafana.inline.svg';
import Influx from 'svg/influx.inline.svg';
import Datadog from 'svg/datadog.inline.svg';
import Kafka from 'svg/kafka.inline.svg';
+import StatsD from 'svg/statsd.inline.svg';
import Loadimpact from 'svg/loadimpact.inline.svg';
import SeoMetadata from 'utils/seo-metadata';
import { blog, main } from 'utils/urls';
@@ -194,35 +195,35 @@ export default function({ pageContext: { sidebarTree, navLinks } }) {
}
iconsData={[
{
- Icon: Jenkins,
- name: 'Jenkins',
- link: `${blog}/integrating-load-testing-with-jenkins`,
+ Icon: Azure,
+ name: 'Azure Pipelines',
+ link: `${blog}/integrating-load-testing-with-azure-pipelines`,
},
{
Icon: CircleCI,
name: 'CircleCI',
link: `${blog}/integrating-load-testing-with-circleci`,
},
+ {
+ Icon: GitHub,
+ name: 'GitHub Actions',
+ link: `${blog}/load-testing-using-github-actions`,
+ },
{
Icon: Gitlab,
- name: 'Gitlab',
+ name: 'GitLab',
link: `${blog}/integrating-load-testing-with-gitlab`,
},
{
- Icon: Azure,
- name: 'Azure Pipelines',
- link: `${blog}/integrating-load-testing-with-azure-pipelines`,
+ Icon: Jenkins,
+ name: 'Jenkins',
+ link: `${blog}/integrating-load-testing-with-jenkins`,
},
{
Icon: TeamCity,
name: 'TeamCity',
link: `${blog}/load-testing-using-teamcity-and-k6`,
},
- {
- Icon: GitHub,
- name: 'GitHub Actions',
- link: `${blog}/load-testing-using-github-actions`,
- },
]}
/>
(
@@ -265,26 +276,24 @@ export default function({ pageContext: { sidebarTree, navLinks } }) {