diff --git a/.buildkite/pull_requests.json b/.buildkite/pull_requests.json index 1a4c6fec293e8..cc80a7b2fc27e 100644 --- a/.buildkite/pull_requests.json +++ b/.buildkite/pull_requests.json @@ -36,7 +36,8 @@ "always_require_ci_on_changed": [ "^docs/developer/plugin-list.asciidoc$", "/plugins/[^/]+/readme\\.(md|asciidoc)$" - ] + ], + "kibana_versions_check": true } ] } diff --git a/.buildkite/scripts/steps/artifacts/cloud.sh b/.buildkite/scripts/steps/artifacts/cloud.sh index da90306f3f63d..7088228bd9030 100644 --- a/.buildkite/scripts/steps/artifacts/cloud.sh +++ b/.buildkite/scripts/steps/artifacts/cloud.sh @@ -56,8 +56,8 @@ CLOUD_DEPLOYMENT_PASSWORD=$(jq -r --slurp '.[]|select(.resources).resources[] | CLOUD_DEPLOYMENT_ID=$(jq -r --slurp '.[0].id' "$LOGS") CLOUD_DEPLOYMENT_STATUS_MESSAGES=$(jq --slurp '[.[]|select(.resources == null)]' "$LOGS") -CLOUD_DEPLOYMENT_KIBANA_URL=$(ecctl deployment show "$CLOUD_DEPLOYMENT_ID" | jq -r '.resources.kibana[0].info.metadata.aliased_url') -CLOUD_DEPLOYMENT_ELASTICSEARCH_URL=$(ecctl deployment show "$CLOUD_DEPLOYMENT_ID" | jq -r '.resources.elasticsearch[0].info.metadata.aliased_url') +export CLOUD_DEPLOYMENT_KIBANA_URL=$(ecctl deployment show "$CLOUD_DEPLOYMENT_ID" | jq -r '.resources.kibana[0].info.metadata.aliased_url') +export CLOUD_DEPLOYMENT_ELASTICSEARCH_URL=$(ecctl deployment show "$CLOUD_DEPLOYMENT_ID" | jq -r '.resources.elasticsearch[0].info.metadata.aliased_url') echo "Kibana: $CLOUD_DEPLOYMENT_KIBANA_URL" echo "ES: $CLOUD_DEPLOYMENT_ELASTICSEARCH_URL" @@ -68,15 +68,15 @@ function shutdown { } trap "shutdown" EXIT -export TEST_KIBANA_PROTOCOL=$(node -e "console.log(new URL('$CLOUD_DEPLOYMENT_KIBANA_URL').protocol.replace(':', ''))") -export TEST_KIBANA_HOSTNAME=$(node -e "console.log(new URL('$CLOUD_DEPLOYMENT_KIBANA_URL').hostname)") -export TEST_KIBANA_PORT=$(node -e "console.log(new URL('$CLOUD_DEPLOYMENT_KIBANA_URL').port)") +export TEST_KIBANA_PROTOCOL=$(node -e "console.log(new URL(process.env.CLOUD_DEPLOYMENT_KIBANA_URL).protocol.replace(':', ''))") +export TEST_KIBANA_HOSTNAME=$(node -e "console.log(new URL(process.env.CLOUD_DEPLOYMENT_KIBANA_URL).hostname)") +export TEST_KIBANA_PORT=$(node -e "console.log(new URL(process.env.CLOUD_DEPLOYMENT_KIBANA_URL).port || 443)") export TEST_KIBANA_USERNAME="$CLOUD_DEPLOYMENT_USERNAME" export TEST_KIBANA_PASSWORD="$CLOUD_DEPLOYMENT_PASSWORD" -export TEST_ES_PROTOCOL=$(node -e "console.log(new URL('$CLOUD_DEPLOYMENT_ELASTICSEARCH_URL').protocol.replace(':', ''))") -export TEST_ES_HOSTNAME=$(node -e "console.log(new URL('$CLOUD_DEPLOYMENT_ELASTICSEARCH_URL').hostname)") -export TEST_ES_PORT=$(node -e "console.log(new URL('$CLOUD_DEPLOYMENT_ELASTICSEARCH_URL').port)") +export TEST_ES_PROTOCOL=$(node -e "console.log(new URL(process.env.CLOUD_DEPLOYMENT_ELASTICSEARCH_URL).protocol.replace(':', ''))") +export TEST_ES_HOSTNAME=$(node -e "console.log(new URL(process.env.CLOUD_DEPLOYMENT_ELASTICSEARCH_URL).hostname)") +export TEST_ES_PORT=$(node -e "console.log(new URL(process.env.CLOUD_DEPLOYMENT_ELASTICSEARCH_URL).port || 443)") export TEST_ES_USERNAME="$CLOUD_DEPLOYMENT_USERNAME" export TEST_ES_PASSWORD="$CLOUD_DEPLOYMENT_PASSWORD" diff --git a/.buildkite/scripts/steps/artifacts/docker_context.sh b/.buildkite/scripts/steps/artifacts/docker_context.sh index d7bbe2ecb27cf..1195d7ad5dc38 100755 --- a/.buildkite/scripts/steps/artifacts/docker_context.sh +++ b/.buildkite/scripts/steps/artifacts/docker_context.sh @@ -26,5 +26,7 @@ fi tar -xf "target/$DOCKER_CONTEXT_FILE" -C "$DOCKER_BUILD_FOLDER" cd 
$DOCKER_BUILD_FOLDER +buildkite-agent artifact download "kibana-$FULL_VERSION-linux-x86_64.tar.gz" . --build "${KIBANA_BUILD_ID:-$BUILDKITE_BUILD_ID}" + echo "--- Build context" docker build . diff --git a/.buildkite/scripts/steps/code_coverage/ftr_configs.sh b/.buildkite/scripts/steps/code_coverage/ftr_configs.sh index 393b1fbe1c1d3..58b17791cbea8 100755 --- a/.buildkite/scripts/steps/code_coverage/ftr_configs.sh +++ b/.buildkite/scripts/steps/code_coverage/ftr_configs.sh @@ -116,7 +116,7 @@ printf "%s\n" "${results[@]}" echo "" # So the last step "knows" this config ran -uploadRanFile "ftr_configs" +uploadRanFile "functional" # Force exit 0 to ensure the next build step starts. exit 0 diff --git a/.buildkite/scripts/steps/code_coverage/ingest.sh b/.buildkite/scripts/steps/code_coverage/ingest.sh index a39097f706262..34d54c4d61b09 100755 --- a/.buildkite/scripts/steps/code_coverage/ingest.sh +++ b/.buildkite/scripts/steps/code_coverage/ingest.sh @@ -8,59 +8,103 @@ source .buildkite/scripts/steps/code_coverage/merge.sh export CODE_COVERAGE=1 echo "--- Reading Kibana stats cluster creds from vault" -export USER_FROM_VAULT="$(retry 5 5 vault read -field=username secret/kibana-issues/prod/coverage/elasticsearch)" -export PASS_FROM_VAULT="$(retry 5 5 vault read -field=password secret/kibana-issues/prod/coverage/elasticsearch)" -export HOST_FROM_VAULT="$(retry 5 5 vault read -field=host secret/kibana-issues/prod/coverage/elasticsearch)" -export TIME_STAMP=$(date +"%Y-%m-%dT%H:%M:00Z") - -echo "--- Print KIBANA_DIR" -echo "### KIBANA_DIR: $KIBANA_DIR" +USER_FROM_VAULT="$(retry 5 5 vault read -field=username secret/kibana-issues/prod/coverage/elasticsearch)" +export USER_FROM_VAULT +PASS_FROM_VAULT="$(retry 5 5 vault read -field=password secret/kibana-issues/prod/coverage/elasticsearch)" +export PASS_FROM_VAULT +HOST_FROM_VAULT="$(retry 5 5 vault read -field=host secret/kibana-issues/prod/coverage/elasticsearch)" +export HOST_FROM_VAULT +TIME_STAMP=$(date +"%Y-%m-%dT%H:%M:00Z") +export TIME_STAMP echo "--- Download previous git sha" .buildkite/scripts/steps/code_coverage/reporting/downloadPrevSha.sh -previousSha=$(cat downloaded_previous.txt) +PREVIOUS_SHA=$(cat downloaded_previous.txt) echo "--- Upload new git sha" .buildkite/scripts/steps/code_coverage/reporting/uploadPrevSha.sh .buildkite/scripts/bootstrap.sh -echo "--- Download coverage artifacts" -buildkite-agent artifact download target/kibana-coverage/jest/* . -#buildkite-agent artifact download target/kibana-coverage/functional/* . -buildkite-agent artifact download target/ran_files/* . 
-ls -l target/ran_files/* || echo "### No ran-files found" - -echo "--- process HTML Links" -.buildkite/scripts/steps/code_coverage/reporting/prokLinks.sh - -echo "--- collect VCS Info" -.buildkite/scripts/steps/code_coverage/reporting/collectVcsInfo.sh - -echo "--- Jest: Reset file paths prefix, merge coverage files, and generate the final combined report" -# Jest: Reset file paths prefix to Kibana Dir of final worker -replacePaths "$KIBANA_DIR/target/kibana-coverage/jest" "CC_REPLACEMENT_ANCHOR" "$KIBANA_DIR" -yarn nyc report --nycrc-path src/dev/code_coverage/nyc_config/nyc.jest.config.js - -#echo "--- Functional: Reset file paths prefix, merge coverage files, and generate the final combined report" -# Functional: Reset file paths prefix to Kibana Dir of final worker -#set +e -#sed -ie "s|CC_REPLACEMENT_ANCHOR|${KIBANA_DIR}|g" target/kibana-coverage/functional/*.json -#echo "--- Begin Split and Merge for Functional" -#splitCoverage target/kibana-coverage/functional -#splitMerge -#set -e - -echo "--- Archive and upload combined reports" -collectAndUpload target/kibana-coverage/jest/kibana-jest-coverage.tar.gz \ - target/kibana-coverage/jest-combined -#collectAndUpload target/kibana-coverage/functional/kibana-functional-coverage.tar.gz \ -# target/kibana-coverage/functional-combined - -echo "--- Upload coverage static site" -.buildkite/scripts/steps/code_coverage/reporting/uploadStaticSite.sh - -echo "--- Ingest results to Kibana stats cluster" -.buildkite/scripts/steps/code_coverage/reporting/ingestData.sh 'elastic+kibana+code-coverage' \ - ${BUILDKITE_BUILD_NUMBER} ${BUILDKITE_BUILD_URL} ${previousSha} \ - 'src/dev/code_coverage/ingest_coverage/team_assignment/team_assignments.txt' +collectRan() { + buildkite-agent artifact download target/ran_files/* . + + while read -r x; do + ran=("${ran[@]}" "$(cat "$x")") + done <<<"$(find target/ran_files -maxdepth 1 -type f -name '*.txt')" + + echo "--- Collected Ran files: ${ran[*]}" +} + +uniqueifyRanConfigs() { + local xs=("$@") + local xss + xss=$(printf "%s\n" "${xs[@]}" | sort -u | tr '\n' ' ' | xargs) # xargs trims whitespace + uniqRanConfigs=("$xss") + echo "--- Uniq Ran files: ${uniqRanConfigs[*]}" +} + +fetchArtifacts() { + echo "--- Fetch coverage artifacts" + + local xs=("$@") + for x in "${xs[@]}"; do + buildkite-agent artifact download "target/kibana-coverage/${x}/*" . 
+ done +} + +archiveReports() { + echo "--- Archive and upload combined reports" + + local xs=("$@") + for x in "${xs[@]}"; do + echo "### Collect and Upload for: ${x}" +# fileHeads "target/file-heads-archive-reports-for-${x}.txt" "target/kibana-coverage/${x}" +# dirListing "target/dir-listing-${x}-combined-during-archiveReports.txt" target/kibana-coverage/${x}-combined +# dirListing "target/dir-listing-${x}-during-archiveReports.txt" target/kibana-coverage/${x} + collectAndUpload "target/kibana-coverage/${x}/kibana-${x}-coverage.tar.gz" "target/kibana-coverage/${x}-combined" + done +} + +mergeAll() { + local xs=("$@") + + for x in "${xs[@]}"; do + if [ "$x" == "jest" ]; then + echo "--- [$x]: Reset file paths prefix, merge coverage files, and generate the final combined report" + replacePaths "$KIBANA_DIR/target/kibana-coverage/jest" "CC_REPLACEMENT_ANCHOR" "$KIBANA_DIR" + yarn nyc report --nycrc-path src/dev/code_coverage/nyc_config/nyc.jest.config.js + elif [ "$x" == "functional" ]; then + echo "---[$x] : Reset file paths prefix, merge coverage files, and generate the final combined report" + set +e + sed -ie "s|CC_REPLACEMENT_ANCHOR|${KIBANA_DIR}|g" target/kibana-coverage/functional/*.json + echo "--- Begin Split and Merge for Functional" + splitCoverage target/kibana-coverage/functional + splitMerge + set -e + fi + done +} + +modularize() { + collectRan + if [ -d target/ran_files ]; then + uniqueifyRanConfigs "${ran[@]}" + fetchArtifacts "${uniqRanConfigs[@]}" + mergeAll "${uniqRanConfigs[@]}" + archiveReports "${uniqRanConfigs[@]}" + .buildkite/scripts/steps/code_coverage/reporting/prokLinks.sh "${uniqRanConfigs[@]}" + .buildkite/scripts/steps/code_coverage/reporting/uploadStaticSite.sh "${uniqRanConfigs[@]}" + .buildkite/scripts/steps/code_coverage/reporting/collectVcsInfo.sh + source .buildkite/scripts/steps/code_coverage/reporting/ingestData.sh 'elastic+kibana+code-coverage' \ + "${BUILDKITE_BUILD_NUMBER}" "${BUILDKITE_BUILD_URL}" "${PREVIOUS_SHA}" \ + 'src/dev/code_coverage/ingest_coverage/team_assignment/team_assignments.txt' + ingestModular "${uniqRanConfigs[@]}" + else + echo "--- Found zero configs that ran, cancelling ingestion." + exit 11 + fi +} + +modularize +echo "### unique ran configs: ${uniqRanConfigs[*]}" diff --git a/.buildkite/scripts/steps/code_coverage/jest_integration.sh b/.buildkite/scripts/steps/code_coverage/jest_integration.sh index cf4b422a1d46d..cb59d15c612fc 100755 --- a/.buildkite/scripts/steps/code_coverage/jest_integration.sh +++ b/.buildkite/scripts/steps/code_coverage/jest_integration.sh @@ -15,4 +15,4 @@ echo '--- Jest Integration code coverage' .buildkite/scripts/steps/code_coverage/jest_parallel.sh jest.integration.config.js # So the last step "knows" this config ran -uploadRanFile "jest_integration" +uploadRanFile "jest" diff --git a/.buildkite/scripts/steps/code_coverage/reporting/collectVcsInfo.sh b/.buildkite/scripts/steps/code_coverage/reporting/collectVcsInfo.sh index 4e6b3907b6e34..098afa7dc9c61 100755 --- a/.buildkite/scripts/steps/code_coverage/reporting/collectVcsInfo.sh +++ b/.buildkite/scripts/steps/code_coverage/reporting/collectVcsInfo.sh @@ -2,6 +2,8 @@ set -euo pipefail +echo "--- collect VCS Info" + echo "### Prok'd Index File: ..." 
cat src/dev/code_coverage/www/index.html @@ -27,4 +29,4 @@ for X in "${!XS[@]}"; do } done echo "### VCS_INFO:" -cat VCS_INFO.txt \ No newline at end of file +cat VCS_INFO.txt diff --git a/.buildkite/scripts/steps/code_coverage/reporting/ingestData.sh b/.buildkite/scripts/steps/code_coverage/reporting/ingestData.sh index de006352d0b09..7eac3727cfc60 100755 --- a/.buildkite/scripts/steps/code_coverage/reporting/ingestData.sh +++ b/.buildkite/scripts/steps/code_coverage/reporting/ingestData.sh @@ -2,9 +2,6 @@ set -euo pipefail -echo "### Ingesting Code Coverage" -echo "" - COVERAGE_JOB_NAME=$1 export COVERAGE_JOB_NAME echo "### debug COVERAGE_JOB_NAME: ${COVERAGE_JOB_NAME}" @@ -31,27 +28,25 @@ echo "### debug TEAM_ASSIGN_PATH: ${TEAM_ASSIGN_PATH}" BUFFER_SIZE=500 export BUFFER_SIZE -echo "### debug BUFFER_SIZE: ${BUFFER_SIZE}" - -# Build team assignments file -echo "### Generate Team Assignments" -CI_STATS_DISABLED=true node scripts/generate_team_assignments.js \ - --verbose --src '.github/CODEOWNERS' --dest $TEAM_ASSIGN_PATH - -#for x in functional jest; do -# echo "### Ingesting coverage for ${x}" -# COVERAGE_SUMMARY_FILE="target/kibana-coverage/${x}-combined/coverage-summary.json" -# -# CI_STATS_DISABLED=true node scripts/ingest_coverage.js --path ${COVERAGE_SUMMARY_FILE} \ -# --vcsInfoPath ./VCS_INFO.txt --teamAssignmentsPath $TEAM_ASSIGN_PATH & -#done -#wait - -echo "### Ingesting coverage for JEST" -COVERAGE_SUMMARY_FILE="target/kibana-coverage/jest-combined/coverage-summary.json" - -CI_STATS_DISABLED=true node scripts/ingest_coverage.js --path ${COVERAGE_SUMMARY_FILE} \ - --vcsInfoPath ./VCS_INFO.txt --teamAssignmentsPath $TEAM_ASSIGN_PATH - -echo "--- Ingesting Code Coverage - Complete" -echo "" + +ingestModular() { + local xs=("$@") + + echo "--- Generate Team Assignments" + CI_STATS_DISABLED=true node scripts/generate_team_assignments.js \ + --verbose --src '.github/CODEOWNERS' --dest "$TEAM_ASSIGN_PATH" + + echo "--- Ingest results to Kibana stats cluster" + for x in "${xs[@]}"; do + echo "--- Ingesting coverage for ${x}" + + COVERAGE_SUMMARY_FILE="target/kibana-coverage/${x}-combined/coverage-summary.json" + + CI_STATS_DISABLED=true node scripts/ingest_coverage.js --path "${COVERAGE_SUMMARY_FILE}" \ + --vcsInfoPath ./VCS_INFO.txt --teamAssignmentsPath "$TEAM_ASSIGN_PATH" & + done + wait + + echo "--- Ingesting Code Coverage - Complete" + echo "" +} diff --git a/.buildkite/scripts/steps/code_coverage/reporting/prokLinks.sh b/.buildkite/scripts/steps/code_coverage/reporting/prokLinks.sh index 0b6d0ce8ea105..f7d03b5730b87 100755 --- a/.buildkite/scripts/steps/code_coverage/reporting/prokLinks.sh +++ b/.buildkite/scripts/steps/code_coverage/reporting/prokLinks.sh @@ -2,8 +2,20 @@ set -euo pipefail -cat << EOF > src/dev/code_coverage/www/index_partial_2.html - Latest Jest +echo "--- process HTML Links" + +xs=("$@") +len=${#xs[@]} + +# TODO-TRE: Maybe use more exhaustive logic instead of just length. 
+if [[ $len -eq 2 ]]; then
+  links="Latest JestLatest FTR"
+else
+  links="Latest Jest"
+fi
+
+cat <<EOF >src/dev/code_coverage/www/index_partial_2.html
+  ${links}
+EOF
diff --git a/.buildkite/scripts/steps/code_coverage/reporting/uploadStaticSite.sh b/.buildkite/scripts/steps/code_coverage/reporting/uploadStaticSite.sh
index dcb0b03b16d7c..02f2262075b89 100755
--- a/.buildkite/scripts/steps/code_coverage/reporting/uploadStaticSite.sh
+++ b/.buildkite/scripts/steps/code_coverage/reporting/uploadStaticSite.sh
@@ -2,19 +2,22 @@
 set -euo pipefail

+xs=("$@")
+
 uploadPrefix="gs://elastic-bekitzur-kibana-coverage-live/"
 uploadPrefixWithTimeStamp="${uploadPrefix}${TIME_STAMP}/"

-cat src/dev/code_coverage/www/index.html
-
-for x in 'src/dev/code_coverage/www/index.html' 'src/dev/code_coverage/www/404.html'; do
-  gsutil -m -q cp -r -a public-read -z js,css,html ${x} ${uploadPrefix}
-done
+uploadBase() {
+  for x in 'src/dev/code_coverage/www/index.html' 'src/dev/code_coverage/www/404.html'; do
+    gsutil -m -q cp -r -a public-read -z js,css,html "${x}" "${uploadPrefix}"
+  done
+}

-#gsutil -m -q cp -r -a public-read -z js,css,html ${x} ${uploadPrefixWithTimeStamp}
-#
-#for x in 'target/kibana-coverage/functional-combined' 'target/kibana-coverage/jest-combined'; do
-#  gsutil -m -q cp -r -a public-read -z js,css,html ${x} ${uploadPrefixWithTimeStamp}
-#done
+uploadRest() {
+  for x in "${xs[@]}"; do
+    gsutil -m -q cp -r -a public-read -z js,css,html "target/kibana-coverage/${x}-combined" "${uploadPrefixWithTimeStamp}"
+  done
+}

-gsutil -m -q cp -r -a public-read -z js,css,html 'target/kibana-coverage/jest-combined' ${uploadPrefixWithTimeStamp}
+uploadBase
+uploadRest
diff --git a/.buildkite/scripts/steps/code_coverage/util.sh b/.buildkite/scripts/steps/code_coverage/util.sh
index e7da75bb7573d..cb48d62695854 100755
--- a/.buildkite/scripts/steps/code_coverage/util.sh
+++ b/.buildkite/scripts/steps/code_coverage/util.sh
@@ -2,15 +2,27 @@
 set -euo pipefail

+header() {
+  local fileName=$1
+
+  echo "" >"$fileName"
+
+  echo "### File Name:" >>"$fileName"
+  printf " %s\n\n" "$fileName" >>"$fileName"
+}
+
 # $1 file name, ex: "target/dir-listing-jest.txt"
 # $2 directory to be listed, ex: target/kibana-coverage/jest
 dirListing() {
   local fileName=$1
   local dir=$2

-  ls -l "$dir" >"$fileName"
+  header "$fileName"
+
+  ls -l "$dir" >>"$fileName"

   printf "\n### %s \n\tlisted to: %s\n" "$dir" "$fileName"
+
   buildkite-agent artifact upload "$fileName"

   printf "\n### %s Uploaded\n" "$fileName"
@@ -29,15 +41,6 @@ replacePaths() {
   done
 }

-header() {
-  local fileName=$1
-
-  echo "" >"$fileName"
-
-  echo "### File Name:" >>"$fileName"
-  printf "\t%s\n" "$fileName" >>"$fileName"
-}
-
 fileHeads() {
   local fileName=$1
   local dir=$2
diff --git a/.eslintrc.js b/.eslintrc.js
index 036b2123ee254..f88b514b514a2 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -958,6 +958,19 @@ module.exports = {
     },
   },

+  {
+    // disable imports from legacy uptime plugin
+    files: ['x-pack/plugins/synthetics/public/apps/synthetics/**/*.{js,mjs,ts,tsx}'],
+    rules: {
+      'no-restricted-imports': [
+        'error',
+        {
+          patterns: ['**/legacy_uptime/*'],
+        },
+      ],
+    },
+  },
+
   /**
    * Fleet overrides
    */
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 375854b9c54b7..d55634c958289 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -1,10 +1,7 @@
 on:
   pull_request_target:
-    branches:
-      - main
-    types:
-      - labeled
-      - closed
+    branches: ["main"]
+    types: ["labeled", "closed"]

 jobs:
   backport:
@@ -19,9 +16,14 @@ jobs:
     )
     steps:
      - name: Backport Action
-        uses: sqren/backport-github-action@v7.4.0
+        uses: sqren/backport-github-action@v8.5.2
         with:
           github_token: ${{secrets.KIBANAMACHINE_TOKEN}}

-      - name: Backport log
-        run: cat ~/.backport/backport.log
+      - name: Info log
+        if: ${{ success() }}
+        run: cat /home/runner/.backport/backport.info.log
+
+      - name: Debug log
+        if: ${{ failure() }}
+        run: cat /home/runner/.backport/backport.debug.log
diff --git a/docs/api/alerting.asciidoc b/docs/api/alerting.asciidoc
index 931165ce5f485..44ed7b3b88739 100644
--- a/docs/api/alerting.asciidoc
+++ b/docs/api/alerting.asciidoc
@@ -31,7 +31,7 @@ The following APIs are available for Alerting.

 For deprecated APIs, refer to <>.

-include::alerting/create_rule.asciidoc[]
+include::alerting/create_rule.asciidoc[leveloffset=+1]
 include::alerting/update_rule.asciidoc[]
 include::alerting/get_rules.asciidoc[]
 include::alerting/delete_rule.asciidoc[]
diff --git a/docs/api/alerting/create_rule.asciidoc b/docs/api/alerting/create_rule.asciidoc
index 79ae7b0c39d6c..484866436d97d 100644
--- a/docs/api/alerting/create_rule.asciidoc
+++ b/docs/api/alerting/create_rule.asciidoc
@@ -1,5 +1,5 @@
 [[create-rule-api]]
-=== Create rule API
+== Create rule API
 ++++
 <titleabbrev>Create rule</titleabbrev>
 ++++
@@ -7,13 +7,23 @@ Create {kib} rules.

 [[create-rule-api-request]]
-==== Request
+=== {api-request-title}

 `POST <kibana host>:<port>/api/alerting/rule/<id>`

 `POST <kibana host>:<port>/s/<space_id>/api/alerting/rule/<id>`

-==== {api-description-title}
+
+=== {api-prereq-title}
+
+You must have `all` privileges for the *Management* > *Stack Rules* feature or
+for the *{ml-app}*, *{observability}*, or *Security* features, depending on the
+`consumer` and `rule_type_id` of the rule you're creating. If the rule has
+`actions`, you must also have `read` privileges for the *Management* >
+*Actions and Connectors* feature. For more details, refer to
+<>.
+
+=== {api-description-title}

 [WARNING]
 ====
@@ -25,84 +35,109 @@ If a user with different privileges updates the rule, its behavior might change.
 ====

 [[create-rule-api-path-params]]
-==== Path parameters
+=== {api-path-parms-title}

 `<id>`::
- (Optional, string) Specifies a UUID v1 or v4 to use instead of a randomly generated ID.
+(Optional, string) Specifies a UUID v1 or v4 to use instead of a randomly
+generated ID.

 `space_id`::
- (Optional, string) An identifier for the space. If `space_id` is not provided in the URL, the default space is used.
+(Optional, string) An identifier for the space. If `space_id` is not provided in
+the URL, the default space is used.

+[role="child_attributes"]
 [[create-rule-api-request-body]]
-==== Request body
-
-`name`::
- (Required, string) A name to reference and search.
-
-`tags`::
- (Optional, string array) A list of keywords to reference and search.
-
-`rule_type_id`::
- (Required, string) The ID of the rule type that you want to call when the rule is scheduled to run.
+=== {api-request-body-title}

-`schedule`::
- (Required, object) The schedule specifying when this rule should be run, using one of the available schedule formats specified under
+`actions`::
+(Optional, object array) An array of action objects.
 +
-._Schedule Formats_.
+.Properties of the action objects:
 [%collapsible%open]
 =====
-A schedule is structured such that the key specifies the format you wish to use and its value specifies the schedule.
-We currently support the _Interval format_ which specifies the interval in seconds, minutes, hours or days at which the rule should execute.
-Example: `{ interval: "10s" }`, `{ interval: "5m" }`, `{ interval: "1h" }`, `{ interval: "1d" }`.
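For instance, a rule body that runs its check every five minutes but sends the same notification at most once per hour would pair `schedule` with `throttle` and `notify_when` like this (an illustrative fragment, not a complete request; required fields such as `name`, `consumer`, `rule_type_id`, and `params` appear in the full example below):

[source,sh]
--------------------------------------------------
POST api/alerting/rule
{
  "schedule": { "interval": "5m" },
  "notify_when": "onThrottleInterval",
  "throttle": "1h"
}
--------------------------------------------------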
+`group`:::
+(Required, string) Grouping actions is recommended for escalations for different
+types of alerts. If you don't need this, set this value to `default`.

-There are plans to support multiple other schedule formats in the near future.
-=====
+`id`:::
+(Required, string) The ID of the connector saved object.

-`throttle`::
- (Optional, string) How often this rule should fire the same actions. This will prevent the rule from sending out the same notification over and over. For example, if a rule with a `schedule` of 1 minute stays in a triggered state for 90 minutes, setting a `throttle` of `10m` or `1h` will prevent it from sending 90 notifications during this period.
+`params`:::
+(Required, object) The map to the `params` that the
+<> will receive. `params` are handled as Mustache
+templates and passed a default set of context.
+=====

-`notify_when`::
- (Required, string) The condition for throttling the notification: `onActionGroupChange`, `onActiveAlert`, or `onThrottleInterval`.
+`consumer`::
+(Required, string) The name of the application or feature that owns the rule.
+For example: `alerts`, `apm`, `discover`, `infrastructure`, `logs`, `metrics`,
+`ml`, `monitoring`, `securitySolution`, `siem`, `stackAlerts`, or `uptime`.

 `enabled`::
- (Optional, boolean) Indicates if you want to run the rule on an interval basis after it is created.
+(Optional, boolean) Indicates if you want to run the rule on an interval basis
+after it is created.

-`consumer`::
- (Required, string) The name of the application that owns the rule. This name has to match the Kibana Feature name, as that dictates the required RBAC privileges.
+`name`::
+(Required, string) A name to reference and search.
+
+`notify_when`::
+(Required, string) The condition for throttling the notification:
+`onActionGroupChange`, `onActiveAlert`, or `onThrottleInterval`.

 `params`::
- (Required, object) The parameters to pass to the rule type executor `params` value. This will also validate against the rule type params validator, if defined.
+(Required, object) The parameters to pass to the rule type executor `params`
+value. This will also validate against the rule type params validator, if defined.

-`actions`::
- (Optional, object array) An array of the following action objects.
+`rule_type_id`::
+(Required, string) The ID of the rule type that you want to call when the rule
+is scheduled to run. For example, `.es-query`, `.index-threshold`,
+`logs.alert.document.count`, `monitoring_alert_cluster_health`,
+`siem.thresholdRule`, or `xpack.ml.anomaly_detection_alert`. For more
+information, refer to <>.
+
+`schedule`::
+(Required, object) The schedule specifying when this rule should be run, using
+one of the available schedule formats.
 +
-.Properties of the action objects:
+.Schedule formats
 [%collapsible%open]
 =====
- `group`:::
- (Required, string) Grouping actions is recommended for escalations for different types of alerts. If you don't need this, set this value to `default`.
+A schedule is structured such that the key specifies the format you wish to use
+and its value specifies the schedule.

- `id`:::
- (Required, string) The ID of the connector saved object to execute.
+We currently support the _interval format_ which specifies the interval in
+seconds, minutes, hours or days at which the rule should run. For example:
+`{ "interval": "10s" }`, `{ "interval": "5m" }`, `{ "interval": "1h" }`, or
+`{ "interval": "1d" }`.

- `params`:::
- (Required, object) The map to the `params` that the <> will receive. ` params` are handled as Mustache templates and passed a default set of context.
+There are plans to support multiple other schedule formats in the near future.
 =====

+`tags`::
+(Optional, string array) A list of keywords to reference and search.
+
+`throttle`::
+(Optional, string) How often this rule should fire the same actions. This will
+prevent the rule from sending out the same notification over and over. For
+example, if a rule with a `schedule` of 1 minute stays in a triggered state for
+90 minutes, setting a `throttle` of `10m` or `1h` will prevent it from sending
+90 notifications during this period.

 [[create-rule-api-request-codes]]
-==== Response code
+=== {api-response-codes-title}

 `200`::
     Indicates a successful call.

 [[create-rule-api-example]]
-==== Example
+=== {api-examples-title}
+
+Create a rule that has actions associated with a server log connector:

 [source,sh]
 --------------------------------------------------
-$ curl -X POST api/alerting/rule -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '
+POST api/alerting/rule
 {
   "params":{
     "aggType":"avg",
@@ -141,7 +176,7 @@ $ curl -X POST api/alerting/rule -H 'kbn-xsrf: true' -H 'Content-Type: applicat
   ],
   "notify_when":"onActionGroupChange",
   "name":"my alert"
-}'
+}
 --------------------------------------------------
 // KIBANA

@@ -151,7 +186,12 @@ The API returns the following:
 --------------------------------------------------
 {
   "id": "41893910-6bca-11eb-9e0d-85d233e3ee35",
-  "notify_when": "onActionGroupChange",
+  "consumer": "alerts",
+  "tags": ["cpu"],
+  "name": "my alert",
+  "enabled": true,
+  "throttle": null,
+  "schedule": {"interval": "1m"},
   "params": {
     "aggType": "avg",
     "termSize": 6,
@@ -159,49 +199,37 @@ The API returns the following:
     "timeWindowSize": 5,
     "timeWindowUnit": "m",
     "groupBy": "top",
-    "threshold": [
-      1000
-    ],
-    "index": [
-      ".kibana"
-    ],
+    "threshold": [1000],
+    "index": [".test-index"],
     "timeField": "@timestamp",
     "aggField": "sheet.version",
     "termField": "name.keyword"
   },
-  "consumer": "alerts",
   "rule_type_id": ".index-threshold",
-  "schedule": {
-    "interval": "1m"
-  },
+  "scheduled_task_id": "425b0800-6bca-11eb-9e0d-85d233e3ee35",
+  "snooze_schedule":[],
+  "created_by": "elastic",
+  "updated_by": "elastic",
+  "created_at": "2022-06-08T17:20:31.632Z",
+  "updated_at": "2022-06-08T17:20:31.632Z",
+  "api_key_owner": "elastic",
+  "notify_when": "onActionGroupChange",
+  "mute_all": false,
+  "muted_alert_ids": [],
+  "execution_status": {
+    "last_execution_date": "2022-06-08T17:20:31.632Z",
+    "status": "pending"
+  },
   "actions": [
     {
-      "connector_type_id": ".server-log",
       "group": "threshold met",
+      "id": "dceeb5d0-6b41-11eb-802b-85b0c1bc8ba2",
       "params": {
         "level": "info",
         "message": "alert {{alertName}} is active for group {{context.group}}:\n\n- Value: {{context.value}}\n- Conditions Met: {{context.conditions}} over {{params.timeWindowSize}}{{params.timeWindowUnit}}\n- Timestamp: {{context.date}}"
       },
-      "id": "dceeb5d0-6b41-11eb-802b-85b0c1bc8ba2"
+      "connector_type_id": ".server-log"
     }
-  ],
-  "tags": [
-    "cpu"
-  ],
-  "name": "my alert",
-  "enabled": true,
-  "throttle": null,
-  "api_key_owner": "elastic",
-  "created_by": "elastic",
-  "updated_by": "elastic",
-  "mute_all": false,
-  "muted_alert_ids": [],
-  "updated_at": "2021-02-10T18:03:19.961Z",
-  "created_at": "2021-02-10T18:03:19.961Z",
-  "scheduled_task_id": "425b0800-6bca-11eb-9e0d-85d233e3ee35",
-  "execution_status": {
-    "last_execution_date": "2021-02-10T18:03:19.966Z",
-    "status": "pending"
-  }
+  ]
 }
--------------------------------------------------
diff --git a/docs/dev-tools/console/console.asciidoc b/docs/dev-tools/console/console.asciidoc
index 69f81d838c143..6a28f0f433d46 100644
--- a/docs/dev-tools/console/console.asciidoc
+++ b/docs/dev-tools/console/console.asciidoc
@@ -1,9 +1,9 @@
 [[console-kibana]]
-== Run {es} API requests
+== Run API requests

-Interact with the REST API of {es} with *Console*. You can:
+Interact with the REST APIs of {es} and {kib} with *Console*. With *Console*, you can:

-* Send requests to {es} and view the responses
+* Send requests and view the responses
 * View API documentation
 * Get your request history

@@ -12,8 +12,6 @@ To get started, open the main menu, click *Dev Tools*, then click *Console*.
 [role="screenshot"]
 image::dev-tools/console/images/console.png["Console"]

-NOTE: **Console** supports only Elasticsearch APIs. You are unable to interact with the {kib} APIs with **Console** and must use curl or another HTTP tool instead.
-
 [float]
 [[console-api]]
 === Write requests

@@ -43,6 +41,15 @@ curl -XGET "http://localhost:9200/_search" -d'
 }'
 ----------------------------------

+Prepend requests to a {kib} API endpoint with `kbn:`. For example:
+
+[source,bash]
+--------------------------------------------------
+GET kbn:/api/index_management/indices
+--------------------------------------------------
+
+
 When you paste the command into *Console*, {kib} automatically converts it to *Console* syntax. Alternatively, if you want to see *Console* syntax in cURL, click the action icon (image:dev-tools/console/images/wrench.png[]) and select *Copy as cURL*.
diff --git a/docs/management/images/management-saved-objects.png b/docs/management/images/management-saved-objects.png
index 0ee720cfdb39d..fc61b92841e0e 100644
Binary files a/docs/management/images/management-saved-objects.png and b/docs/management/images/management-saved-objects.png differ
diff --git a/docs/management/managing-saved-objects.asciidoc b/docs/management/managing-saved-objects.asciidoc
index 39d294df43a5a..ee1247501e8da 100644
--- a/docs/management/managing-saved-objects.asciidoc
+++ b/docs/management/managing-saved-objects.asciidoc
@@ -97,12 +97,25 @@ limits the number of saved objects which may be exported.
 === Copy to other {kib} spaces

 To copy a saved object to another space, click the actions icon image:images/actions_icon.png[Actions icon]
-and select *Copy to space*. From here, you can select the spaces in which to copy the object.
+and select *Copy to spaces*. From here, you can select the spaces in which to copy the object.
 You can also select whether to automatically overwrite any conflicts in the target spaces, or
 resolve them manually.

 WARNING: The copy operation automatically includes child objects that are related to the saved objects. If you don't want this behavior, use
 the <> instead.

+[float]
+[role="xpack"]
+[[managing-saved-objects-share-to-space]]
+=== Share to other {kib} spaces
+
+To share a saved object to another space -- which makes a single saved object available in multiple spaces -- click the actions icon
+image:images/actions_icon.png[Actions icon] and select *Share to spaces*. From here, you can select the spaces in which to share the object,
+or indicate that you want the object to be shared to _all spaces_, which includes those that exist now and any created in the future.
+
+Not all saved object types are shareable. If an object is shareable, the Spaces column shows which spaces it exists in. You can also click
+those space icons to open the Share UI.
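To check how an object is currently shared, the saved objects `_find` API can be queried from *Console* with the `kbn:` prefix shown earlier; for shareable types, the response should list the spaces each object exists in via a `namespaces` array. The `type` and `per_page` values below are illustrative:

[source,sh]
--------------------------------------------------
GET kbn:/api/saved_objects/_find?type=dashboard&per_page=5
--------------------------------------------------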
+
+WARNING: The share operation automatically includes child objects that are related to the saved objects.

 include::saved-objects/saved-object-ids.asciidoc[]
diff --git a/docs/settings/logging-settings.asciidoc b/docs/settings/logging-settings.asciidoc
index a9053b90ce722..89270f83131d8 100644
--- a/docs/settings/logging-settings.asciidoc
+++ b/docs/settings/logging-settings.asciidoc
@@ -60,7 +60,7 @@ The following table serves as a quick reference for different logging configurat
 | The suffix to append to the file path when rolling. Must include `%i`.

 | `logging.appenders[]..strategy.max`
-| The maximum number of files to keep. Optional. Default is `7`.
+| The maximum number of files to keep. Optional. Default is `7` and the maximum is `100`.

 | `logging.appenders[]..layout.type`
 | Determines how the log messages are displayed. Options are `pattern`, which provides human-readable output, or `json`, which provides ECS-compliant output. Required.
diff --git a/docs/user/api.asciidoc b/docs/user/api.asciidoc
index 5119762f9ac6b..0cfd4620b7cb5 100644
--- a/docs/user/api.asciidoc
+++ b/docs/user/api.asciidoc
@@ -9,11 +9,13 @@ deploying {kib}.
 [[using-apis]]
 == Using the APIs

-Interact with the {kib} APIs through the `curl` command and HTTP and HTTPs protocols.
+Prepend any {kib} API endpoint with `kbn:` and send the request through <<console-kibana, Console>>.
+For example:

-It is recommended that you use HTTPs on port 5601 because it is more secure.
-
-NOTE: The {kib} Console supports only Elasticsearch APIs. You are unable to interact with the {kib} APIs with the Console and must use `curl` or another HTTP tool instead. For more information, refer to <>.
+[source,sh]
+--------------------------------------------------
+GET kbn:/api/index_management/indices
+--------------------------------------------------

 [float]
 [[api-authentication]]
diff --git a/docs/user/dashboard/images/dashboard_controlsClearSelections_8.3.0.png b/docs/user/dashboard/images/dashboard_controlsClearSelections_8.3.0.png
new file mode 100644
index 0000000000000..177ed22b9211f
Binary files /dev/null and b/docs/user/dashboard/images/dashboard_controlsClearSelections_8.3.0.png differ
diff --git a/docs/user/dashboard/images/dashboard_controlsEditControl_8.3.0.png b/docs/user/dashboard/images/dashboard_controlsEditControl_8.3.0.png
new file mode 100644
index 0000000000000..9687f80ea3802
Binary files /dev/null and b/docs/user/dashboard/images/dashboard_controlsEditControl_8.3.0.png differ
diff --git a/docs/user/dashboard/images/dashboard_controlsOptionsList_8.3.0.png b/docs/user/dashboard/images/dashboard_controlsOptionsList_8.3.0.png
new file mode 100644
index 0000000000000..a145f428474fa
Binary files /dev/null and b/docs/user/dashboard/images/dashboard_controlsOptionsList_8.3.0.png differ
diff --git a/docs/user/dashboard/images/dashboard_controlsRangeSlider_8.3.0.png b/docs/user/dashboard/images/dashboard_controlsRangeSlider_8.3.0.png
new file mode 100644
index 0000000000000..73def2756aa48
Binary files /dev/null and b/docs/user/dashboard/images/dashboard_controlsRangeSlider_8.3.0.png differ
diff --git a/docs/user/dashboard/images/dashboard_controlsRemoveControl_8.3.0.png b/docs/user/dashboard/images/dashboard_controlsRemoveControl_8.3.0.png
new file mode 100644
index 0000000000000..e6151888787cb
Binary files /dev/null and b/docs/user/dashboard/images/dashboard_controlsRemoveControl_8.3.0.png differ
diff --git a/docs/user/dashboard/images/dashboard_showOnlySelectedOptions_8.3.0.png
b/docs/user/dashboard/images/dashboard_showOnlySelectedOptions_8.3.0.png new file mode 100644 index 0000000000000..9617bd1f6cda1 Binary files /dev/null and b/docs/user/dashboard/images/dashboard_showOnlySelectedOptions_8.3.0.png differ diff --git a/docs/user/dashboard/make-dashboards-interactive.asciidoc b/docs/user/dashboard/make-dashboards-interactive.asciidoc index 06c6675bedba3..eaff5bc1bda60 100644 --- a/docs/user/dashboard/make-dashboards-interactive.asciidoc +++ b/docs/user/dashboard/make-dashboards-interactive.asciidoc @@ -2,12 +2,12 @@ [[drilldowns]] == Make dashboards interactive -:keywords: administrator, analyst, concept, task, analyze, dashboard, controls, drilldowns +:keywords: administrator, analyst, concept, task, analyze, dashboard, controls, range slider, options list, author, drilldowns :description: Add interactive capabilities to your dashboard, such as controls that allow \ you to apply dashboard-level filters, and drilldowns that allow you to navigate to other \ dashboards and external websites. -Add interactive capabilities to your dashboard, such as controls that allow you to apply dashboard-level filters, and drilldowns that allow you to navigate to *Discover*, other dashboards, and external websites. +Add interactive capabilities to your dashboard, such as interactive filter controls, and drilldowns that allow you to navigate to *Discover*, other dashboards, and external websites. ++++