Skip to content

Commit

Permalink
Merge branch 'main' into add-alibabacloud-inference
Browse files Browse the repository at this point in the history
  • Loading branch information
davidkyle authored Aug 8, 2024
2 parents 00eee4c + f02be07 commit 873e6f6
Show file tree
Hide file tree
Showing 187 changed files with 2,046 additions and 2,644 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,18 @@ curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scrip
tmpOutputFile=$(mktemp)
trap "rm $tmpOutputFile" EXIT

set +e
gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile

# Capture the return value
retval=$?
set -e

# Now read the content from the temporary file into a variable
perfOutput=$(cat $tmpOutputFile | sed -n '/Performance Characteristics/,/See https:\/\/gradle.com\/bvs\/main\/Gradle.md#performance-characteristics for details./p' | sed '$d' | sed 's/\x1b\[[0-9;]*m//g')
investigationOutput=$(cat $tmpOutputFile | sed -n '/Investigation Quick Links/,$p' | sed 's/\x1b\[[0-9;]*m//g')

# Initialize HTML output variable
summaryHtml="<h4>Performance Characteristics</h4>"
summaryHtml="<h4>Build Cache Performance Characteristics</h4>"
summaryHtml+="<ul>"

# Process each line of the string
Expand Down
27 changes: 27 additions & 0 deletions .buildkite/scripts/gradle-configuration-cache-validation.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
#!/bin/bash

set -euo pipefail

# TODO/FIXME: without a fully resolved Gradle home we see configuration-cache reuse issues
./gradlew --max-workers=8 --parallel --scan --no-daemon precommit

./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2

# Create a temporary file to capture the output of the second configuration-cache run.
# Quote the path everywhere: mktemp may return a path containing spaces.
tmpOutputFile=$(mktemp)
trap 'rm -f "$tmpOutputFile"' EXIT

echo "2nd run"
# TODO run-gradle.sh script causes issues because of init script handling
./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee "$tmpOutputFile"

# Succeed only if Gradle reported that the configuration cache entry was reused
if grep -q "Configuration cache entry reused." "$tmpOutputFile"; then
  echo "Gradle configuration cache reused"
  exit 0
else
  echo "Failed to reuse Gradle configuration cache."
  exit 1
fi


Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ public void apply(Project target) {
: System.getenv("BUILDKITE_BUILD_NUMBER");
String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST");
if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) {
File targetFile = target.file("build/" + buildNumber + ".tar.bz2");
File targetFile = calculateTargetFile(target, buildNumber);
File projectDir = target.getProjectDir();
File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/");
DevelocityConfiguration extension = target.getExtensions().getByType(DevelocityConfiguration.class);
Expand All @@ -86,9 +86,19 @@ public void apply(Project target) {
}
}

/**
 * Picks a non-clashing upload archive path under {@code build/} for the given build number.
 * The first candidate is {@code build/<buildNumber>.tar.bz2}; if a file already exists at
 * that path, numeric suffixes ({@code -1}, {@code -2}, ...) are tried in ascending order
 * until an unused path is found, so repeated runs never overwrite an earlier archive.
 */
private File calculateTargetFile(Project target, String buildNumber) {
    File candidate = target.file("build/" + buildNumber + ".tar.bz2");
    for (int suffix = 1; candidate.exists(); suffix++) {
        candidate = target.file("build/" + buildNumber + "-" + suffix + ".tar.bz2");
    }
    return candidate;
}

private List<File> resolveProjectLogs(File projectDir) {
var projectDirFiles = getFileOperations().fileTree(projectDir);
projectDirFiles.include("**/*.hprof");
projectDirFiles.include("**/build/reports/configuration-cache/**");
projectDirFiles.include("**/build/test-results/**/*.xml");
projectDirFiles.include("**/build/testclusters/**");
projectDirFiles.include("**/build/testrun/*/temp/**");
Expand Down
6 changes: 6 additions & 0 deletions docs/changelog/111624.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
pr: 111624
summary: Extend logging for dropped warning headers
area: Infra/Core
type: enhancement
issues:
- 90527
5 changes: 5 additions & 0 deletions docs/changelog/111673.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 111673
summary: Properly handle filters on `TextSimilarityRank` retriever
area: Ranking
type: bug
issues: []
30 changes: 22 additions & 8 deletions docs/reference/esql/esql-across-clusters.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -54,11 +54,6 @@ Refer to <<remote-clusters-cert, TLS certificate authentication>> for prerequisi
[[esql-ccs-security-model-api-key]]
===== API key authentication

[NOTE]
====
`ENRICH` is *not supported* in this version when using {esql} with the API key based security model.
====

The following information pertains to using {esql} across clusters with the <<remote-clusters-api-key, *API key based security model*>>. You'll need to follow the steps on that page for the *full setup instructions*. This page only contains additional information specific to {esql}.

API key based cross-cluster search (CCS) enables more granular control over allowed actions between clusters.
Expand All @@ -71,6 +66,7 @@ You will need to:

Using {esql} with the API key based security model requires some additional permissions that may not be needed when using the traditional query DSL based search.
The following example API call creates a role that can query remote indices using {esql} when using the API key based security model.
The final privilege, `remote_cluster`, is required to allow remote enrich operations.

[source,console]
----
Expand All @@ -89,7 +85,17 @@ POST /_security/role/remote1
"privileges": [ "read","read_cross_cluster" ], <4>
"clusters" : ["my_remote_cluster"] <5>
}
]
],
"remote_cluster": [ <6>
{
"privileges": [
"monitor_enrich"
],
"clusters": [
"my_remote_cluster"
]
}
]
}
----

Expand All @@ -100,6 +106,7 @@ POST /_security/role/remote1
<5> The remote clusters to which these privileges apply.
This remote cluster must be configured with a <<security-api-create-cross-cluster-api-key,cross-cluster API key>> and connected to the remote cluster before the remote index can be queried.
Verify connection using the <<cluster-remote-info, Remote cluster info>> API.
<6> Required to allow remote enrichment. Without this, the user cannot read from the `.enrich` indices on the remote cluster. The `remote_cluster` security privilege was introduced in version *8.15.0*.

You will then need a user or API key with the permissions you created above. The following example API call creates a user with the `remote1` role.

Expand All @@ -114,6 +121,11 @@ POST /_security/user/remote_user

Remember that all cross-cluster requests from the local cluster are bound by the cross cluster API key’s privileges, which are controlled by the remote cluster's administrator.

[TIP]
====
Cross cluster API keys created in versions prior to 8.15.0 will need to be replaced or updated to add the new permissions required for {esql} with ENRICH.
====

[discrete]
[[ccq-remote-cluster-setup]]
==== Remote cluster setup
Expand Down Expand Up @@ -174,9 +186,11 @@ clusters, aiming to minimize computation or inter-cluster data transfer. Ensurin
the policy exists with consistent data on both the local cluster and the remote
clusters is critical for ES|QL to produce a consistent query result.

[NOTE]
[TIP]
====
Enrich across clusters is *not supported* in this version when using {esql} with the <<remote-clusters-api-key, *API key based security model*>>.
Enrich in {esql} across clusters using the API key based security model was introduced in version *8.15.0*.
Cross cluster API keys created in versions prior to 8.15.0 will need to be replaced or updated to use the new required permissions.
Refer to the example in the <<esql-ccs-security-model-api-key,API key authentication>> section.
====

In the following example, the enrich with `hosts` policy can be executed on
Expand Down
12 changes: 12 additions & 0 deletions docs/reference/esql/functions/kibana/definition/mv_count.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions docs/reference/esql/functions/kibana/definition/mv_first.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions docs/reference/esql/functions/kibana/definition/mv_last.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions docs/reference/esql/functions/kibana/definition/mv_max.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 12 additions & 0 deletions docs/reference/esql/functions/kibana/definition/mv_min.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions docs/reference/esql/functions/types/mv_count.asciidoc

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions docs/reference/esql/functions/types/mv_first.asciidoc

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions docs/reference/esql/functions/types/mv_last.asciidoc

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions docs/reference/esql/functions/types/mv_max.asciidoc

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions docs/reference/esql/functions/types/mv_min.asciidoc

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ The result would then have the `errors` field set to `true` and hold the error f
"details": {
"my_admin_role": { <4>
"type": "action_request_validation_exception",
"reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_data_stream_global_retention,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_data_stream_global_retention,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;"
"reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;"
}
}
}
Expand Down
10 changes: 0 additions & 10 deletions docs/reference/security/authorization/privileges.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -101,11 +101,6 @@ deprecated[7.5] Use `manage_transform` instead.
+
This privilege is not available in {serverless-full}.

`manage_data_stream_global_retention`::
All operations related to managing the data stream global retention settings.
+
This privilege is not available in {serverless-full}.

`manage_enrich`::
All operations related to managing and executing enrich policies.

Expand Down Expand Up @@ -228,11 +223,6 @@ security roles of the user who created or updated them.
All cluster read-only operations, like cluster health and state, hot threads,
node info, node and cluster stats, and pending cluster tasks.

`monitor_data_stream_global_retention`::
Allows the retrieval of the data stream global retention settings.
+
This privilege is not available in {serverless-full}.

`monitor_enrich`::
All read-only operations related to managing and executing enrich policies.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
package org.elasticsearch.nativeaccess;

import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

import java.io.IOException;
import java.nio.file.Path;
Expand All @@ -17,8 +18,14 @@
import static org.hamcrest.Matchers.equalTo;

public class PreallocateTests extends ESTestCase {
public void testPreallocate() throws IOException {

@Before
public void setup() {
    // Skip every test in this class when the platform or CI job cannot exercise
    // native file preallocation. The messages below document each assumption.
    assumeFalse("no preallocate on windows", System.getProperty("os.name").startsWith("Windows"));
    // BUILDKITE_LABEL identifies the CI job; the "encryption-at-rest" job runs on
    // encrypted block devices where preallocation is not supported.
    assumeFalse("preallocate not supported on encrypted block devices", "encryption-at-rest".equals(System.getenv("BUILDKITE_LABEL")));
}

public void testPreallocate() throws IOException {
Path file = createTempFile();
long size = 1024 * 1024; // 1 MB
var nativeAccess = NativeAccess.instance();
Expand All @@ -29,7 +36,6 @@ public void testPreallocate() throws IOException {
}

public void testPreallocateNonExistingFile() {
assumeFalse("no preallocate on windows", System.getProperty("os.name").startsWith("Windows"));
Path file = createTempDir().resolve("intermediate-dir").resolve("test-preallocate");
long size = 1024 * 1024; // 1 MB
var nativeAccess = NativeAccess.instance();
Expand All @@ -40,7 +46,6 @@ public void testPreallocateNonExistingFile() {
}

public void testPreallocateNonExistingDirectory() {
assumeFalse("no preallocate on windows", System.getProperty("os.name").startsWith("Windows"));
Path file = createTempDir().resolve("intermediate-dir").resolve("test-preallocate");
long size = 1024 * 1024; // 1 MB
var nativeAccess = NativeAccess.instance();
Expand Down
Loading

0 comments on commit 873e6f6

Please sign in to comment.