Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/main' into InternalSignificantTermsLean
Browse files Browse the repository at this point in the history
  • Loading branch information
iverase committed Nov 29, 2024
2 parents a749c23 + 5935f76 commit a50d069
Show file tree
Hide file tree
Showing 45 changed files with 1,148 additions and 305 deletions.
15 changes: 1 addition & 14 deletions .buildkite/scripts/dra-workflow.sh
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,6 @@ find "$WORKSPACE" -type d -path "*/build/distributions" -exec chmod a+w {} \;

echo --- Running release-manager

set +e
# Artifacts should be generated
docker run --rm \
--name release-manager \
Expand All @@ -92,16 +91,4 @@ docker run --rm \
--version "$ES_VERSION" \
--artifact-set main \
--dependency "beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \
--dependency "ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \
2>&1 | tee release-manager.log
EXIT_CODE=$?
set -e

# This failure is just generating a ton of noise right now, so let's just ignore it
# This should be removed once this issue has been fixed
if grep "elasticsearch-ubi-9.0.0-SNAPSHOT-docker-image.tar.gz" release-manager.log; then
echo "Ignoring error about missing ubi artifact"
exit 0
fi

exit "$EXIT_CODE"
--dependency "ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json"
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
import org.gradle.api.Project;

import java.io.File;
import java.util.Arrays;
import java.util.Map;

/**
* This plugin configures formatting for Java source using Spotless
Expand Down Expand Up @@ -64,7 +66,8 @@ public void apply(Project project) {
java.importOrderFile(new File(elasticsearchWorkspace, importOrderPath));

// Most formatting is done through the Eclipse formatter
java.eclipse().configFile(new File(elasticsearchWorkspace, formatterConfigPath));
java.eclipse().withP2Mirrors(Map.of("https://download.eclipse.org/", "https://mirror.umd.edu/eclipse/"))
.configFile(new File(elasticsearchWorkspace, formatterConfigPath));

// Ensure blank lines are actually empty. Since formatters are applied in
// order, apply this one last, otherwise non-empty blank lines can creep
Expand Down
18 changes: 18 additions & 0 deletions docs/changelog/104125.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
pr: 104125
summary: Disable machine learning on macOS x86_64
area: Machine Learning
type: breaking
issues: []
breaking:
title: Disable machine learning on macOS x86_64
area: Packaging
details: The machine learning plugin is permanently disabled on macOS x86_64.
For the last three years Apple has been selling hardware based on the arm64
architecture, and support will increasingly focus on this architecture in
the future. Changes to upstream dependencies of Elastic's machine learning
functionality have made it unviable for Elastic to continue to build machine
learning on macOS x86_64.
impact: To continue to use machine learning functionality on macOS please switch to
an arm64 machine (Apple silicon). Alternatively, it will still be possible to run
Elasticsearch with machine learning enabled in a Docker container on macOS x86_64.
notable: false
5 changes: 5 additions & 0 deletions docs/changelog/111494.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 111494
summary: Extensible Completion Postings Formats
area: "Suggesters"
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/117606.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 117606
summary: Remove deprecated sort from reindex operation within dataframe analytics procedure
area: Machine Learning
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/117618.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 117618
summary: SearchStatesIt failures reported by CI
area: Search
type: bug
issues: [116617, 116618]
5 changes: 5 additions & 0 deletions docs/changelog/117655.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 117655
summary: Add nulls support to Categorize
area: ES|QL
type: enhancement
issues: []
32 changes: 16 additions & 16 deletions docs/reference/quickstart/full-text-filtering-tutorial.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -511,8 +511,9 @@ In this tutorial scenario it's useful for when users have complex requirements f

Let's create a query that addresses the following user needs:

* Must be a vegetarian main course
* Must be a vegetarian recipe
* Should contain "curry" or "spicy" in the title or description
* Should be a main course
* Must not be a dessert
* Must have a rating of at least 4.5
* Should prefer recipes published in the last month
Expand All @@ -524,16 +525,7 @@ GET /cooking_blog/_search
"query": {
"bool": {
"must": [
{
"term": {
"category.keyword": "Main Course"
}
},
{
"term": {
"tags": "vegetarian"
}
},
{ "term": { "tags": "vegetarian" } },
{
"range": {
"rating": {
Expand All @@ -543,10 +535,18 @@ GET /cooking_blog/_search
}
],
"should": [
{
"term": {
"category": "Main Course"
}
},
{
"multi_match": {
"query": "curry spicy",
"fields": ["title^2", "description"]
"fields": [
"title^2",
"description"
]
}
},
{
Expand Down Expand Up @@ -590,12 +590,12 @@ GET /cooking_blog/_search
"value": 1,
"relation": "eq"
},
"max_score": 7.9835095,
"max_score": 7.444513,
"hits": [
{
"_index": "cooking_blog",
"_id": "2",
"_score": 7.9835095,
"_score": 7.444513,
"_source": {
"title": "Spicy Thai Green Curry: A Vegetarian Adventure", <1>
"description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", <2>
Expand All @@ -619,8 +619,8 @@ GET /cooking_blog/_search
<1> The title contains "Spicy" and "Curry", matching our should condition. With the default <<type-best-fields,best_fields>> behavior, this field contributes most to the relevance score.
<2> While the description also contains matching terms, only the best matching field's score is used by default.
<3> The recipe was published within the last month, satisfying our recency preference.
<4> The "Main Course" category matches our `must` condition.
<5> The "vegetarian" tag satisfies another `must` condition, while "curry" and "spicy" tags align with our `should` preferences.
<4> The "Main Course" category satisfies another `should` condition.
<5> The "vegetarian" tag satisfies a `must` condition, while "curry" and "spicy" tags align with our `should` preferences.
<6> The rating of 4.6 meets our minimum rating requirement of 4.5.
==============

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -527,7 +527,13 @@ protected Number randomNumber() {

public void testEncodeDecodeExactScalingFactor() {
double v = randomValue();
assertThat(encodeDecode(1 / v, v), equalTo(1 / v));
double expected = 1 / v;
// We don't produce infinities while decoding. See #testDecodeHandlingInfinity().
if (Double.isInfinite(expected)) {
var sign = expected == Double.POSITIVE_INFINITY ? 1 : -1;
expected = sign * Double.MAX_VALUE;
}
assertThat(encodeDecode(1 / v, v), equalTo(expected));
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
package org.elasticsearch.repositories.azure;

import fixture.azure.AzureHttpHandler;
import fixture.azure.MockAzureBlobStore;

import com.azure.storage.common.policy.RequestRetryOptions;
import com.azure.storage.common.policy.RetryPolicyType;
Expand Down Expand Up @@ -184,7 +185,12 @@ long getUploadBlockSize() {
@SuppressForbidden(reason = "this test uses a HttpHandler to emulate an Azure endpoint")
private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implements BlobStoreHttpHandler {
AzureBlobStoreHttpHandler(final String account, final String container) {
super(account, container, null /* no auth header validation - sometimes it's omitted in these tests (TODO why?) */);
super(
account,
container,
null /* no auth header validation - sometimes it's omitted in these tests (TODO why?) */,
MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE
);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
package org.elasticsearch.repositories.azure;

import fixture.azure.AzureHttpFixture;
import fixture.azure.MockAzureBlobStore;

import com.azure.core.exception.HttpResponseException;
import com.azure.storage.blob.BlobContainerClient;
Expand Down Expand Up @@ -60,7 +61,8 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
System.getProperty("test.azure.container"),
System.getProperty("test.azure.tenant_id"),
System.getProperty("test.azure.client_id"),
AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_ACCOUNT)
AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_ACCOUNT),
MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE
);

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ protected String buildKey(String blobName) {
}

private boolean skipRegisterOperation(ActionListener<?> listener) {
return skipCas(listener) || skipIfNotPrimaryOnlyLocationMode(listener);
return skipIfNotPrimaryOnlyLocationMode(listener);
}

private boolean skipIfNotPrimaryOnlyLocationMode(ActionListener<?> listener) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
import com.azure.storage.blob.models.ListBlobsOptions;
import com.azure.storage.blob.options.BlobParallelUploadOptions;
import com.azure.storage.blob.options.BlockBlobSimpleUploadOptions;
import com.azure.storage.blob.specialized.BlobLeaseClient;
import com.azure.storage.blob.specialized.BlobLeaseClientBuilder;
import com.azure.storage.blob.specialized.BlockBlobAsyncClient;

Expand Down Expand Up @@ -1010,7 +1011,7 @@ private static BytesReference innerCompareAndExchangeRegister(
}
return currentValue;
} finally {
leaseClient.releaseLease();
bestEffortRelease(leaseClient);
}
} else {
if (expected.length() == 0) {
Expand All @@ -1020,6 +1021,29 @@ private static BytesReference innerCompareAndExchangeRegister(
}
}

/**
 * Attempt to release the given lease, tolerating the case where it has already
 * expired or been re-acquired by another holder (the service reports this as a
 * 409 Conflict, which is expected and harmless here).
 *
 * @param leaseClient the client holding the lease to release
 * @throws BlobStorageException for any failure other than a conflict
 * @see <a href="https://learn.microsoft.com/en-us/rest/api/storageservices/lease-blob?outcomes-of-lease-operations-on-blobs-by-lease-state">Outcomes of lease operations by lease state</a>
 */
private static void bestEffortRelease(BlobLeaseClient leaseClient) {
    try {
        leaseClient.releaseLease();
    } catch (BlobStorageException e) {
        // Anything other than a conflict is a real failure and must propagate.
        if (e.getStatusCode() != RestStatus.CONFLICT.getStatus()) {
            throw e;
        }
        // Conflict means the lease expired or was re-acquired elsewhere: safe to ignore.
        logger.debug("Ignored conflict on release: errorCode={}, message={}", e.getErrorCode(), e.getMessage());
    }
}

private static BytesReference downloadRegisterBlob(
String containerPath,
String blobKey,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
package org.elasticsearch.repositories.azure;

import fixture.azure.AzureHttpHandler;
import fixture.azure.MockAzureBlobStore;

import org.elasticsearch.common.blobstore.OperationPurpose;
import org.elasticsearch.common.bytes.BytesReference;
Expand All @@ -26,7 +27,7 @@ public class AzureBlobContainerStatsTests extends AbstractAzureServerTestCase {
@SuppressForbidden(reason = "use a http server")
@Before
public void configureAzureHandler() {
httpServer.createContext("/", new AzureHttpHandler(ACCOUNT, CONTAINER, null));
httpServer.createContext("/", new AzureHttpHandler(ACCOUNT, CONTAINER, null, MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE));
}

public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
package org.elasticsearch.repositories.azure;

import fixture.azure.AzureHttpFixture;
import fixture.azure.MockAzureBlobStore;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
Expand Down Expand Up @@ -47,7 +48,8 @@ public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestC
AZURE_TEST_CONTAINER,
AZURE_TEST_TENANT_ID,
AZURE_TEST_CLIENT_ID,
decideAuthHeaderPredicate()
decideAuthHeaderPredicate(),
MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE
);

private static Predicate<String> decideAuthHeaderPredicate() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,20 @@ setup:
container: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
client: integration_test

---
"Register a read-only repository with a non existing container":

- do:
catch: /repository_verification_exception/
snapshot.create_repository:
repository: repository
body:
type: azure
settings:
container: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
client: integration_test
readonly: true

---
"Register a repository with a non existing client":

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
package org.elasticsearch.repositories.s3;

import fixture.s3.S3HttpFixture;
import io.netty.handler.codec.http.HttpMethod;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
Expand Down Expand Up @@ -61,8 +62,6 @@ protected String getTestRestCluster() {
}

public void testReloadCredentialsFromKeystore() throws IOException {
assumeFalse("doesn't work in a FIPS JVM, but that's ok", inFipsJvm());

// Register repository (?verify=false because we don't have access to the blob store yet)
final var repositoryName = randomIdentifier();
registerRepository(
Expand All @@ -77,15 +76,16 @@ public void testReloadCredentialsFromKeystore() throws IOException {
final var accessKey1 = randomIdentifier();
repositoryAccessKey = accessKey1;
keystoreSettings.put("s3.client.default.access_key", accessKey1);
keystoreSettings.put("s3.client.default.secret_key", randomIdentifier());
keystoreSettings.put("s3.client.default.secret_key", randomSecretKey());
cluster.updateStoredSecureSettings();
assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings")));

assertOK(client().performRequest(createReloadSecureSettingsRequest()));

// Check access using initial credentials
assertOK(client().performRequest(verifyRequest));

// Rotate credentials in blob store
final var accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier);
final var accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomSecretKey);
repositoryAccessKey = accessKey2;

// Ensure that initial credentials now invalid
Expand All @@ -99,10 +99,17 @@ public void testReloadCredentialsFromKeystore() throws IOException {
// Set up refreshed credentials
keystoreSettings.put("s3.client.default.access_key", accessKey2);
cluster.updateStoredSecureSettings();
assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings")));
assertOK(client().performRequest(createReloadSecureSettingsRequest()));

// Check access using refreshed credentials
assertOK(client().performRequest(verifyRequest));
}

/**
 * Builds the POST request that asks every node to reload its secure settings.
 * When running in a FIPS JVM the keystore is password-protected, so the request
 * body must carry the keystore password; otherwise the body is left empty.
 *
 * @return the reload-secure-settings request
 * @throws IOException if building the request body fails
 */
private Request createReloadSecureSettingsRequest() throws IOException {
    return newXContentRequest(HttpMethod.POST, "/_nodes/reload_secure_settings", (builder, params) -> {
        if (inFipsJvm()) {
            return builder.field("secure_settings_password", "keystore-password");
        }
        return builder;
    });
}
}
Loading

0 comments on commit a50d069

Please sign in to comment.