Merge branch 'main' into apmdata/restore-event-ingested
inge4pres authored Dec 9, 2024
2 parents e4222fa + 4b868b0 commit 3cba4ad
Showing 67 changed files with 3,780 additions and 363 deletions.
5 changes: 5 additions & 0 deletions docs/changelog/117575.yaml
@@ -0,0 +1,5 @@
pr: 117575
summary: Fix enrich cache size setting name
area: Ingest Node
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/117994.yaml
@@ -0,0 +1,5 @@
pr: 117994
summary: Even better(er) binary quantization
area: Vector Search
type: enhancement
issues: []
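For context, the `bbq_hnsw` index option that this entry improves appears to be what the BBQ REST tests later in this commit exercise. A minimal mapping sketch mirroring that test setup (the field name and `dims: 64` are illustrative only):

    mappings:
      properties:
        vector:
          type: dense_vector             # float vectors, binary-quantized at index time
          dims: 64                       # illustrative; BBQ also applies to higher dims
          index: true
          similarity: max_inner_product
          index_options:
            type: bbq_hnsw               # binary-quantized vectors over an HNSW graph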
11 changes: 11 additions & 0 deletions docs/changelog/118192.yaml
@@ -0,0 +1,11 @@
pr: 118192
summary: Remove `client.type` setting
area: Infra/Core
type: breaking
issues: [104574]
breaking:
title: Remove `client.type` setting
area: Cluster and node setting
details: The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now removed.
impact: Remove the `client.type` setting from `elasticsearch.yml`
notable: false
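In practice the upgrade step is deleting a single line from `elasticsearch.yml` before moving to 9.x; the `transport` value below is a hypothetical leftover, and Elasticsearch normally refuses to start on node settings it no longer knows:

    # elasticsearch.yml — delete this line before upgrading; it has been ignored since 8.0:
    client.type: transport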
@@ -0,0 +1,9 @@
[[transforms-migration-guide]]
== {transforms-cap} migration guide
This guide helps you upgrade your {transforms} to work with the 9.0 release. Each section outlines a breaking change and any manual steps needed to make your {transforms} compatible with 9.0.


=== Updating deprecated {transform} roles (`data_frame_transforms_admin` and `data_frame_transforms_user`)
If you have existing {transforms} that use the deprecated {transform} roles (`data_frame_transforms_admin` or `data_frame_transforms_user`), you must update them to use the new equivalent {transform} roles (`transform_admin` or `transform_user`). To update your {transform} roles (a request sketch follows the steps):
1. Switch to a user with the `transform_admin` role (to replace `data_frame_transforms_admin`) or the `transform_user` role (to replace `data_frame_transforms_user`).
2. Call the <<update-transform, update {transforms} API>> with that user.
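
As an illustrative sketch of step 2 (the {transform} ID `my-old-transform` is hypothetical), an otherwise-empty update issued while authenticated as the new user is sufficient, because the update call re-records the calling user's roles against the {transform}:

[source,console]
----
POST _transform/my-old-transform/_update
{ }
----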
@@ -144,7 +144,7 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException {

@Override
public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
- blobStore.deleteBlobsIgnoringIfNotExists(purpose, new Iterator<>() {
+ blobStore.deleteBlobs(purpose, new Iterator<>() {
@Override
public boolean hasNext() {
return blobNames.hasNext();
@@ -264,8 +264,7 @@ public DeleteResult deleteBlobDirectory(OperationPurpose purpose, String path) t
return new DeleteResult(blobsDeleted.get(), bytesDeleted.get());
}

- @Override
- public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
+ void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) {
if (blobNames.hasNext() == false) {
return;
}
@@ -72,7 +72,7 @@ public void testRetriesAndOperationsAreTrackedSeparately() throws IOException {
false
);
case LIST_BLOBS -> blobStore.listBlobsByPrefix(purpose, randomIdentifier(), randomIdentifier());
- case BLOB_BATCH -> blobStore.deleteBlobsIgnoringIfNotExists(
+ case BLOB_BATCH -> blobStore.deleteBlobs(
purpose,
List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()
);
@@ -113,7 +113,7 @@ public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException
os.flush();
});
// BLOB_BATCH
- blobStore.deleteBlobsIgnoringIfNotExists(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator());
+ blobStore.deleteBlobs(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator());

Map<String, BlobStoreActionStats> stats = blobStore.stats();
String statsMapString = stats.toString();
@@ -148,10 +148,7 @@ public void testOperationPurposeIsNotReflectedInBlobStoreStatsWhenNotServerless(
os.flush();
});
// BLOB_BATCH
- blobStore.deleteBlobsIgnoringIfNotExists(
-     purpose,
-     List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator()
- );
+ blobStore.deleteBlobs(purpose, List.of(randomIdentifier(), randomIdentifier(), randomIdentifier()).iterator());
}

Map<String, BlobStoreActionStats> stats = blobStore.stats();
@@ -114,12 +114,12 @@ public void writeBlobAtomic(OperationPurpose purpose, String blobName, BytesRefe

@Override
public DeleteResult delete(OperationPurpose purpose) throws IOException {
- return blobStore.deleteDirectory(purpose, path().buildAsString());
+ return blobStore.deleteDirectory(path().buildAsString());
}

@Override
public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
- blobStore.deleteBlobsIgnoringIfNotExists(purpose, new Iterator<>() {
+ blobStore.deleteBlobs(new Iterator<>() {
@Override
public boolean hasNext() {
return blobNames.hasNext();
@@ -29,7 +29,6 @@
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.BlobStoreActionStats;
import org.elasticsearch.common.blobstore.DeleteResult;
- import org.elasticsearch.common.blobstore.OperationPurpose;
import org.elasticsearch.common.blobstore.OptionalBytesReference;
import org.elasticsearch.common.blobstore.support.BlobContainerUtils;
import org.elasticsearch.common.blobstore.support.BlobMetadata;
@@ -491,18 +490,17 @@ private void writeBlobMultipart(BlobInfo blobInfo, byte[] buffer, int offset, in
/**
* Deletes the given path and all its children.
*
- * @param purpose The purpose of the delete operation
* @param pathStr Name of path to delete
*/
- DeleteResult deleteDirectory(OperationPurpose purpose, String pathStr) throws IOException {
+ DeleteResult deleteDirectory(String pathStr) throws IOException {
return SocketAccess.doPrivilegedIOException(() -> {
DeleteResult deleteResult = DeleteResult.ZERO;
Page<Blob> page = client().list(bucketName, BlobListOption.prefix(pathStr));
do {
final AtomicLong blobsDeleted = new AtomicLong(0L);
final AtomicLong bytesDeleted = new AtomicLong(0L);
final Iterator<Blob> blobs = page.getValues().iterator();
- deleteBlobsIgnoringIfNotExists(purpose, new Iterator<>() {
+ deleteBlobs(new Iterator<>() {
@Override
public boolean hasNext() {
return blobs.hasNext();
@@ -526,11 +524,9 @@ public String next() {
/**
* Deletes multiple blobs from the specific bucket using a batch request
*
- * @param purpose the purpose of the delete operation
* @param blobNames names of the blobs to delete
*/
- @Override
- public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
+ void deleteBlobs(Iterator<String> blobNames) throws IOException {
if (blobNames.hasNext() == false) {
return;
}
@@ -342,10 +342,10 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException {
return summary.getKey();
});
if (list.isTruncated()) {
- blobStore.deleteBlobsIgnoringIfNotExists(purpose, blobNameIterator);
+ blobStore.deleteBlobs(purpose, blobNameIterator);
prevListing = list;
} else {
- blobStore.deleteBlobsIgnoringIfNotExists(purpose, Iterators.concat(blobNameIterator, Iterators.single(keyPath)));
+ blobStore.deleteBlobs(purpose, Iterators.concat(blobNameIterator, Iterators.single(keyPath)));
break;
}
}
@@ -357,7 +357,7 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException {

@Override
public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
- blobStore.deleteBlobsIgnoringIfNotExists(purpose, Iterators.map(blobNames, this::buildKey));
+ blobStore.deleteBlobs(purpose, Iterators.map(blobNames, this::buildKey));
}

@Override
@@ -340,8 +340,7 @@ public BlobContainer blobContainer(BlobPath path) {
return new S3BlobContainer(path, this);
}

- @Override
- public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
+ void deleteBlobs(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
if (blobNames.hasNext() == false) {
return;
}
@@ -13,7 +13,6 @@
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.BlobStoreException;
- import org.elasticsearch.common.blobstore.OperationPurpose;
import org.elasticsearch.common.blobstore.url.http.HttpURLBlobContainer;
import org.elasticsearch.common.blobstore.url.http.URLHttpClient;
import org.elasticsearch.common.blobstore.url.http.URLHttpClientSettings;
@@ -23,10 +22,8 @@
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.CheckedFunction;

- import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
- import java.util.Iterator;
import java.util.List;

/**
@@ -109,11 +106,6 @@ public BlobContainer blobContainer(BlobPath blobPath) {
}
}

- @Override
- public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
-     throw new UnsupportedOperationException("Bulk deletes are not supported in URL repositories");
- }
-
@Override
public void close() {
// nothing to do here...
@@ -16,10 +16,8 @@
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
- import org.elasticsearch.common.blobstore.OperationPurpose;

import java.io.IOException;
- import java.util.Iterator;

final class HdfsBlobStore implements BlobStore {

@@ -72,11 +70,6 @@ public BlobContainer blobContainer(BlobPath path) {
return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize, securityContext, replicationFactor);
}

- @Override
- public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobNames) throws IOException {
-     throw new UnsupportedOperationException("Bulk deletes are not supported in Hdfs repositories");
- }
-
private Path buildHdfsPath(BlobPath blobPath) {
final Path path = translateToHdfsPath(blobPath);
if (readOnly == false) {
@@ -46,11 +46,6 @@ public void testSnapshotAndRestore() throws Exception {
testSnapshotAndRestore(false);
}

- @Override
- public void testBlobStoreBulkDeletion() throws Exception {
-     // HDFS does not implement bulk deletion from different BlobContainers
- }
-
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(HdfsPlugin.class);
2 changes: 2 additions & 0 deletions rest-api-spec/build.gradle
@@ -67,4 +67,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task ->
task.skipTest("logsdb/20_source_mapping/include/exclude is supported with stored _source", "no longer serialize source_mode")
task.skipTest("logsdb/20_source_mapping/synthetic _source is default", "no longer serialize source_mode")
task.skipTest("search/520_fetch_fields/fetch _seq_no via fields", "error code is changed from 5xx to 400 in 9.0")
task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions")
task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions")
})
@@ -11,20 +11,11 @@ setup:
number_of_shards: 1
mappings:
properties:
- name:
-   type: keyword
vector:
type: dense_vector
dims: 64
index: true
-   similarity: l2_norm
-   index_options:
-     type: bbq_hnsw
- another_vector:
-   type: dense_vector
-   dims: 64
-   index: true
-   similarity: l2_norm
+   similarity: max_inner_product
index_options:
type: bbq_hnsw

@@ -33,9 +24,14 @@
index: bbq_hnsw
id: "1"
body:
- name: cow.jpg
- vector: [300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0, 230.0, 300.33, -34.8988, 15.555, -200.0]
- another_vector: [115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0, 130.0, 115.0, -1.02, 15.555, -100.0]
+ vector: [0.077, 0.32 , -0.205, 0.63 , 0.032, 0.201, 0.167, -0.313,
+   0.176, 0.531, -0.375, 0.334, -0.046, 0.078, -0.349, 0.272,
+   0.307, -0.083, 0.504, 0.255, -0.404, 0.289, -0.226, -0.132,
+   -0.216, 0.49 , 0.039, 0.507, -0.307, 0.107, 0.09 , -0.265,
+   -0.285, 0.336, -0.272, 0.369, -0.282, 0.086, -0.132, 0.475,
+   -0.224, 0.203, 0.439, 0.064, 0.246, -0.396, 0.297, 0.242,
+   -0.028, 0.321, -0.022, -0.009, -0.001 , 0.031, -0.533, 0.45,
+   -0.683, 1.331, 0.194, -0.157, -0.1 , -0.279, -0.098, -0.176]
# Flush in order to provoke a merge later
- do:
indices.flush:
@@ -46,9 +42,14 @@
index: bbq_hnsw
id: "2"
body:
- name: moose.jpg
- vector: [100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0, -0.5, 100.0, -13, 14.8, -156.0]
- another_vector: [50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120, -0.5, 50.0, -1, 1, 120]
+ vector: [0.196, 0.514, 0.039, 0.555, -0.042, 0.242, 0.463, -0.348,
+   -0.08 , 0.442, -0.067, -0.05 , -0.001, 0.298, -0.377, 0.048,
+   0.307, 0.159, 0.278, 0.119, -0.057, 0.333, -0.289, -0.438,
+   -0.014, 0.361, -0.169, 0.292, -0.229, 0.123, 0.031, -0.138,
+   -0.139, 0.315, -0.216, 0.322, -0.445, -0.059, 0.071, 0.429,
+   -0.602, -0.142, 0.11 , 0.192, 0.259, -0.241, 0.181, -0.166,
+   0.082, 0.107, -0.05 , 0.155, 0.011, 0.161, -0.486, 0.569,
+   -0.489, 0.901, 0.208, 0.011, -0.209, -0.153, -0.27 , -0.013]
# Flush in order to provoke a merge later
- do:
indices.flush:
@@ -60,8 +61,14 @@
id: "3"
body:
name: rabbit.jpg
- vector: [111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0, 0.5, 111.3, -13.0, 14.8, -156.0]
- another_vector: [11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0, -0.5, 11.0, 0, 12, 111.0]
+ vector: [0.139, 0.178, -0.117, 0.399, 0.014, -0.139, 0.347, -0.33 ,
+   0.139, 0.34 , -0.052, -0.052, -0.249, 0.327, -0.288, 0.049,
+   0.464, 0.338, 0.516, 0.247, -0.104, 0.259, -0.209, -0.246,
+   -0.11 , 0.323, 0.091, 0.442, -0.254, 0.195, -0.109, -0.058,
+   -0.279, 0.402, -0.107, 0.308, -0.273, 0.019, 0.082, 0.399,
+   -0.658, -0.03 , 0.276, 0.041, 0.187, -0.331, 0.165, 0.017,
+   0.171, -0.203, -0.198, 0.115, -0.007, 0.337, -0.444, 0.615,
+   -0.657, 1.285, 0.2 , -0.062, 0.038, 0.089, -0.068, -0.058]
# Flush in order to provoke a merge later
- do:
indices.flush:
@@ -73,20 +80,33 @@
max_num_segments: 1
---
"Test knn search":
+ - requires:
+     capabilities:
+       - method: POST
+         path: /_search
+         capabilities: [ optimized_scalar_quantization_bbq ]
+     test_runner_features: capabilities
+     reason: "BBQ scoring improved and changed with optimized_scalar_quantization_bbq"
- do:
search:
index: bbq_hnsw
body:
knn:
field: vector
- query_vector: [ 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0, -0.5, 90.0, -10, 14.8, -156.0]
+ query_vector: [0.128, 0.067, -0.08 , 0.395, -0.11 , -0.259, 0.473, -0.393,
+   0.292, 0.571, -0.491, 0.444, -0.288, 0.198, -0.343, 0.015,
+   0.232, 0.088, 0.228, 0.151, -0.136, 0.236, -0.273, -0.259,
+   -0.217, 0.359, -0.207, 0.352, -0.142, 0.192, -0.061, -0.17 ,
+   -0.343, 0.189, -0.221, 0.32 , -0.301, -0.1 , 0.005, 0.232,
+   -0.344, 0.136, 0.252, 0.157, -0.13 , -0.244, 0.193, -0.034,
+   -0.12 , -0.193, -0.102, 0.252, -0.185, -0.167, -0.575, 0.582,
+   -0.426, 0.983, 0.212, 0.204, 0.03 , -0.276, -0.425, -0.158]
k: 3
num_candidates: 3

- # Depending on how things are distributed, docs 2 and 3 might be swapped
- # here we verify that are last hit is always the worst one
- - match: { hits.hits.2._id: "1" }
-
+ - match: { hits.hits.0._id: "1" }
+ - match: { hits.hits.1._id: "3" }
+ - match: { hits.hits.2._id: "2" }
---
"Test bad quantization parameters":
- do:
Expand Down