Skip to content

Commit

Permalink
Merge branch 'main' into agg-filter-class-cast-exception
Browse files Browse the repository at this point in the history
  • Loading branch information
elasticmachine authored Nov 6, 2024
2 parents cdf7eca + 8db9181 commit 06e7330
Show file tree
Hide file tree
Showing 32 changed files with 412 additions and 246 deletions.
2 changes: 0 additions & 2 deletions .buildkite/pipelines/periodic-packaging.template.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,6 @@ steps:
matrix:
setup:
image:
- windows-2016
- windows-2019
- windows-2022
agents:
provider: gcp
Expand Down
2 changes: 0 additions & 2 deletions .buildkite/pipelines/periodic-packaging.yml
Original file line number Diff line number Diff line change
Expand Up @@ -345,8 +345,6 @@ steps:
matrix:
setup:
image:
- windows-2016
- windows-2019
- windows-2022
agents:
provider: gcp
Expand Down
2 changes: 0 additions & 2 deletions .buildkite/pipelines/periodic-platform-support.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,6 @@ steps:
matrix:
setup:
image:
- windows-2016
- windows-2019
- windows-2022
GRADLE_TASK:
- checkPart1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ steps:
matrix:
setup:
image:
- windows-2019
- windows-2022
PACKAGING_TASK:
- default-windows-archive
agents:
Expand Down
2 changes: 0 additions & 2 deletions .buildkite/pipelines/pull-request/packaging-tests-windows.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,6 @@ steps:
matrix:
setup:
image:
- windows-2016
- windows-2019
- windows-2022
PACKAGING_TASK:
- default-windows-archive
Expand Down
5 changes: 5 additions & 0 deletions docs/changelog/113713.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 113713
summary: Adding inference endpoint validation for `AzureAiStudioService`
area: Machine Learning
type: enhancement
issues: []
15 changes: 15 additions & 0 deletions docs/changelog/114207.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
pr: 114207
summary: Remove `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` setting
area: Allocation
type: breaking
issues: []
breaking:
title: Remove `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` setting
area: 'Cluster and node setting'
details: >-
Prior to 7.8, whenever a cluster had only a single data node, the watermarks would not be respected.
In order to change this in 7.8+ in a backwards compatible way, we introduced the
`cluster.routing.allocation.disk.watermark.enable_for_single_data_node`
node setting. The setting was deprecated in 7.14 and was made to accept only `true` in 8.0.
impact: No known end user impact
notable: false
14 changes: 9 additions & 5 deletions docs/reference/connector/docs/connectors-ms-sql.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,9 @@ include::_connectors-create-native.asciidoc[]
To use this connector as a *managed connector*, use the *Connector* workflow.
See <<es-native-connectors>>.
Users require the `sysadmin` server role.
Users require the `sysadmin` SQL Server role.
Note that SQL Server Authentication is required.
Windows Authentication is not supported.
For additional operations, see <<es-connectors-usage>>.
Expand Down Expand Up @@ -75,10 +77,10 @@ Port::
The port where the Microsoft SQL Server is hosted. Default value is `1433`.
Username::
The username of the account for Microsoft SQL Server.
The username of the account for Microsoft SQL Server (SQL Server Authentication only).
Password::
The password of the account to be used for the Microsoft SQL Server.
The password of the account to be used for the Microsoft SQL Server (SQL Server Authentication only).
Database::
Name of the Microsoft SQL Server database.
Expand Down Expand Up @@ -310,6 +312,8 @@ include::_connectors-create-client.asciidoc[]
===== Usage
Users require the `sysadmin` server role.
Note that SQL Server Authentication is required.
Windows Authentication is not supported.
To use this connector as a *self-managed connector*, see <<es-build-connector>>
For additional usage operations, see <<es-connectors-usage>>.
Expand Down Expand Up @@ -350,10 +354,10 @@ Examples:
The port where the Microsoft SQL Server is hosted. Default value is `9090`.
`username`::
The username of the account for Microsoft SQL Server.
The username of the account for Microsoft SQL Server. (SQL Server Authentication only)
`password`::
The password of the account to be used for the Microsoft SQL Server.
The password of the account to be used for the Microsoft SQL Server. (SQL Server Authentication only)
`database`::
Name of the Microsoft SQL Server database.
Expand Down
12 changes: 3 additions & 9 deletions muted-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,9 +79,6 @@ tests:
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
method: test {p0=search/500_date_range/from, to, include_lower, include_upper deprecated}
issue: https://github.com/elastic/elasticsearch/pull/113286
- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT
method: test {categorize.Categorize}
issue: https://github.com/elastic/elasticsearch/issues/113428
- class: org.elasticsearch.integration.KibanaUserRoleIntegTests
method: testFieldMappings
issue: https://github.com/elastic/elasticsearch/issues/113592
Expand All @@ -94,9 +91,6 @@ tests:
- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT
method: test {categorize.Categorize SYNC}
issue: https://github.com/elastic/elasticsearch/issues/113722
- class: org.elasticsearch.threadpool.SimpleThreadPoolIT
method: testThreadPoolMetrics
issue: https://github.com/elastic/elasticsearch/issues/108320
- class: org.elasticsearch.kibana.KibanaThreadPoolIT
method: testBlockedThreadPoolsRejectUserRequests
issue: https://github.com/elastic/elasticsearch/issues/113939
Expand All @@ -118,9 +112,6 @@ tests:
- class: org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT
method: testTracingCrossCluster
issue: https://github.com/elastic/elasticsearch/issues/112731
- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT
method: testInferDeploysDefaultElser
issue: https://github.com/elastic/elasticsearch/issues/114913
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)}
issue: https://github.com/elastic/elasticsearch/issues/115231
Expand Down Expand Up @@ -294,6 +285,9 @@ tests:
- class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests
method: testBottomFieldSort
issue: https://github.com/elastic/elasticsearch/issues/116249
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set}
issue: https://github.com/elastic/elasticsearch/issues/116332

# Examples:
#
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,6 @@
import static org.junit.Assume.assumeThat;
import static org.junit.Assume.assumeTrue;

@PackagingTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/116299")
public class ArchiveTests extends PackagingTestCase {

@BeforeClass
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -335,7 +335,7 @@ public DesiredBalance compute(
nextReportTime = currentTime + timeWarningInterval;
}

if (hasComputationConverged(hasChanges, i)) {
if (hasChanges == false && hasEnoughIterations(i)) {
logger.debug(
"Desired balance computation for [{}] converged after [{}] and [{}] iterations",
desiredBalanceInput.index(),
Expand Down Expand Up @@ -415,8 +415,8 @@ public DesiredBalance compute(
}

// visible for testing
boolean hasComputationConverged(boolean hasRoutingChanges, int currentIteration) {
return hasRoutingChanges == false;
boolean hasEnoughIterations(int currentIteration) {
return true;
}

private static Map<ShardId, ShardAssignment> collectShardAssignments(RoutingNodes routingNodes) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,7 @@
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.snapshots.SnapshotShardSizeInfo;

import java.util.Map;
Expand Down Expand Up @@ -72,25 +70,6 @@ public class DiskThresholdDecider extends AllocationDecider {

public static final String NAME = "disk_threshold";

@UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION)
public static final Setting<Boolean> ENABLE_FOR_SINGLE_DATA_NODE = Setting.boolSetting(
"cluster.routing.allocation.disk.watermark.enable_for_single_data_node",
true,
new Setting.Validator<>() {
@Override
public void validate(Boolean value) {
if (value == Boolean.FALSE) {
throw new SettingsException(
"setting [{}=false] is not allowed, only true is valid",
ENABLE_FOR_SINGLE_DATA_NODE.getKey()
);
}
}
},
Setting.Property.NodeScope,
Setting.Property.DeprecatedWarning
);

public static final Setting<Boolean> SETTING_IGNORE_DISK_WATERMARKS = Setting.boolSetting(
"index.routing.allocation.disk.watermark.ignore",
false,
Expand All @@ -102,9 +81,6 @@ public void validate(Boolean value) {

public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) {
this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings);
// get deprecation warnings.
boolean enabledForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings);
assert enabledForSingleDataNode;
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
Expand Down Expand Up @@ -272,7 +271,6 @@ public void apply(Settings value, Settings current, Settings previous) {
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,15 @@ default Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) {
return model;
}

/**
* Update a chat completion model's max tokens if required. The default behaviour is to just return the model.
* @param model The original model without updated chat completion details
* @return The model with updated chat completion details
*/
default Model updateModelWithChatCompletionDetails(Model model) {
return model;
}

/**
* Defines the version required across all clusters to use this service
* @return {@link TransportVersion} specifying the version
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,32 +23,20 @@
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;

/**
* Serialization and merge logic for {@link GeoCentroidAggregator}.
*/
public abstract class InternalCentroid extends InternalAggregation implements CentroidAggregation {
protected final SpatialPoint centroid;
protected final long count;
private final FieldExtractor firstField;
private final FieldExtractor secondField;

public InternalCentroid(
String name,
SpatialPoint centroid,
long count,
Map<String, Object> metadata,
FieldExtractor firstField,
FieldExtractor secondField
) {

public InternalCentroid(String name, SpatialPoint centroid, long count, Map<String, Object> metadata) {
super(name, metadata);
assert (centroid == null) == (count == 0);
this.centroid = centroid;
assert count >= 0;
this.count = count;
this.firstField = firstField;
this.secondField = secondField;
}

protected abstract SpatialPoint centroidFromStream(StreamInput in) throws IOException;
Expand All @@ -59,16 +47,14 @@ public InternalCentroid(
* Read from a stream.
*/
@SuppressWarnings("this-escape")
protected InternalCentroid(StreamInput in, FieldExtractor firstField, FieldExtractor secondField) throws IOException {
protected InternalCentroid(StreamInput in) throws IOException {
super(in);
count = in.readVLong();
if (in.readBoolean()) {
centroid = centroidFromStream(in);
} else {
centroid = null;
}
this.firstField = firstField;
this.secondField = secondField;
}

@Override
Expand Down Expand Up @@ -110,11 +96,11 @@ public void accept(InternalAggregation aggregation) {
if (centroidAgg.count > 0) {
totalCount += centroidAgg.count;
if (Double.isNaN(firstSum)) {
firstSum = centroidAgg.count * firstField.extractor.apply(centroidAgg.centroid);
secondSum = centroidAgg.count * secondField.extractor.apply(centroidAgg.centroid);
firstSum = centroidAgg.count * extractFirst(centroidAgg.centroid);
secondSum = centroidAgg.count * extractSecond(centroidAgg.centroid);
} else {
firstSum += centroidAgg.count * firstField.extractor.apply(centroidAgg.centroid);
secondSum += centroidAgg.count * secondField.extractor.apply(centroidAgg.centroid);
firstSum += centroidAgg.count * extractFirst(centroidAgg.centroid);
secondSum += centroidAgg.count * extractSecond(centroidAgg.centroid);
}
}
}
Expand All @@ -126,6 +112,14 @@ public InternalAggregation get() {
};
}

protected abstract String nameFirst();

protected abstract double extractFirst(SpatialPoint point);

protected abstract String nameSecond();

protected abstract double extractSecond(SpatialPoint point);

@Override
public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
return copyWith(centroid, samplingContext.scaleUp(count));
Expand All @@ -136,16 +130,6 @@ protected boolean mustReduceOnSingleInternalAgg() {
return false;
}

protected static class FieldExtractor {
private final String name;
private final Function<SpatialPoint, Double> extractor;

public FieldExtractor(String name, Function<SpatialPoint, Double> extractor) {
this.name = name;
this.extractor = extractor;
}
}

protected abstract double extractDouble(String name);

@Override
Expand Down Expand Up @@ -174,8 +158,8 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
if (centroid != null) {
builder.startObject(Fields.CENTROID.getPreferredName());
{
builder.field(firstField.name, firstField.extractor.apply(centroid));
builder.field(secondField.name, secondField.extractor.apply(centroid));
builder.field(nameFirst(), extractFirst(centroid));
builder.field(nameSecond(), extractSecond(centroid));
}
builder.endObject();
}
Expand Down
Loading

0 comments on commit 06e7330

Please sign in to comment.