Merge branch 'master' into generalize-remote-license-check
* master:
  NETWORKING: Make RemoteClusterConn. Lazy Resolve DNS (elastic#32764)
  [DOCS] Splits the users API documentation into multiple pages (elastic#32825)
  [DOCS] Splits the token APIs into separate pages (elastic#32865)
  [DOCS] Creates redirects for role management APIs page
  Bypassing failing test PainlessDomainSplitIT#testHRDSplit (elastic#32966)
  TEST: Mute testRetentionPolicyChangeDuringRecovery
  [DOCS] Fixes more broken links to role management APIs
  [Docs] Tweaks and fixes to rollup docs
  [DOCS] Fixes links to role management APIs
  [ML][TEST] Fix BasicRenormalizationIT after adding multibucket feature
  [DOCS] Splits the roles API documentation into multiple pages (elastic#32794)
  [TEST]  Run pre 6.4 nodes in non-FIPS JVMs (elastic#32901)
  Make Geo Context Mapping Parsing More Strict (elastic#32821)
jasontedor committed Aug 18, 2018
2 parents 3744702 + f82bb64 commit 844830d
Showing 57 changed files with 1,155 additions and 657 deletions.
@@ -177,6 +177,12 @@ class NodeInfo {
javaVersion = 8
} else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) {
javaVersion = 9
} else if (project.inFipsJvm && nodeVersion.onOrAfter("6.3.0") && nodeVersion.before("6.4.0")) {
/*
* Elasticsearch versions before 6.4.0 cannot be run in a FIPS-140 JVM. If we're running
* bwc tests in a FIPS-140 JVM, ensure that the pre v6.4.0 nodes use a Java 10 JVM instead.
*/
javaVersion = 10
}

args.addAll("-E", "node.portsfile=true")
3 changes: 3 additions & 0 deletions docs/reference/migration/migrate_7_0/search.asciidoc
@@ -92,6 +92,9 @@ deprecated in 6.x, has been removed. Context enabled suggestion queries
without contexts have to visit every suggestion, which degrades the search performance
considerably.

For the geo context, the value of the `path` parameter is now validated against the mapping,
and the context is only accepted if `path` points to a field with the `geo_point` type.

==== Semantics changed for `max_concurrent_shard_requests`

`max_concurrent_shard_requests` used to limit the total number of concurrent shard
28 changes: 28 additions & 0 deletions docs/reference/redirects.asciidoc
@@ -503,3 +503,31 @@ guide to the {painless}/index.html[Painless Scripting Language].

See the {painless}/painless-api-reference.html[Painless API Reference] in
the guide to the {painless}/index.html[Painless Scripting Language].

[role="exclude", id="security-api-roles"]
=== Role management APIs

You can use the following APIs to add, remove, and retrieve roles in the native realm:

* <<security-api-put-role,Create role>>, <<security-api-delete-role,Delete role>>
* <<security-api-clear-role-cache,Clear roles cache>>
* <<security-api-get-role,Get roles>>

[role="exclude",id="security-api-tokens"]
=== Token management APIs

You can use the following APIs to create and invalidate bearer tokens for access
without requiring basic authentication:

* <<security-api-get-token,Get token>>, <<security-api-invalidate-token,Invalidate token>>

[role="exclude",id="security-api-users"]
=== User management APIs

You can use the following APIs to create, read, update, and delete users from the
native realm:

* <<security-api-put-user,Create users>>, <<security-api-delete-user,Delete users>>
* <<security-api-enable-user,Enable users>>, <<security-api-disable-user,Disable users>>
* <<security-api-change-password,Change passwords>>
* <<security-api-get-user,Get users>>
@@ -52,6 +52,7 @@
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.search.suggest.completion.context.ContextMapping;

import java.io.Closeable;
import java.io.IOException;
@@ -421,6 +422,8 @@ private synchronized Map<String, DocumentMapper> internalMerge(@Nullable Documen
MapperMergeValidator.validateFieldReferences(fieldMappers, fieldAliasMappers,
fullPathObjectMappers, fieldTypes);

ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get);
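// Sketch for illustration (not in the change itself): the call above runs as part of every
// mapping merge, so a completion field whose geo context `path` does not resolve to a
// geo_point field is reported both when an index is created and when its mapping is updated.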

if (reason == MergeReason.MAPPING_UPDATE) {
// this check will only be performed on the master node when there is
// a call to the update mapping API. For all other cases like
@@ -20,6 +20,7 @@
package org.elasticsearch.search.suggest.completion.context;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
@@ -28,13 +29,16 @@
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.mapper.CompletionFieldMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.ParseContext;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;

/**
* A {@link ContextMapping} defines criteria that can be used to
@@ -131,6 +135,31 @@ public final List<InternalQueryContext> parseQueryContext(XContentParser parser)
*/
protected abstract XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException;

/**
* Checks if the current context is consistent with the rest of the fields. For example, the GeoContext
* should check that the field that it points to has the correct type.
*/
protected void validateReferences(Version indexVersionCreated, Function<String, MappedFieldType> fieldResolver) {
// No validation is required by default
}
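// Sketch for illustration (hypothetical subclass, not in the change itself; referencedField is an
// assumed name): a context mapping can override this hook to reject a missing reference, e.g.
//
//     @Override
//     protected void validateReferences(Version indexVersionCreated, Function<String, MappedFieldType> fieldResolver) {
//         if (fieldResolver.apply(referencedField) == null) {
//             throw new ElasticsearchParseException(
//                 "field [{}] referenced in context [{}] is not defined in the mapping", referencedField, name);
//         }
//     }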

/**
* Verifies that all field paths specified in contexts point to the fields with correct mappings
*/
public static void validateContextPaths(Version indexVersionCreated, List<FieldMapper> fieldMappers,
Function<String, MappedFieldType> fieldResolver) {
for (FieldMapper fieldMapper : fieldMappers) {
if (CompletionFieldMapper.CONTENT_TYPE.equals(fieldMapper.typeName())) {
CompletionFieldMapper.CompletionFieldType fieldType = ((CompletionFieldMapper) fieldMapper).fieldType();
if (fieldType.hasContextMappings()) {
for (ContextMapping context : fieldType.getContextMappings()) {
context.validateReferences(indexVersionCreated, fieldResolver);
}
}
}
}
}

@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(FIELD_NAME, name);
@@ -37,6 +37,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -50,7 +51,7 @@
* and creates context queries for defined {@link ContextMapping}s
* for a {@link CompletionFieldMapper}
*/
public class ContextMappings implements ToXContent {
public class ContextMappings implements ToXContent, Iterable<ContextMapping<?>> {

private final List<ContextMapping<?>> contextMappings;
private final Map<String, ContextMapping<?>> contextNameMap;
@@ -97,6 +98,11 @@ public void addField(ParseContext.Document document, String name, String input,
document.add(new TypedContextField(name, input, weight, contexts, document));
}

@Override
public Iterator<ContextMapping<?>> iterator() {
return contextMappings.iterator();
}
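// Sketch for illustration (not in the change itself; completionFieldType is an assumed variable):
// the new Iterable view lets callers walk a completion field's contexts directly, e.g.
//
//     for (ContextMapping<?> context : completionFieldType.getContextMappings()) {
//         context.validateReferences(indexVersionCreated, fieldResolver);
//     }
//
// which is how ContextMapping.validateContextPaths drives the per-context validation above.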

/**
* Field prepends context values with a suggestion
* Context values are associated with a type, denoted by
@@ -19,12 +19,17 @@

package org.elasticsearch.search.suggest.completion.context;

import org.apache.logging.log4j.LogManager;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -42,6 +47,7 @@
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.elasticsearch.common.geo.GeoHashUtils.addNeighbors;
@@ -69,6 +75,8 @@ public class GeoContextMapping extends ContextMapping<GeoQueryContext> {
static final String CONTEXT_PRECISION = "precision";
static final String CONTEXT_NEIGHBOURS = "neighbours";

private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class));

private final int precision;
private final String fieldName;

@@ -205,11 +213,11 @@ public Set<CharSequence> parseContext(Document document) {
for (IndexableField field : fields) {
if (field instanceof StringField) {
spare.resetFromString(field.stringValue());
} else {
// todo return this to .stringValue() once LatLonPoint implements it
geohashes.add(spare.geohash());
} else if (field instanceof LatLonPoint || field instanceof LatLonDocValuesField) {
spare.resetFromIndexableField(field);
geohashes.add(spare.geohash());
}
geohashes.add(spare.geohash());
}
}
}
@@ -279,6 +287,32 @@ public List<InternalQueryContext> toInternalQueryContexts(List<GeoQueryContext>
return internalQueryContextList;
}

@Override
protected void validateReferences(Version indexVersionCreated, Function<String, MappedFieldType> fieldResolver) {
if (fieldName != null) {
MappedFieldType mappedFieldType = fieldResolver.apply(fieldName);
if (mappedFieldType == null) {
if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping",
"field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name);
} else {
throw new ElasticsearchParseException(
"field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name);
}
} else if (GeoPointFieldMapper.CONTENT_TYPE.equals(mappedFieldType.typeName()) == false) {
if (indexVersionCreated.before(Version.V_7_0_0_alpha1)) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping",
"field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]",
fieldName, name, mappedFieldType.typeName());
} else {
throw new ElasticsearchParseException(
"field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]",
fieldName, name, mappedFieldType.typeName());
}
}
}
}
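// Sketch for illustration (test-style; geoContextMapping and keywordFieldType are assumed fixtures,
// not in the change itself): with a `path` that resolves to a non-geo_point field, indices created on
// or after 7.0.0-alpha1 reject the mapping, while older indices only log a deprecation warning.
//
//     Function<String, MappedFieldType> resolver = field -> keywordFieldType;
//     expectThrows(ElasticsearchParseException.class,
//         () -> geoContextMapping.validateReferences(Version.V_7_0_0_alpha1, resolver));
//     geoContextMapping.validateReferences(Version.V_6_3_0, resolver); // deprecation warning only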

@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.transport;

import java.util.function.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -48,9 +49,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
/**
* A list of initial seed nodes to discover eligible nodes from the remote cluster
*/
public static final Setting.AffixSetting<List<InetSocketAddress>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.",
"seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress,
Setting.Property.NodeScope, Setting.Property.Dynamic));
public static final Setting.AffixSetting<List<String>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting(
"search.remote.",
"seeds",
key -> Setting.listSetting(
key, Collections.emptyList(),
s -> {
// validate seed address
parsePort(s);
return s;
},
Setting.Property.NodeScope,
Setting.Property.Dynamic
)
);
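// Sketch for illustration (not in the change itself; the alias "cluster_one" is an assumed example):
// seeds are now kept as plain "host:port" strings, so only the port is validated eagerly and the host
// name is not resolved until a connection is attempted.
//
//     Settings settings = Settings.builder()
//         .putList("search.remote.cluster_one.seeds", "seed.example.com:9300")
//         .build();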
public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
public static final String LOCAL_CLUSTER_GROUP_KEY = "";

@@ -65,18 +77,20 @@ protected RemoteClusterAware(Settings settings) {
this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
}

protected static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
protected static Map<String, List<Supplier<DiscoveryNode>>> buildRemoteClustersSeeds(Settings settings) {
Stream<Setting<List<String>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
return allConcreteSettings.collect(
Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
List<DiscoveryNode> nodes = new ArrayList<>();
for (InetSocketAddress address : concreteSetting.get(settings)) {
TransportAddress transportAddress = new TransportAddress(address);
DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
transportAddress,
Version.CURRENT.minimumCompatibilityVersion());
nodes.add(node);
List<String> addresses = concreteSetting.get(settings);
List<Supplier<DiscoveryNode>> nodes = new ArrayList<>(addresses.size());
for (String address : addresses) {
nodes.add(() -> {
TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
return new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
transportAddress,
Version.CURRENT.minimumCompatibilityVersion());
});
}
return nodes;
}));
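// Sketch for illustration (not in the change itself; variable names are assumed): wrapping each seed
// in a Supplier<DiscoveryNode> defers the DNS lookup until the supplier is invoked, i.e. when a
// connection is actually attempted rather than when the settings are parsed.
//
//     Map<String, List<Supplier<DiscoveryNode>>> seeds = buildRemoteClustersSeeds(settings);
//     DiscoveryNode seedNode = seeds.get("cluster_one").get(0).get(); // host resolved here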
@@ -128,7 +142,7 @@ public Map<String, List<String>> groupClusterIndices(String[] requestIndices, Pr
* Subclasses must implement this to receive information about updated cluster aliases. If the given address list is
* empty the cluster alias is unregistered and should be removed.
*/
protected abstract void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses);
protected abstract void updateRemoteCluster(String clusterAlias, List<String> addresses);

/**
* Registers this instance to listen to updates on the cluster settings.
@@ -138,27 +152,35 @@ public void listenForUpdates(ClusterSettings clusterSettings) {
(namespace, value) -> {});
}

private static InetSocketAddress parseSeedAddress(String remoteHost) {
int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
if (portSeparator == -1 || portSeparator == remoteHost.length()) {
throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
}
String host = remoteHost.substring(0, portSeparator);
protected static InetSocketAddress parseSeedAddress(String remoteHost) {
String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost));
InetAddress hostAddress;
try {
hostAddress = InetAddress.getByName(host);
} catch (UnknownHostException e) {
throw new IllegalArgumentException("unknown host [" + host + "]", e);
}
return new InetSocketAddress(hostAddress, parsePort(remoteHost));
}

private static int parsePort(String remoteHost) {
try {
int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
int port = Integer.valueOf(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1));
if (port <= 0) {
throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
}
return new InetSocketAddress(hostAddress, port);
return port;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("port must be a number", e);
throw new IllegalArgumentException("failed to parse port", e);
}
}

private static int indexOfPortSeparator(String remoteHost) {
int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
if (portSeparator == -1 || portSeparator == remoteHost.length()) {
throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
}
return portSeparator;
}
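// Worked examples of the helpers above, for illustration (not in the change itself):
//
//     parseSeedAddress("127.0.0.1:9300")   // -> InetSocketAddress 127.0.0.1:9300
//     parsePort("[::1]:9300")              // -> 9300; lastIndexOf(':') keeps the IPv6 literal intact
//     parsePort("localhost")               // -> IllegalArgumentException: remote hosts need to be
//                                          //    configured as [host:port], found [localhost] instead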

public static String buildRemoteIndexName(String clusterAlias, String indexName) {