Merge branch 'master' into feature/query-refactoring
Conflicts:
	core/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java
javanna committed Jun 9, 2015
2 parents 94f6144 + fa879a6 commit 536492a
Showing 22 changed files with 225 additions and 59 deletions.
15 changes: 6 additions & 9 deletions .gitignore
@@ -16,21 +16,18 @@ docs/build.log
backwards/

## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
## The only configuration files which are not ignored are certain files in
## .settings (as listed below) since these files ensure common coding
## style across Eclipse and IDEA.
## Other files (.project, .classpath) should be generated through Maven which
## will correctly set the classpath based on the declared dependencies.
## All files (.project, .classpath, .settings/*) should be generated through Maven which
## will correctly set the classpath based on the declared dependencies and write settings
## files to ensure common coding style across Eclipse and IDEA.
.project
.classpath
/.settings
eclipse-build
*/.project
*/.classpath
*/eclipse-build
/.settings/
!/.settings/org.eclipse.core.resources.prefs
!/.settings/org.eclipse.jdt.core.prefs
!/.settings/org.eclipse.jdt.ui.prefs
*/.settings
plugins/*/.settings

## netbeans ignores
nb-configuration.xml
@@ -19,6 +19,7 @@

package org.elasticsearch.common.component;

import com.google.common.base.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@@ -51,4 +52,22 @@ public AbstractComponent(Settings settings, Class customClass) {
public final String nodeName() {
return settings.get("name", "");
}

/**
* Checks for a deprecated setting and logs the correct alternative
*/
protected void logDeprecatedSetting(String settingName, String alternativeName) {
if (!Strings.isNullOrEmpty(settings.get(settingName))) {
deprecationLogger.deprecated("Setting [{}] is deprecated, use [{}] instead", settingName, alternativeName);
}
}

/**
* Checks for a removed setting and logs the correct alternative
*/
protected void logRemovedSetting(String settingName, String alternativeName) {
if (!Strings.isNullOrEmpty(settings.get(settingName))) {
deprecationLogger.deprecated("Setting [{}] has been removed, use [{}] instead", settingName, alternativeName);
}
}
}
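For illustration only (not part of the commit), a minimal sketch of how a subclass could use the two new helpers; the class and setting names below are hypothetical, and the single-argument `super(settings)` constructor is the one used elsewhere in this diff:

[source,java]
--------------------------------------------------
package org.example;

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;

// Hypothetical component: only logDeprecatedSetting/logRemovedSetting come from this commit.
public class ExampleService extends AbstractComponent {

    public ExampleService(Settings settings) {
        super(settings);
        // Warns only if the old key is actually present in the node settings.
        logDeprecatedSetting("example.old_timeout", "example.timeout");
        // States that the old key has been removed and which key replaces it.
        logRemovedSetting("example.legacy_mode", "example.mode");
    }
}
--------------------------------------------------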
10 changes: 10 additions & 0 deletions core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -21,7 +21,9 @@

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.*;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
@@ -116,11 +118,15 @@ public String toString() {
// Setting to enable custom index.data_path setting for new indices
public static final String SETTING_CUSTOM_DATA_PATH_ENABLED = "node.enable_custom_paths";

// If enabled, the [verbose] SegmentInfos.infoStream logging is sent to System.out:
public static final String SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE = "node.enable_lucene_segment_infos_trace";

public static final String NODES_FOLDER = "nodes";
public static final String INDICES_FOLDER = "indices";
public static final String NODE_LOCK_FILENAME = "node.lock";

@Inject
@SuppressForbidden(reason = "System.out.*")
public NodeEnvironment(Settings settings, Environment environment) throws IOException {
super(settings);

@@ -186,6 +192,10 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce
}

maybeLogPathDetails();

if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) {
SegmentInfos.setInfoStream(System.out);
}
}

private static void releaseAndNullLocks(Lock[] locks) {
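As a sketch (not part of the commit), the new trace flag set through the Settings API used elsewhere in this diff; only the setting key itself comes from the change above:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class SegmentInfosTraceSettingsExample {
    public static void main(String[] args) {
        // Build node settings with the new flag; when NodeEnvironment starts with this
        // set to true, Lucene's SegmentInfos infoStream is routed to System.out.
        Settings settings = Settings.settingsBuilder()
                .put("node.enable_lucene_segment_infos_trace", "true")
                .build();

        // Same read the constructor above performs (defaults to false).
        boolean enabled = settings.getAsBoolean("node.enable_lucene_segment_infos_trace", false);
        System.out.println("segment infos trace enabled: " + enabled);
    }
}
--------------------------------------------------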
@@ -77,10 +77,25 @@ public IdsQueryBuilder addIds(String... ids) {
/**
* Adds ids to the query.
*/
public IdsQueryBuilder addIds(Collection<String> ids) {
this.ids.addAll(ids);
return this;
}

/**
* Adds ids to the filter.
*/
public IdsQueryBuilder ids(String... ids) {
return addIds(ids);
}

/**
* Adds ids to the filter.
*/
public IdsQueryBuilder ids(Collection<String> ids) {
return addIds(ids);
}

/**
* Returns the ids for the query.
*/
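A hedged usage sketch (not part of the commit) of the new overloads; the no-argument constructor is assumed, everything else is visible in the hunk above:

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.Collection;

import org.elasticsearch.index.query.IdsQueryBuilder;

public class IdsQueryBuilderExample {
    public static void main(String[] args) {
        Collection<String> moreIds = Arrays.asList("3", "4");

        IdsQueryBuilder query = new IdsQueryBuilder()  // assumed no-arg constructor
                .addIds("1", "2")     // existing varargs method
                .addIds(moreIds)      // new Collection<String> overload
                .ids("5");            // new alias that delegates to addIds(String...)
    }
}
--------------------------------------------------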
@@ -1327,7 +1327,7 @@ private Tuple<DocumentMapper, Mapping> docMapper(String type) {
}

private final EngineConfig newEngineConfig(TranslogConfig translogConfig) {
final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(mapperService, mapperAnalyzer, queryParserService, indexAliasesService, indexCache) {
final TranslogRecoveryPerformer translogRecoveryPerformer = new TranslogRecoveryPerformer(shardId, mapperService, mapperAnalyzer, queryParserService, indexAliasesService, indexCache) {
@Override
protected void operationProcessed() {
assert recoveryState != null;
@@ -55,8 +55,10 @@ public class TranslogRecoveryPerformer {
private final IndexCache indexCache;
private final MapperAnalyzer mapperAnalyzer;
private final Map<String, Mapping> recoveredTypes = new HashMap<>();
private final ShardId shardId;

protected TranslogRecoveryPerformer(MapperService mapperService, MapperAnalyzer mapperAnalyzer, IndexQueryParserService queryParserService, IndexAliasesService indexAliasesService, IndexCache indexCache) {
protected TranslogRecoveryPerformer(ShardId shardId, MapperService mapperService, MapperAnalyzer mapperAnalyzer, IndexQueryParserService queryParserService, IndexAliasesService indexAliasesService, IndexCache indexCache) {
this.shardId = shardId;
this.mapperService = mapperService;
this.queryParserService = queryParserService;
this.indexAliasesService = indexAliasesService;
@@ -76,13 +78,33 @@ protected Tuple<DocumentMapper, Mapping> docMapper(String type) {
*/
int performBatchRecovery(Engine engine, Iterable<Translog.Operation> operations) {
int numOps = 0;
for (Translog.Operation operation : operations) {
performRecoveryOperation(engine, operation, false);
numOps++;
try {
for (Translog.Operation operation : operations) {
performRecoveryOperation(engine, operation, false);
numOps++;
}
} catch (Throwable t) {
throw new BatchOperationException(shardId, "failed to apply batch translog operation [" + t.getMessage() + "]", numOps, t);
}
return numOps;
}

public static class BatchOperationException extends IndexShardException {

private final int completedOperations;

public BatchOperationException(ShardId shardId, String msg, int completedOperations, Throwable cause) {
super(shardId, msg, cause);
this.completedOperations = completedOperations;
}


/** the number of successful operations performed before the exception was thrown */
public int completedOperations() {
return completedOperations;
}
}

private void maybeAddMappingUpdate(String type, Mapping update, String docId, boolean allowMappingUpdates) {
if (update == null) {
return;
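For reference, a small sketch (not part of the commit) of what the new exception exposes; the constructor and accessor are the ones added above, and the import paths are assumed from this diff:

[source,java]
--------------------------------------------------
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException;

public class BatchOperationExceptionExample {
    public static void main(String[] args) {
        ShardId shardId = new ShardId("test", 0);

        // completedOperations records how many translog operations were applied
        // before the batch failed, so callers can roll back per-operation stats.
        BatchOperationException e = new BatchOperationException(
                shardId, "failed to apply batch translog operation", 3, new RuntimeException("boom"));

        System.out.println("operations applied before failure: " + e.completedOperations());
    }
}
--------------------------------------------------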
@@ -26,7 +26,6 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -506,6 +505,13 @@ public synchronized void incrementRecoveredOperations() {
assert total == UNKNOWN || total >= recovered : "total, if known, should be > recovered. total [" + total + "], recovered [" + recovered + "]";
}

public synchronized void decrementRecoveredOperations(int ops) {
recovered -= ops;
assert recovered >= 0 : "recovered operations must be non-negative. Because [" + recovered + "] after decrementing [" + ops + "]";
assert total == UNKNOWN || total >= recovered : "total, if known, should be > recovered. total [" + total + "], recovered [" + recovered + "]";
}


/**
* returns the total number of translog operations recovered so far
*/
@@ -47,10 +47,7 @@
import org.elasticsearch.index.engine.RecoveryEngineException;
import org.elasticsearch.index.mapper.MapperException;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.*;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.IndexMissingException;
import org.elasticsearch.indices.IndicesLifecycle;
@@ -308,10 +305,14 @@ public void messageReceived(final RecoveryTranslogOperationsRequest request, fin
assert recoveryStatus.indexShard().recoveryState() == recoveryStatus.state();
try {
recoveryStatus.indexShard().performBatchRecovery(request.operations());
} catch (MapperException mapperException) {
} catch (TranslogRecoveryPerformer.BatchOperationException exception) {
if (ExceptionsHelper.unwrapCause(exception) instanceof MapperException == false) {
throw exception;
}
// in very rare cases a translog replay from primary is processed before a mapping update on this node
// which causes local mapping changes. we want to wait until these mappings are processed.
logger.trace("delaying recovery due to missing mapping changes", mapperException);
logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception.completedOperations());
translog.decrementRecoveredOperations(exception.completedOperations());
// we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be
// canceled)
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@@ -58,7 +58,7 @@ void documentation(StringBuilder sb) {

@Override
public void doRequest(final RestRequest request, final RestChannel channel, final Client client) {
final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes"));
final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes", "data:true"));
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.clear().routingTable(true);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
@@ -55,7 +55,7 @@ public RestFielddataAction(Settings settings, RestController controller, Client
@Override
void doRequest(final RestRequest request, final RestChannel channel, final Client client) {

final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("data:true");
nodesStatsRequest.clear();
nodesStatsRequest.indices(true);
String[] fields = request.paramAsStringArray("fields", null);
@@ -35,12 +35,12 @@
*
* Other elasticsearch services can register their resource watchers with this service using {@link #add(ResourceWatcher)}
* method. This service will call {@link org.elasticsearch.watcher.ResourceWatcher#checkAndNotify()} method of all
* registered watcher periodically. The frequency of checks can be specified using {@code watcher.interval} setting, which
* defaults to {@code 60s}. The service can be disabled by setting {@code watcher.enabled} setting to {@code false}.
* registered watcher periodically. The frequency of checks can be specified using {@code resource.reload.interval} setting, which
* defaults to {@code 60s}. The service can be disabled by setting {@code resource.reload.enabled} setting to {@code false}.
*/
public class ResourceWatcherService extends AbstractLifecycleComponent<ResourceWatcherService> {

public static enum Frequency {
public enum Frequency {

/**
* Defaults to 5 seconds
Expand All @@ -59,7 +59,7 @@ public static enum Frequency {

final TimeValue interval;

private Frequency(TimeValue interval) {
Frequency(TimeValue interval) {
this.interval = interval;
}
}
@@ -78,15 +78,21 @@ private Frequency(TimeValue interval) {
@Inject
public ResourceWatcherService(Settings settings, ThreadPool threadPool) {
super(settings);
this.enabled = settings.getAsBoolean("watcher.enabled", true);
this.enabled = settings.getAsBoolean("resource.reload.enabled", true);
this.threadPool = threadPool;

TimeValue interval = settings.getAsTime("watcher.interval.low", Frequency.LOW.interval);
TimeValue interval = settings.getAsTime("resource.reload.interval.low", Frequency.LOW.interval);
lowMonitor = new ResourceMonitor(interval, Frequency.LOW);
interval = settings.getAsTime("watcher.interval.medium", settings.getAsTime("watcher.interval", Frequency.MEDIUM.interval));
interval = settings.getAsTime("resource.reload.interval.medium", settings.getAsTime("resource.reload.interval", Frequency.MEDIUM.interval));
mediumMonitor = new ResourceMonitor(interval, Frequency.MEDIUM);
interval = settings.getAsTime("watcher.interval.high", Frequency.HIGH.interval);
interval = settings.getAsTime("resource.reload.interval.high", Frequency.HIGH.interval);
highMonitor = new ResourceMonitor(interval, Frequency.HIGH);

logRemovedSetting("watcher.enabled", "resource.reload.enabled");
logRemovedSetting("watcher.interval", "resource.reload.interval");
logRemovedSetting("watcher.interval.low", "resource.reload.interval.low");
logRemovedSetting("watcher.interval.medium", "resource.reload.interval.medium");
logRemovedSetting("watcher.interval.high", "resource.reload.interval.high");
}

@Override
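As an illustrative sketch (not part of the commit), node settings using the renamed keys; an old watcher.* key no longer has any effect and is only reported by logRemovedSetting above:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class ResourceReloadSettingsExample {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("resource.reload.enabled", "true")           // replaces watcher.enabled
                .put("resource.reload.interval.medium", "20s")    // replaces watcher.interval.medium
                .put("watcher.interval", "40s")                   // old key: ignored, only logged as removed
                .build();

        // Same lookup the constructor above performs for the medium monitor.
        TimeValue medium = settings.getAsTime("resource.reload.interval.medium",
                settings.getAsTime("resource.reload.interval", TimeValue.timeValueSeconds(60)));
        System.out.println("medium reload interval: " + medium);
    }
}
--------------------------------------------------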
@@ -1820,7 +1820,7 @@ public static class TranslogHandler extends TranslogRecoveryPerformer {
public final AtomicInteger recoveredOps = new AtomicInteger(0);

public TranslogHandler(String indexName) {
super(null, new MapperAnalyzer(null), null, null, null);
super(new ShardId("test", 0), null, new MapperAnalyzer(null), null, null, null);
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
Index index = new Index(indexName);
@@ -389,6 +389,10 @@ Translog createObj() {
for (int j = iterationOps; j > 0; j--) {
ops++;
translog.incrementRecoveredOperations();
if (randomBoolean()) {
translog.decrementRecoveredOperations(1);
translog.incrementRecoveredOperations();
}
}
assertThat(translog.recoveredOperations(), equalTo(ops));
assertThat(translog.totalOperations(), equalTo(totalOps));
@@ -45,7 +45,7 @@ public void testSettings() throws Exception {

// checking bwc
settings = Settings.builder()
.put("watcher.interval", "40s") // only applies to medium
.put("resource.reload.interval", "40s") // only applies to medium
.build();
service = new ResourceWatcherService(settings, threadPool);
assertThat(service.highMonitor.interval.millis(), is(timeValueSeconds(5).millis()));
@@ -54,9 +54,9 @@

// checking custom
settings = Settings.builder()
.put("watcher.interval.high", "10s")
.put("watcher.interval.medium", "20s")
.put("watcher.interval.low", "30s")
.put("resource.reload.interval.high", "10s")
.put("resource.reload.interval.medium", "20s")
.put("resource.reload.interval.low", "30s")
.build();
service = new ResourceWatcherService(settings, threadPool);
assertThat(service.highMonitor.interval.millis(), is(timeValueSeconds(10).millis()));
4 changes: 2 additions & 2 deletions docs/reference/cat/allocation.asciidoc
@@ -1,8 +1,8 @@
[[cat-allocation]]
== cat allocation

`allocation` provides a snapshot of how shards have located around the
cluster and the state of disk usage.
`allocation` provides a snapshot of how many shards are allocated to each data node
and how much disk space they are using.

[source,shell]
--------------------------------------------------
4 changes: 2 additions & 2 deletions docs/reference/cat/fielddata.asciidoc
@@ -1,8 +1,8 @@
[[cat-fielddata]]
== cat fielddata

`fielddata` shows information about currently loaded fielddata on a per-node
basis.
`fielddata` shows how much heap memory is currently being used by fielddata
on every data node in the cluster.

[source,shell]
--------------------------------------------------
2 changes: 1 addition & 1 deletion docs/reference/index-modules/merge.asciidoc
@@ -196,7 +196,7 @@ how many merges are requested over time. An application with a low
indexing rate that unluckily suddenly requires a large merge will see
that merge aggressively throttled, while an application doing heavy
indexing will see the throttle move higher to allow merges to keep up
with ongoing indexing. This is a dynamic setting (you can <<../indices/update-settings,change it
with ongoing indexing. This is a dynamic setting (you can <<indices-update-settings,change it
at any time on a running index>>).

[float]
11 changes: 11 additions & 0 deletions docs/reference/migration/migrate_2_0.asciidoc
@@ -685,3 +685,14 @@ curl -XGET 'localhost:9200/test/_search?fields=_timestamp,foo'
}
}
---------------

=== Settings for resource watcher have been renamed

The setting names for configuring the resource watcher have been renamed
to prevent clashes with the watcher plugin.

* `watcher.enabled` is now `resource.reload.enabled`
* `watcher.interval` is now `resource.reload.interval`
* `watcher.interval.low` is now `resource.reload.interval.low`
* `watcher.interval.medium` is now `resource.reload.interval.medium`
* `watcher.interval.high` is now `resource.reload.interval.high`
2 changes: 1 addition & 1 deletion docs/reference/modules/scripting.asciidoc
@@ -342,7 +342,7 @@ appropriate language.
The `config/scripts` directory is scanned periodically for changes.
New and changed scripts are reloaded and deleted scripts are removed
from preloaded scripts cache. The reload frequency can be specified
using `watcher.interval` setting, which defaults to `60s`.
using `resource.reload.interval` setting, which defaults to `60s`.
To disable script reloading completely set `script.auto_reload_enabled`
to `false`.
