getAllTombstones() {
return tombstones;
}
@@ -471,7 +474,8 @@ Releasable acquireLock(BytesRef uid) {
}
boolean assertKeyedLockHeldByCurrentThread(BytesRef uid) {
- assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() + "], uid [" + uid.utf8ToString() + "]";
+ assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() +
+ "], uid [" + uid.utf8ToString() + "]";
return true;
}
}
diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
index 5c5554cddcfc7..3b84b00b0f8c3 100644
--- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
+++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java
@@ -61,17 +61,19 @@ public final class ShardGetService extends AbstractIndexShardComponent {
private final IndexShard indexShard;
public ShardGetService(IndexSettings indexSettings, IndexShard indexShard,
- MapperService mapperService) {
+ MapperService mapperService) {
super(indexShard.shardId(), indexSettings);
this.mapperService = mapperService;
this.indexShard = indexShard;
}
public GetStats stats() {
- return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
+ return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()),
+ missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
}
- public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
+ public GetResult get(String type, String id, String[] gFields, boolean realtime, long version,
+ VersionType versionType, FetchSourceContext fetchSourceContext) {
return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false);
}
@@ -105,7 +107,8 @@ public GetResult getForUpdate(String type, String id, long version, VersionType
*
* Note: Call must release engine searcher associated with engineGetResult!
*/
- public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) {
+ public GetResult get(Engine.GetResult engineGetResult, String id, String type,
+ String[] fields, FetchSourceContext fetchSourceContext) {
if (!engineGetResult.exists()) {
return new GetResult(shardId.getIndexName(), type, id, -1, false, null, null);
}
@@ -176,7 +179,8 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea
}
}
- private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
+ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext,
+ Engine.GetResult get, MapperService mapperService) {
Map<String, DocumentField> fields = null;
BytesReference source = null;
DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
diff --git a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java
index 603d5c304b634..030ae18dfc401 100644
--- a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java
+++ b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java
@@ -51,8 +51,9 @@ public MergeStats() {
}
- public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes, long currentMerges, long currentNumDocs, long currentSizeInBytes,
- long stoppedTimeMillis, long throttledTimeMillis, double mbPerSecAutoThrottle) {
+ public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes,
+ long currentMerges, long currentNumDocs, long currentSizeInBytes,
+ long stoppedTimeMillis, long throttledTimeMillis, double mbPerSecAutoThrottle) {
this.total += totalMerges;
this.totalTimeInMillis += totalMergeTime;
this.totalNumDocs += totalNumDocs;
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
index bac514311a3b8..ee2172358af16 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
@@ -401,7 +401,8 @@ public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBui
* @param filterFunctionBuilders the filters and functions to execute
* @return the function score query
*/
- public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders) {
+ public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder,
+ FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders) {
return new FunctionScoreQueryBuilder(queryBuilder, filterFunctionBuilders);
}
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java b/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java
index 9e0ee2a1c33e5..10fb77d531155 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java
@@ -37,7 +37,8 @@ public class QueryValidationException extends ValidationException {
* @param validationException an initial exception. Can be {@code null}, in which case a new exception is created.
* @return a {@link QueryValidationException} with added validation error message
*/
- public static QueryValidationException addValidationError(String queryId, String validationError, QueryValidationException validationException) {
+ public static QueryValidationException addValidationError(String queryId, String validationError,
+ QueryValidationException validationException) {
if (validationException == null) {
validationException = new QueryValidationException();
}
@@ -52,7 +53,8 @@ public static QueryValidationException addValidationError(String queryId, String
* @param validationException an initial exception. Can be {@code null}, in which case a new exception is created.
* @return a {@link QueryValidationException} with added validation error message
*/
- public static QueryValidationException addValidationErrors(List<String> validationErrors, QueryValidationException validationException) {
+ public static QueryValidationException addValidationErrors(List<String> validationErrors,
+ QueryValidationException validationException) {
if (validationException == null) {
validationException = new QueryValidationException();
}
diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
index 63e88ae19a919..017317279e525 100644
--- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
+++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
@@ -57,16 +57,19 @@ public MultiMatchQuery(QueryShardContext context) {
super(context);
}
- private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException {
+ private Query parseAndApply(Type type, String fieldName, Object value,
+ String minimumShouldMatch, Float boostValue) throws IOException {
Query query = parse(type, fieldName, value);
query = Queries.maybeApplyMinimumShouldMatch(query, minimumShouldMatch);
- if (query != null && boostValue != null && boostValue != AbstractQueryBuilder.DEFAULT_BOOST && query instanceof MatchNoDocsQuery == false) {
+ if (query != null && boostValue != null &&
+ boostValue != AbstractQueryBuilder.DEFAULT_BOOST && query instanceof MatchNoDocsQuery == false) {
query = new BoostQuery(query, boostValue);
}
return query;
}
- public Query parse(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {
+ public Query parse(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
+ Object value, String minimumShouldMatch) throws IOException {
final Query result;
// reset query builder
queryBuilder = null;
@@ -104,7 +107,8 @@ public QueryBuilder(float tieBreaker) {
this.tieBreaker = tieBreaker;
}
- public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException{
+ public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
+ Object value, String minimumShouldMatch) throws IOException{
List<Query> queries = new ArrayList<>();
for (String fieldName : fieldNames.keySet()) {
Float boostValue = fieldNames.get(fieldName);
@@ -159,7 +163,8 @@ final class CrossFieldsQueryBuilder extends QueryBuilder {
}
@Override
- public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {
+ public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
+ Object value, String minimumShouldMatch) throws IOException {
Map<Analyzer, List<FieldAndFieldType>> groups = new HashMap<>();
List<Query> queries = new ArrayList<>();
for (Map.Entry<String, Float> entry : fieldNames.entrySet()) {
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java
index 24ed98e9affe3..c0a89e7cf006c 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java
@@ -81,7 +81,8 @@ default void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh
* @param currentState the new shard state
* @param reason the reason for the state change if there is one, null otherwise
*/
- default void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {}
+ default void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState,
+ IndexShardState currentState, @Nullable String reason) {}
/**
* Called when a shard is marked as inactive
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java
index 1a13586d4dacc..3a6df72a740e9 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java
@@ -68,7 +68,8 @@ protected IndexSearcher wrap(IndexSearcher searcher) throws IOException {
* This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search)
*/
public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException {
- final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
+ final ElasticsearchDirectoryReader elasticsearchDirectoryReader =
+ ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
if (elasticsearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
}
@@ -76,8 +77,9 @@ public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOExcep
DirectoryReader reader = wrap(nonClosingReaderWrapper);
if (reader != nonClosingReaderWrapper) {
if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) {
- throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate" +
- " to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs");
+ throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," +
+ " wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be " +
+ "used as cache keys since their are used only per request which would lead to subtle bugs");
}
if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
// prevent that somebody wraps with a non-filter reader
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 921eeb319f1a0..0638ce32a147c 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -222,9 +222,11 @@ Runnable getGlobalCheckpointSyncer() {
// for primaries, we only allow to write when actually started (so the cluster has decided we started)
// in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be
// in state RECOVERING or POST_RECOVERY.
- // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent
- // a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source
- private static final EnumSet<IndexShardState> writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
+ // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on
+ // version checks to make sure its consistent a relocated shard can also be target of a replication if the relocation target has not
+ // been marked as active yet and is syncing it's changes back to the relocation source
+ private static final EnumSet<IndexShardState> writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING,
+ IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
private final IndexSearcherWrapper searcherWrapper;
@@ -412,10 +414,12 @@ public void updateShardState(final ShardRouting newRouting,
currentRouting = this.shardRouting;
if (!newRouting.shardId().equals(shardId())) {
- throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId());
+ throw new IllegalArgumentException("Trying to set a routing entry with shardId " +
+ newRouting.shardId() + " on a shard with shardId " + shardId());
}
if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
- throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
+ throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " +
+ currentRouting + ", new " + newRouting);
}
if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) {
throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current "
@@ -435,10 +439,11 @@ public void updateShardState(final ShardRouting newRouting,
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
} else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
- // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery
- // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two
- // active primaries.
- throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
+ // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard
+ // routing occur (e.g. due to recovery failure / cancellation). The reason is that at the moment we cannot safely
+ // reactivate primary mode without risking two active primaries.
+ throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " +
+ newRouting.state());
}
assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED :
"routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state;
@@ -475,8 +480,8 @@ public void updateShardState(final ShardRouting newRouting,
"primary terms can only go up; current term [" + pendingPrimaryTerm + "], new term [" + newPrimaryTerm + "]";
/*
* Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we
- * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is
- * incremented.
+ * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary
+ * term is incremented.
*/
// to prevent primary relocation handoff while resync is not completed
boolean resyncStarted = primaryReplicaResyncInProgress.compareAndSet(false, true);
@@ -522,13 +527,15 @@ public void updateShardState(final ShardRouting newRouting,
public void onResponse(ResyncTask resyncTask) {
logger.info("primary-replica resync completed with {} operations",
resyncTask.getResyncedOperations());
- boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
+ boolean resyncCompleted =
+ primaryReplicaResyncInProgress.compareAndSet(true, false);
assert resyncCompleted : "primary-replica resync finished but was not started";
}
@Override
public void onFailure(Exception e) {
- boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
+ boolean resyncCompleted =
+ primaryReplicaResyncInProgress.compareAndSet(true, false);
assert resyncCompleted : "primary-replica resync finished but was not started";
if (state == IndexShardState.CLOSED) {
// ignore, shutting down
@@ -594,7 +601,8 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta
* @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation
* @throws InterruptedException if blocking operations is interrupted
*/
- public void relocated(final Consumer<ReplicationTracker.PrimaryContext> consumer) throws IllegalIndexShardStateException, InterruptedException {
+ public void relocated(final Consumer<ReplicationTracker.PrimaryContext> consumer)
+ throws IllegalIndexShardStateException, InterruptedException {
assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
try {
indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
@@ -1191,7 +1199,8 @@ public void close(String reason, boolean flushEngine) throws IOException {
}
}
- public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
+ public IndexShard postRecovery(String reason)
+ throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
@@ -1462,7 +1471,8 @@ public boolean ignoreRecoveryAttempt() {
public void readAllowed() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (readAllowedStates.contains(state) == false) {
- throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + readAllowedStates.toString());
+ throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " +
+ readAllowedStates.toString());
}
}
@@ -1476,7 +1486,8 @@ private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIn
if (origin.isRecovery()) {
if (state != IndexShardState.RECOVERING) {
- throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
+ throw new IllegalIndexShardStateException(shardId, state,
+ "operation only allowed when recovering, origin [" + origin + "]");
}
} else {
if (origin == Engine.Operation.Origin.PRIMARY) {
@@ -1488,13 +1499,15 @@ private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIn
assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]";
}
if (writeAllowedStates.contains(state) == false) {
- throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]");
+ throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " +
+ writeAllowedStates + ", origin [" + origin + "]");
}
}
}
private boolean assertPrimaryMode() {
- assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting + " is not a primary shard in primary mode";
+ assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting +
+ " is not a primary shard in primary mode";
return true;
}
@@ -1571,9 +1584,11 @@ public ShardPath shardPath() {
return path;
}
- public boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, List<IndexShard> localShards) throws IOException {
+ public boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer,
+ List<IndexShard> localShards) throws IOException {
assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard";
- assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " + recoveryState.getRecoverySource();
+ assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " +
+ recoveryState.getRecoverySource();
final List<LocalShardSnapshot> snapshots = new ArrayList<>();
try {
for (IndexShard shard : localShards) {
@@ -1601,7 +1616,8 @@ public boolean recoverFromStore() {
public boolean restoreFromRepository(Repository repository) {
assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
- assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + recoveryState.getRecoverySource();
+ assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " +
+ recoveryState.getRecoverySource();
StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
return storeRecovery.recoverFromRepository(this, repository);
}
@@ -1690,7 +1706,8 @@ public boolean hasCompleteHistoryOperations(String source, long startingSeqNo) t
* if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing.
* This parameter should be only enabled when the entire requesting range is below the global checkpoint.
*/
- public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
+ public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo,
+ long toSeqNo, boolean requiredFullRange) throws IOException {
return getEngine().newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange);
}
@@ -1949,7 +1966,8 @@ assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.ST
* @param primaryContext the sequence number context
*/
public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) {
- assert shardRouting.primary() && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: " + shardRouting;
+ assert shardRouting.primary() && shardRouting.isRelocationTarget() :
+ "only primary relocation target can update allocation IDs from primary context: " + shardRouting;
assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) &&
getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint();
synchronized (mutex) {
@@ -2090,7 +2108,8 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService
recoveryListener.onRecoveryDone(recoveryState);
}
} catch (Exception e) {
- recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
+ recoveryListener.onRecoveryFailure(recoveryState,
+ new RecoveryFailedException(recoveryState, null, e), true);
}
});
break;
@@ -2100,7 +2119,8 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService
recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener);
} catch (Exception e) {
failShard("corrupted preexisting index", e);
- recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
+ recoveryListener.onRecoveryFailure(recoveryState,
+ new RecoveryFailedException(recoveryState, null, e), true);
}
break;
case SNAPSHOT:
@@ -2113,7 +2133,8 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService
recoveryListener.onRecoveryDone(recoveryState);
}
} catch (Exception e) {
- recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
+ recoveryListener.onRecoveryFailure(recoveryState,
+ new RecoveryFailedException(recoveryState, null, e), true);
}
});
break;
@@ -2407,7 +2428,8 @@ public void onFailure(final Exception e) {
}
public int getActiveOperationsCount() {
- return indexShardOperationPermits.getActiveOperationsCount(); // refCount is incremented on successful acquire and decremented on close
+ // refCount is incremented on successful acquire and decremented on close
+ return indexShardOperationPermits.getActiveOperationsCount();
}
/**
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java
index fa658c3600ea4..c7b6a99d2c7e1 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java
@@ -49,7 +49,8 @@ public static class Stats implements Streamable, ToXContentFragment {
Stats() {}
- public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long indexFailedCount, long deleteCount, long deleteTimeInMillis, long deleteCurrent, long noopUpdateCount, boolean isThrottled, long throttleTimeInMillis) {
+ public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long indexFailedCount, long deleteCount,
+ long deleteTimeInMillis, long deleteCurrent, long noopUpdateCount, boolean isThrottled, long throttleTimeInMillis) {
this.indexCount = indexCount;
this.indexTimeInMillis = indexTimeInMillis;
this.indexCurrent = indexCurrent;
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
index ffce215646443..d93cd988c62a7 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java
@@ -45,10 +45,14 @@ public final class ShardPath {
private final boolean isCustomDataPath;
public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) {
- assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString();
- assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
- assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString();
- assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString();
+ assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) :
+ "dataPath must end with the shard ID but didn't: " + dataPath.toString();
+ assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) :
+ "shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
+ assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) :
+ "dataPath must end with index path id but didn't: " + dataPath.toString();
+ assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) :
+ "shardStatePath must end with index path id but didn't: " + dataPath.toString();
if (isCustomDataPath && dataPath.equals(shardStatePath)) {
throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths");
}
@@ -111,7 +115,8 @@ public boolean isCustomDataPath() {
* directories with a valid shard state exist the one with the highest version will be used.
* Note: this method resolves custom data locations for the shard.
*/
- public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException {
+ public static ShardPath loadShardPath(Logger logger, NodeEnvironment env,
+ ShardId shardId, IndexSettings indexSettings) throws IOException {
final Path[] paths = env.availableShardPaths(shardId);
final int nodeLockId = env.getNodeLockId();
final Path sharedDataPath = env.sharedDataPath();
@@ -165,7 +170,8 @@ public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSetti
* This method tries to delete left-over shards where the index name has been reused but the UUID is different
* to allow the new shard to be allocated.
*/
- public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException {
+ public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env,
+ ShardLock lock, IndexSettings indexSettings) throws IOException {
final String indexUUID = indexSettings.getUUID();
final Path[] paths = env.availableShardPaths(lock.getShardId());
for (Path path : paths) {
@@ -226,7 +232,8 @@ public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shard
.filter((path) -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0)
// Sort by the number of shards for this index
.sorted((p1, p2) -> {
- int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L));
+ int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L),
+ pathToShardCount.getOrDefault(p2, 0L));
if (cmp == 0) {
// if the number of shards is equal, tie-break with the number of total shards
cmp = Integer.compare(dataPathToShardCount.getOrDefault(p1.path, 0),
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java
index 3f3f2a78100af..5d4a093cc4ba2 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java
@@ -88,7 +88,8 @@ public String toString() {
return "primary [" + primary + "], allocation [" + allocationId + "]";
}
- public static final MetaDataStateFormat<ShardStateMetaData> FORMAT = new MetaDataStateFormat<ShardStateMetaData>(SHARD_STATE_FILE_PREFIX) {
+ public static final MetaDataStateFormat<ShardStateMetaData> FORMAT =
+ new MetaDataStateFormat<ShardStateMetaData>(SHARD_STATE_FILE_PREFIX) {
@Override
protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index 4d8b63fc71967..1ed41ea603600 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -98,7 +98,8 @@ boolean recoverFromStore(final IndexShard indexShard) {
return false;
}
- boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, final IndexShard indexShard, final List<LocalShardSnapshot> shards) throws IOException {
+ boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer,
+ final IndexShard indexShard, final List<LocalShardSnapshot> shards) throws IOException {
if (canRecover(indexShard)) {
RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
assert recoveryType == RecoverySource.Type.LOCAL_SHARDS: "expected local shards recovery type: " + recoveryType;
@@ -133,7 +134,8 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate
internalRecoverFromStore(indexShard);
// just trigger a merge to do housekeeping on the
// copied segments - we will also see them in stats etc.
- indexShard.getEngine().forceMerge(false, -1, false, false, false);
+ indexShard.getEngine().forceMerge(false, -1, false,
+ false, false);
} catch (IOException ex) {
throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex);
}
@@ -304,7 +306,8 @@ private boolean executeRecovery(final IndexShard indexShard, Runnable recoveryRu
// to call post recovery.
final IndexShardState shardState = indexShard.state();
final RecoveryState recoveryState = indexShard.recoveryState();
- assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
+ assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING :
+ "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
if (logger.isTraceEnabled()) {
RecoveryState.Index index = recoveryState.getIndex();
@@ -316,11 +319,13 @@ private boolean executeRecovery(final IndexShard indexShard, Runnable recoveryRu
.append(new ByteSizeValue(index.recoveredBytes())).append("]\n");
sb.append(" : reusing_files [").append(index.reusedFileCount()).append("] with total_size [")
.append(new ByteSizeValue(index.reusedBytes())).append("]\n");
- sb.append(" verify_index : took [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [")
- .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
+ sb.append(" verify_index : took [")
+ .append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [")
+ .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
.append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
- logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb);
+ logger.trace("recovery completed from [shard_store], took [{}]\n{}",
+ timeValueMillis(recoveryState.getTimer().time()), sb);
} else if (logger.isDebugEnabled()) {
logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time()));
}
@@ -371,7 +376,8 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe
files += " (failure=" + ExceptionsHelper.detailedMessage(inner) + ")";
}
if (indexShouldExists) {
- throw new IndexShardRecoveryException(shardId, "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
+ throw new IndexShardRecoveryException(shardId,
+ "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
}
}
if (si != null) {
@@ -457,7 +463,8 @@ private void restore(final IndexShard indexShard, final Repository repository, f
snapshotShardId = new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, shardId.id());
}
final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName);
- repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
+ repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(),
+ restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
final Store store = indexShard.store();
store.bootstrapNewHistory();
final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java
index 18ccb988b8c83..c1c3b86708b74 100644
--- a/server/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/server/src/main/java/org/elasticsearch/index/store/Store.java
@@ -459,7 +459,8 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId
* can be successfully opened. This includes reading the segment infos and possible
* corruption markers.
*/
- public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
+ public static boolean canOpenIndex(Logger logger, Path indexLocation,
+ ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
try {
tryOpenIndex(indexLocation, shardId, shardLocker, logger);
} catch (Exception ex) {
@@ -474,7 +475,8 @@ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId sh
* segment infos and possible corruption markers. If the index can not
* be opened, an exception is thrown
*/
- public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException, ShardLockObtainFailedException {
+ public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
+ Logger logger) throws IOException, ShardLockObtainFailedException {
try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));
Directory dir = new SimpleFSDirectory(indexLocation)) {
failIfCorrupted(dir, shardId);
@@ -489,7 +491,8 @@ public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnviron
* Note: Checksums are calculated by default since version 4.8.0. This method only adds the
* verification against the checksum in the given metadata and does not add any significant overhead.
*/
- public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
+ public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata,
+ final IOContext context) throws IOException {
IndexOutput output = directory().createOutput(fileName, context);
boolean success = false;
try {
@@ -537,7 +540,8 @@ public static boolean checkIntegrityNoException(StoreFileMetaData md, Directory
public static void checkIntegrity(final StoreFileMetaData md, final Directory directory) throws IOException {
try (IndexInput input = directory.openInput(md.name(), IOContext.READONCE)) {
if (input.length() != md.length()) { // first check the length no matter how old this file is
- throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input);
+ throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() +
+ " : file truncated?", input);
}
// throw exception if the file is corrupt
String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
@@ -650,7 +654,9 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr
try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
for (String existingFile : directory.listAll()) {
if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
- continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
+ // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete
+ // checksum)
+ continue;
}
try {
directory.deleteFile(reason, existingFile);
@@ -660,7 +666,8 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr
|| existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)
|| existingFile.startsWith(CORRUPTED)) {
// TODO do we need to also fail this if we can't delete the pending commit file?
- // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
+ // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit
+ // point around?
throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
}
logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
@@ -685,13 +692,14 @@ final void verifyAfterCleanup(MetadataSnapshot sourceMetaData, MetadataSnapshot
StoreFileMetaData remote = sourceMetaData.get(meta.name());
// if we have different files then they must have no checksums; otherwise something went wrong during recovery.
// we have that problem when we have an empty index is only a segments_1 file so we can't tell if it's a Lucene 4.8 file
- // and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files come out as
- // different in the diff. That's why we have to double check here again if the rest of it matches.
+ // and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files
+ // come out as different in the diff. That's why we have to double check here again if the rest of it matches.
// all is fine this file is just part of a commit or a segment that is different
if (local.isSame(remote) == false) {
logger.debug("Files are different on the recovery target: {} ", recoveryDiff);
- throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null);
+ throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " +
+ remote, null);
}
}
} else {
@@ -851,7 +859,8 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg
final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
numDocs = Lucene.getNumDocs(segmentCommitInfos);
commitUserDataBuilder.putAll(segmentCommitInfos.getUserData());
- Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version.
+ // we don't know which version was used to write so we take the max version.
+ Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion();
for (SegmentCommitInfo info : segmentCommitInfos) {
final Version version = info.info.getVersion();
if (version == null) {
@@ -862,7 +871,8 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg
maxVersion = version;
}
for (String file : info.files()) {
- checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
+ checksumFromLuceneFile(directory, file, builder, logger, version,
+ SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
}
}
if (maxVersion == null) {
@@ -878,7 +888,9 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg
// Lucene checks the checksum after it tries to lookup the codec etc.
// in that case we might get only IAE or similar exceptions while we are really corrupt...
// TODO we should check the checksum in lucene if we hit an exception
- logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
+ logger.warn(() ->
+ new ParameterizedMessage("failed to build store metadata. checking segment info integrity " +
+ "(with commit [{}])", commit == null ? "no" : "yes"), ex);
Lucene.checkSegmentInfoIntegrity(directory);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) {
cex.addSuppressed(ex);
@@ -902,10 +914,12 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map
length = in.length();
if (length < CodecUtil.footerLength()) {
// truncated files trigger IAE if we seek negative... these files are really corrupted though
- throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
+ throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " +
+ CodecUtil.footerLength() + " but was: " + in.length(), in);
}
if (readFileAsHash) {
- final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in); // additional safety we checksum the entire file we read the hash for...
+ // additional safety we checksum the entire file we read the hash for...
+ final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in);
hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length);
checksum = digestToString(verifyingIndexInput.verify());
} else {
@@ -964,19 +978,21 @@ public Map asMap() {
*
* - all files in this segment have the same checksum
* - all files in this segment have the same length
- * - the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code .si} file content as it's hash
+ * - the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function,
+ * The metadata transfers the {@code .si} file content as it's hash
*
*
* The {@code .si} file contains a lot of diagnostics including a timestamp etc. in the future there might be
* unique segment identifiers in there hardening this method further.
*
- * The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files like
- * deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
+ * The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files
+ * like deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
* as identical iff:
*
* - all files belonging to this commit have the same checksum
* - all files belonging to this commit have the same length
- * - the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code segments_N} file content as it's hash
+ * - the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function,
+ * The metadata transfers the {@code segments_N} file content as it's hash
*
*
* NOTE: this diff will not contain the {@code segments.gen} file. This file is omitted on recovery.
@@ -994,7 +1010,8 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) {
}
final String segmentId = IndexFileNames.parseSegmentName(meta.name());
final String extension = IndexFileNames.getExtension(meta.name());
- if (IndexFileNames.SEGMENTS.equals(segmentId) || DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) {
+ if (IndexFileNames.SEGMENTS.equals(segmentId) ||
+ DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) {
// only treat del files as per-commit files fnm files are generational but only for upgradable DV
perCommitStoreFiles.add(meta);
} else {
@@ -1029,9 +1046,11 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) {
different.addAll(identicalFiles);
}
}
- RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), Collections.unmodifiableList(different), Collections.unmodifiableList(missing));
+ RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical),
+ Collections.unmodifiableList(different), Collections.unmodifiableList(missing));
assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 1 : 0)
- : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]";
+ : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" +
+ this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]";
return recoveryDiff;
}
@@ -1184,8 +1203,8 @@ public void verify() throws IOException {
}
}
throw new CorruptIndexException("verification failed (hardware problem?) : expected=" + metadata.checksum() +
- " actual=" + actualChecksum + " footer=" + footerDigest +" writtenLength=" + writtenBytes + " expectedLength=" + metadata.length() +
- " (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")");
+ " actual=" + actualChecksum + " footer=" + footerDigest +" writtenLength=" + writtenBytes + " expectedLength=" +
+ metadata.length() + " (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")");
}
@Override
@@ -1203,7 +1222,8 @@ public void writeByte(byte b) throws IOException {
}
} else {
verify(); // fail if we write more than expected
- throw new AssertionError("write past EOF expected length: " + metadata.length() + " writtenBytes: " + writtenBytes);
+ throw new AssertionError("write past EOF expected length: " + metadata.length() +
+ " writtenBytes: " + writtenBytes);
}
}
out.writeByte(b);
diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
index 447ec9003a4ac..bbc7c755a67e9 100644
--- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
+++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java
@@ -80,7 +80,8 @@ public static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVect
static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequest request, LongSupplier nanoTimeSupplier) {
final long startTime = nanoTimeSupplier.getAsLong();
- final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), request.type(), request.id());
+ final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(),
+ request.type(), request.id());
final Term uidTerm = indexShard.mapperService().createUidTerm(request.type(), request.id());
if (uidTerm == null) {
termVectorsResponse.setExists(false);
@@ -96,7 +97,8 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ
handleFieldWildcards(indexShard, request);
}
- try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm)
+ try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(),
+ request.id(), uidTerm)
.version(request.version()).versionType(request.versionType()));
Engine.Searcher searcher = indexShard.acquireSearcher("term_vector")) {
Fields topLevelFields = fields(get.searcher() != null ? get.searcher().reader() : searcher.reader());
@@ -192,7 +194,8 @@ private static boolean isValidField(MappedFieldType fieldType) {
return true;
}
- private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField, TermVectorsRequest request, Set<String> selectedFields) throws IOException {
+ private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField,
+ TermVectorsRequest request, Set<String> selectedFields) throws IOException {
/* only keep valid fields */
Set<String> validFields = new HashSet<>();
for (String field : selectedFields) {
@@ -217,7 +220,8 @@ private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetR
getFields[getFields.length - 1] = SourceFieldMapper.NAME;
GetResult getResult = indexShard.getService().get(
get, request.id(), request.type(), getFields, null);
- Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer(), validFields);
+ Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(),
+ request.offsets(), request.perFieldAnalyzer(), validFields);
/* merge with existing Fields */
if (termVectorsByField == null) {
@@ -257,7 +261,12 @@ private static Set getFieldsToGenerate(Map perAnalyzerFi
return selectedFields;
}
- private static Fields generateTermVectors(IndexShard indexShard, Map<String, Object> source, Collection<DocumentField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields) throws IOException {
+ private static Fields generateTermVectors(IndexShard indexShard,
+ Map<String, Object> source,
+ Collection<DocumentField> getFields,
+ boolean withOffsets,
+ @Nullable Map<String, String> perFieldAnalyzer,
+ Set<String> fields) throws IOException {
Map<String, Collection<Object>> values = new HashMap<>();
for (DocumentField getField : getFields) {
String field = getField.getName();
@@ -319,8 +328,9 @@ private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVect
String[] values = doc.getValues(field.name());
documentFields.add(new DocumentField(field.name(), Arrays.asList((Object[]) values)));
}
- return generateTermVectors(indexShard, XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(),
- documentFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
+ return generateTermVectors(indexShard,
+ XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(), documentFields,
+ request.offsets(), request.perFieldAnalyzer(), seenFields);
}
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc,
diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
index 41c3252eab07a..c61718332cde3 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
@@ -38,7 +38,8 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReader> {
- assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + reusableBuffer.capacity() + "]";
+ assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" +
+ reusableBuffer.capacity() + "]";
reusableBuffer.clear();
reusableBuffer.limit(4);
readBytes(reusableBuffer, position);
@@ -94,7 +96,8 @@ public TranslogSnapshot newSnapshot() {
* reads an operation at the given position and returns it. The buffer length is equal to the number
* of bytes reads.
*/
- protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize, BufferedChecksumStreamInput reuse) throws IOException {
+ protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize,
+ BufferedChecksumStreamInput reuse) throws IOException {
final ByteBuffer buffer;
if (reusableBuffer.capacity() >= opSize) {
buffer = reusableBuffer;
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index 6baeb53136129..513d044f735ae 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -75,8 +75,9 @@
* In Elasticsearch there is one Translog instance per {@link org.elasticsearch.index.engine.InternalEngine}. The engine
* records the current translog generation {@link Translog#getGeneration()} in it's commit metadata using {@link #TRANSLOG_GENERATION_KEY}
* to reference the generation that contains all operations that have not yet successfully been committed to the engines lucene index.
- * Additionally, since Elasticsearch 2.0 the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong association
- * between the lucene index an the transaction log file. This UUID is used to prevent accidental recovery from a transaction log that belongs to a
+ * Additionally, since Elasticsearch 2.0 the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong
+ * association between the lucene index and the transaction log file. This UUID is used to prevent accidental recovery from a transaction
+ * log that belongs to a
* different engine.
*
* Each Translog has only one translog file open for writes at any time referenced by a translog generation ID. This ID is written to a
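To make the association described in this javadoc concrete, a hedged illustration (a hypothetical helper, not the engine's code): the Lucene commit's user data carries both keys, and recovery can refuse a translog whose UUID does not match the one recorded in the commit. The key strings below match the constants declared later in this hunk.

    import java.util.Map;

    final class CommitTranslogAssociationSketch {
        static long generationToRecoverFrom(Map<String, String> commitUserData, String translogUUID) {
            String committedUuid = commitUserData.get("translog_uuid");      // TRANSLOG_UUID_KEY
            if (translogUUID.equals(committedUuid) == false) {
                // guards against accidental recovery from a translog that belongs to a different engine
                throw new IllegalArgumentException(
                    "commit belongs to a different translog: " + committedUuid + " vs. " + translogUUID);
            }
            // TRANSLOG_GENERATION_KEY names the generation still holding operations not yet committed to Lucene
            return Long.parseLong(commitUserData.get("translog_generation"));
        }
    }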
@@ -96,10 +97,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
/*
* TODO
- * - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this) but we can refactor as we go
- * - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we need to be able to do random access reads even from the buffer
+ * - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs need this)
+ * but we can refactor as we go
+ * - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into its super class... the tricky bit is we
+ * need to be able to do random access reads even from the buffer
* - we need random exception on the FileSystem API tests for all this.
- * - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already fsynced far enough
+ * - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already
+ * fsynced far enough
*/
public static final String TRANSLOG_GENERATION_KEY = "translog_generation";
public static final String TRANSLOG_UUID_KEY = "translog_uuid";
@@ -170,12 +174,15 @@ public Translog(
// we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this:
// https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example
//
- // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists
- // if not we don't even try to clean it up and wait until we fail creating it
- assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
+ // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that
+ // file exists. If not we don't even try to clean it up and wait until we fail creating it
+ assert Files.exists(nextTranslogFile) == false ||
+ Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) :
+ "unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
- logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
+ logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a" +
+ " tragic exception when creating a new generation", nextTranslogFile.getFileName());
}
this.readers.addAll(recoverFromFiles(checkpoint));
if (readers.isEmpty()) {
@@ -206,7 +213,8 @@ public Translog(
private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException {
boolean success = false;
ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
- final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX); // a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work
+ // a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work
+ final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX);
boolean tempFileRenamed = false;
try (ReleasableLock lock = writeLock.acquire()) {
logger.debug("open uncommitted translog checkpoint {}", checkpoint);
@@ -232,7 +240,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws
throw new IllegalStateException("translog file doesn't exist with generation: " + i + " recovering from: " +
minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
}
- final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
+ final TranslogReader reader = openReader(committedTranslogFile,
+ Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() :
"Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" +
"translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]";
@@ -250,7 +259,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws
if (Files.exists(commitCheckpoint)) {
Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
if (checkpoint.equals(checkpointFromDisk) == false) {
- throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
+ throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() +
+ " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
}
} else {
// we first copy this into the temp-file and then fsync it followed by an atomic move into the target file
@@ -281,7 +291,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws
TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
try {
- assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
+ assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " +
+ Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
TranslogReader reader = TranslogReader.open(channel, path, checkpoint, translogUUID);
channel = null;
return reader;
@@ -302,7 +313,8 @@ public static long parseIdFromFileName(Path translogFile) {
try {
return Long.parseLong(matcher.group(1));
} catch (NumberFormatException e) {
- throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + fileName + "]", e);
+ throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " +
+ fileName + "]", e);
}
}
throw new IllegalArgumentException("can't parse id from file: " + fileName);
@@ -835,7 +847,8 @@ public TranslogStats stats() {
// acquire lock to make the two numbers roughly consistent (no file change half way)
try (ReleasableLock lock = readLock.acquire()) {
final long uncommittedGen = deletionPolicy.getTranslogGenerationOfLastCommit();
- return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen), sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge());
+ return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen),
+ sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge());
}
}
@@ -1741,7 +1754,8 @@ public boolean isCurrent(TranslogGeneration generation) {
try (ReleasableLock lock = writeLock.acquire()) {
if (generation != null) {
if (generation.translogUUID.equals(translogUUID) == false) {
- throw new IllegalArgumentException("commit belongs to a different translog: " + generation.translogUUID + " vs. " + translogUUID);
+ throw new IllegalArgumentException("commit belongs to a different translog: " +
+ generation.translogUUID + " vs. " + translogUUID);
}
return generation.translogFileGeneration == currentFileGeneration();
}
@@ -1858,12 +1872,14 @@ static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, S
ChannelFactory channelFactory, long primaryTerm) throws IOException {
IOUtils.rm(location);
Files.createDirectories(location);
- final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);
+ final Checkpoint checkpoint =
+ Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);
final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
IOUtils.fsync(checkpointFile, false);
final String translogUUID = UUIDs.randomBase64UUID();
- TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory,
+ TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1,
+ location.resolve(getFilename(1)), channelFactory,
new ByteSizeValue(10), 1, initialGlobalCheckpoint,
() -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm,
new TragicExceptionHolder());
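The comments in the hunks above (copying the checkpoint into a temp file on the same filesystem, fsyncing it, then an atomic move into the target) follow a standard durability pattern; a self-contained sketch with illustrative names, not the Translog methods themselves:

    import java.io.IOException;
    import java.nio.channels.FileChannel;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.nio.file.StandardOpenOption;

    final class SafeCheckpointCopySketch {
        static void copyAtomically(Path source, Path target) throws IOException {
            // the temp file must live in the target directory so the final move can be atomic
            Path tempFile = Files.createTempFile(target.getParent(), "checkpoint-", ".tmp");
            try {
                Files.copy(source, tempFile, StandardCopyOption.REPLACE_EXISTING);
                try (FileChannel channel = FileChannel.open(tempFile, StandardOpenOption.WRITE)) {
                    channel.force(true);                        // fsync the copied bytes before publishing them
                }
                Files.move(tempFile, target, StandardCopyOption.ATOMIC_MOVE);
            } finally {
                Files.deleteIfExists(tempFile);                 // no-op if the move already succeeded
            }
        }
    }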
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
index 4091fa45762e1..a75f49972a5d2 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
@@ -129,7 +129,8 @@ protected void readBytes(ByteBuffer buffer, long position) throws IOException {
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]");
}
if (position < getFirstOperationOffset()) {
- throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "]");
+ throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" +
+ getFirstOperationOffset() + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java
index 8fe92bba0097c..bff3e4eb2f540 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java
@@ -94,10 +94,12 @@ public long sizeInBytes() {
*/
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
if (position >= length) {
- throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" + getGeneration() + "], path: [" + path + "]");
+ throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" +
+ getGeneration() + "], path: [" + path + "]");
}
if (position < getFirstOperationOffset()) {
- throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]");
+ throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" +
+ getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
index e0cfe9eaaff0b..6b00b0c5db3ff 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
@@ -128,7 +128,8 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f
writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy);
} catch (Exception exception) {
// if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
- // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
+ // file exists we remove it. We only apply this logic to the checkpoint.generation+1; any other file with a higher generation
+ // is an error condition
IOUtils.closeWhileHandlingException(channel);
throw exception;
}
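The cleanup this comment describes (and the matching check in the Translog constructor earlier in this patch) boils down to: a leftover translog file for checkpoint.generation + 1 may be deleted only when the current generation's checkpoint copy already exists; otherwise it is left alone and the later create will fail. A hedged sketch with illustrative file naming:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    final class StaleGenerationCleanupSketch {
        static boolean deleteStaleNextGeneration(Path translogDir, long checkpointGeneration) throws IOException {
            Path nextTranslogFile = translogDir.resolve("translog-" + (checkpointGeneration + 1) + ".tlog");
            Path currentCheckpointCopy = translogDir.resolve("translog-" + checkpointGeneration + ".ckp");
            // only clean up when the checkpoint was already copied for the current generation;
            // a higher generation without that copy is an error condition, not something to delete
            return Files.exists(currentCheckpointCopy) && Files.deleteIfExists(nextTranslogFile);
        }
    }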
diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java
index 49e6c6597e180..6bb799ac9ebb0 100644
--- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java
@@ -45,8 +45,10 @@
public class IndexingSlowLogTests extends ESTestCase {
public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
- BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject());
- ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
+ BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder()
+ .startObject().field("foo", "bar").endObject());
+ ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1),
+ SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
"test", null, null, source, XContentType.JSON, null);
Index index = new Index("foo", "123");
// Turning off document logging doesn't log source[]
@@ -68,7 +70,8 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
// Throwing a error if source cannot be converted
source = new BytesArray("invalid");
- pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
+ pd = new ParsedDocument(new NumericDocValuesField("version", 1),
+ SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
"test", null, null, source, XContentType.JSON, null);
p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3);
@@ -91,10 +94,12 @@ public void testReformatSetting() {
IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY);
IndexingSlowLog log = new IndexingSlowLog(settings);
assertFalse(log.isReformat());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build()));
assertTrue(log.isReformat());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "false").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "false").build()));
assertFalse(log.isReformat());
settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY));
@@ -107,7 +112,8 @@ public void testReformatSetting() {
log = new IndexingSlowLog(settings);
assertTrue(log.isReformat());
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "NOT A BOOLEAN").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "NOT A BOOLEAN").build()));
fail();
} catch (IllegalArgumentException ex) {
final String expected = "illegal value can't update [index.indexing.slowlog.reformat] from [true] to [NOT A BOOLEAN]";
@@ -115,7 +121,8 @@ public void testReformatSetting() {
assertNotNull(ex.getCause());
assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class));
final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause();
- assertThat(cause, hasToString(containsString("Failed to parse value [NOT A BOOLEAN] as only [true] or [false] are allowed.")));
+ assertThat(cause,
+ hasToString(containsString("Failed to parse value [NOT A BOOLEAN] as only [true] or [false] are allowed.")));
}
assertTrue(log.isReformat());
}
@@ -130,14 +137,17 @@ public void testLevelSetting() {
IndexingSlowLog log = new IndexingSlowLog(settings);
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
assertEquals(level, log.getLevel());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
assertEquals(level, log.getLevel());
settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY));
@@ -150,7 +160,8 @@ public void testLevelSetting() {
log = new IndexingSlowLog(settings);
assertTrue(log.isReformat());
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), "NOT A LEVEL").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), "NOT A LEVEL").build()));
fail();
} catch (IllegalArgumentException ex) {
final String expected = "illegal value can't update [index.indexing.slowlog.level] from [TRACE] to [NOT A LEVEL]";
@@ -178,7 +189,8 @@ public void testSetLevels() {
assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getIndexInfoThreshold());
assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getIndexWarnThreshold());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "120ms")
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "120ms")
.put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "220ms")
.put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "320ms")
.put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "420ms").build()));
@@ -206,28 +218,36 @@ public void testSetLevels() {
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexInfoThreshold());
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexWarnThreshold());
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE")
+ .build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.trace");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE")
+ .build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.debug");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE")
+ .build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.info");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE")
+ .build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.warn");
diff --git a/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java
index 68869592485e5..ee306c0cdf182 100644
--- a/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java
+++ b/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java
@@ -40,11 +40,16 @@ public void testCompoundFileSettings() throws IOException {
assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5));
assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
- assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
- assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
- assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
- assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
- assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new MergePolicyConfig(logger,
+ indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new MergePolicyConfig(logger,
+ indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new MergePolicyConfig(logger,
+ indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new MergePolicyConfig(logger,
+ indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new MergePolicyConfig(logger,
+ indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
}
@@ -54,7 +59,8 @@ private static IndexSettings indexSettings(Settings settings) {
}
public void testNoMerges() {
- MergePolicyConfig mp = new MergePolicyConfig(logger, indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()));
+ MergePolicyConfig mp = new MergePolicyConfig(logger,
+ indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()));
assertTrue(mp.getMergePolicy() instanceof NoMergePolicy);
}
@@ -76,47 +82,81 @@ public void testUpdateSettings() throws IOException {
public void testTieredMergePolicySettingsUpdate() throws IOException {
IndexSettings indexSettings = indexSettings(Settings.EMPTY);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
-
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
-
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
-
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1);
-
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build()));
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);
-
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build()));
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
-
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build()));
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
-
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(),
+ MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
+
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(),
+ MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(),
+ MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
+
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(),
+ MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(),
+ new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(),
+ new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
+
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1);
+
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1);
+
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(),
+ MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(),
+ new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(),
+ new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
+
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(),
+ MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(),
+ MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build()));
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(),
+ MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
+
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(),
+ MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0);
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () ->
- indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build())));
+ indexSettings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build())));
final Throwable cause = exc.getCause();
assertThat(cause.getMessage(), containsString("must be <= 50.0"));
indexSettings.updateIndexMetaData(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
- assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(),
+ MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(),
+ new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(),
+ MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(),
+ new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(),
+ MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
+ assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(),
+ MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
}
public Settings build(String value) {
diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java
index adb7a087367d2..8d547c617e55b 100644
--- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java
@@ -170,7 +170,8 @@ public void testSlowLogSearchContextPrinterToLog() throws IOException {
SearchContext searchContext = createSearchContext(index);
SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery());
searchContext.request().source(source);
- searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID, "my_id")));
+ searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null,
+ Collections.singletonMap(Task.X_OPAQUE_ID, "my_id")));
SearchSlowLog.SlowLogSearchContextPrinter p = new SearchSlowLog.SlowLogSearchContextPrinter(searchContext, 10);
assertThat(p.toString(), startsWith("[foo][0]"));
// Makes sure that output doesn't contain any new lines
@@ -188,14 +189,17 @@ public void testLevelSetting() {
SearchSlowLog log = new SearchSlowLog(settings);
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
assertEquals(level, log.getLevel());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
assertEquals(level, log.getLevel());
settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY));
@@ -207,7 +211,8 @@ public void testLevelSetting() {
settings = new IndexSettings(metaData, Settings.EMPTY);
log = new SearchSlowLog(settings);
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), "NOT A LEVEL").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), "NOT A LEVEL").build()));
fail();
} catch (IllegalArgumentException ex) {
final String expected = "illegal value can't update [index.search.slowlog.level] from [TRACE] to [NOT A LEVEL]";
@@ -235,7 +240,8 @@ public void testSetQueryLevels() {
assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getQueryInfoThreshold());
assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getQueryWarnThreshold());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "120ms")
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "120ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "220ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "320ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "420ms").build()));
@@ -263,28 +269,36 @@ public void testSetQueryLevels() {
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryInfoThreshold());
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryWarnThreshold());
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder()
+ .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.trace");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder()
+ .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.debug");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder()
+ .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.info");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder()
+ .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.warn");
@@ -306,7 +320,8 @@ public void testSetFetchLevels() {
assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getFetchInfoThreshold());
assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getFetchWarnThreshold());
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "120ms")
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "120ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "220ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "320ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "420ms").build()));
@@ -334,28 +349,36 @@ public void testSetFetchLevels() {
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchInfoThreshold());
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchWarnThreshold());
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(),
+ "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.trace");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(),
+ "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.debug");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(),
+ "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.info");
}
try {
- settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
+ settings.updateIndexMetaData(newIndexMeta("index",
+ Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(),
+ "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.warn");
diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
index a5a4dbd9db85c..cdc38cd3abd0d 100644
--- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
@@ -86,11 +86,12 @@ public void testThatAnalyzersAreUsedInMapping() throws IOException {
Version randomVersion = randomVersion(random());
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
- NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
+ NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX,
+ randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
- .startObject("properties").startObject("field").field("type", "text").field("analyzer", analyzerName).endObject().endObject()
- .endObject().endObject();
+ .startObject("properties").startObject("field").field("type", "text")
+ .field("analyzer", analyzerName).endObject().endObject().endObject().endObject();
MapperService mapperService = createIndex("test", indexSettings, "type", mapping).mapperService();
MappedFieldType fieldType = mapperService.fullName("field");
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java
index 110e34d59a91c..1bf928fcb8060 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java
@@ -55,24 +55,32 @@ public void testMergesHappening() throws InterruptedException, IOException, Exec
final int numDocs = scaledRandomIntBetween(100, 1000);
BulkRequestBuilder request = client().prepareBulk();
for (int j = 0; j < numDocs; ++j) {
- request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++)).source(jsonBuilder().startObject().field("l", randomLong()).endObject()));
+ request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++))
+ .source(jsonBuilder().startObject().field("l", randomLong()).endObject()));
}
BulkResponse response = request.execute().actionGet();
refresh();
assertNoFailures(response);
- IndicesStatsResponse stats = client().admin().indices().prepareStats("test").setSegments(true).setMerge(true).get();
- logger.info("index round [{}] - segments {}, total merges {}, current merge {}", i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
+ IndicesStatsResponse stats = client().admin().indices().prepareStats("test")
+ .setSegments(true).setMerge(true).get();
+ logger.info("index round [{}] - segments {}, total merges {}, current merge {}",
+ i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
+ stats.getPrimaries().getMerge().getCurrent());
}
final long upperNumberSegments = 2 * numOfShards * 10;
awaitBusy(() -> {
IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get();
- logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
+ logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards,
+ stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
+ stats.getPrimaries().getMerge().getCurrent());
long current = stats.getPrimaries().getMerge().getCurrent();
long count = stats.getPrimaries().getSegments().getCount();
return count < upperNumberSegments && current == 0;
});
IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get();
- logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
+ logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards,
+ stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
+ stats.getPrimaries().getMerge().getCurrent());
long count = stats.getPrimaries().getSegments().getCount();
assertThat(count, Matchers.lessThanOrEqualTo(upperNumberSegments));
}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 30db7a3f5e262..25a86f8b211ae 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -314,11 +314,15 @@ public void testSegments() throws Exception {
segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));
assertThat(engine.segmentsStats(false).getCount(), equalTo(2L));
- assertThat(engine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
- assertThat(engine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
+ assertThat(engine.segmentsStats(false).getTermsMemoryInBytes(),
+ greaterThan(stats.getTermsMemoryInBytes()));
+ assertThat(engine.segmentsStats(false).getStoredFieldsMemoryInBytes(),
+ greaterThan(stats.getStoredFieldsMemoryInBytes()));
assertThat(engine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L));
- assertThat(engine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
- assertThat(engine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
+ assertThat(engine.segmentsStats(false).getNormsMemoryInBytes(),
+ greaterThan(stats.getNormsMemoryInBytes()));
+ assertThat(engine.segmentsStats(false).getDocValuesMemoryInBytes(),
+ greaterThan(stats.getDocValuesMemoryInBytes()));
assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
assertThat(segments.get(0).isCommitted(), equalTo(true));
assertThat(segments.get(0).isSearch(), equalTo(true));
@@ -530,7 +534,9 @@ public void testSegmentsWithIndexSort() throws Exception {
Sort indexSort = new Sort(new SortedSetSortField("_type", false));
try (Store store = createStore();
Engine engine =
- createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, null, indexSort, null)) {
+ createEngine(defaultSettings, store, createTempDir(),
+ NoMergePolicy.INSTANCE, null, null, null,
+ indexSort, null)) {
List<Segment> segments = engine.segments(true);
assertThat(segments.isEmpty(), equalTo(true));
@@ -633,7 +639,8 @@ public long getCheckpoint() {
assertThat(
stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY),
not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))));
- assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY)));
+ assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY),
+ equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY)));
assertThat(Long.parseLong(stats2.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), equalTo(localCheckpoint.get()));
assertThat(stats2.getUserData(), hasKey(SequenceNumbers.MAX_SEQ_NO));
assertThat(Long.parseLong(stats2.getUserData().get(SequenceNumbers.MAX_SEQ_NO)), equalTo(maxSeqNo.get()));
@@ -699,11 +706,15 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException {
for (int i = 0; i < ops; i++) {
final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null);
if (randomBoolean()) {
- final Engine.Index operation = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
+ final Engine.Index operation = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO,
+ 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(),
+ -1, false);
operations.add(operation);
initialEngine.index(operation);
} else {
- final Engine.Delete operation = new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime());
+ final Engine.Delete operation = new Engine.Delete("test", "1", newUid(doc),
+ SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL,
+ Engine.Operation.Origin.PRIMARY, System.nanoTime());
operations.add(operation);
initialEngine.delete(operation);
}
@@ -799,13 +810,15 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException {
public void testRecoveryFromTranslogUpToSeqNo() throws IOException {
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
try (Store store = createStore()) {
- EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
+ EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(),
+ null, null, globalCheckpoint::get);
final long maxSeqNo;
try (InternalEngine engine = createEngine(config)) {
final int docs = randomIntBetween(1, 100);
for (int i = 0; i < docs; i++) {
final String id = Integer.toString(i);
- final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(),
+ SOURCE, null);
engine.index(indexForDoc(doc));
if (rarely()) {
engine.rollTranslogGeneration();
@@ -886,7 +899,8 @@ public void testSimpleOperations() throws Exception {
// its not there...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
searchResult.close();
// but, not there non realtime
@@ -911,7 +925,8 @@ public void testSimpleOperations() throws Exception {
// now its there...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
searchResult.close();
// also in non realtime
@@ -930,8 +945,10 @@ public void testSimpleOperations() throws Exception {
// its not updated yet...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
searchResult.close();
// but, we can still get it (in realtime)
@@ -945,8 +962,10 @@ public void testSimpleOperations() throws Exception {
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
searchResult.close();
// now delete
@@ -955,8 +974,10 @@ public void testSimpleOperations() throws Exception {
// its not deleted yet
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
searchResult.close();
// but, get should not see it (in realtime)
@@ -969,8 +990,10 @@ public void testSimpleOperations() throws Exception {
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
searchResult.close();
// add it back
@@ -982,8 +1005,10 @@ public void testSimpleOperations() throws Exception {
// its not there...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
searchResult.close();
// refresh and it should be there
@@ -992,8 +1017,10 @@ public void testSimpleOperations() throws Exception {
// now its there...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
searchResult.close();
// now flush
@@ -1015,8 +1042,10 @@ public void testSimpleOperations() throws Exception {
// its not updated yet...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
searchResult.close();
// refresh and it should be updated
@@ -1024,8 +1053,10 @@ public void testSimpleOperations() throws Exception {
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
searchResult.close();
}
@@ -1041,7 +1072,8 @@ public void testSearchResultRelease() throws Exception {
// its not there...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
searchResult.close();
// refresh and it should be there
@@ -1050,7 +1082,8 @@ public void testSearchResultRelease() throws Exception {
// now its there...
searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
// don't release the search result yet...
// delete, refresh and do a new search, it should not be there
@@ -1062,7 +1095,8 @@ public void testSearchResultRelease() throws Exception {
// the non release search result should not see the deleted yet...
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
searchResult.close();
}
@@ -1072,7 +1106,8 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException {
store = createStore();
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get();
- engine = createEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier));
+ engine = createEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null,
+ globalCheckpointSupplier));
ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null);
engine.index(indexForDoc(doc));
boolean inSync = randomBoolean();
@@ -1116,8 +1151,8 @@ public void testSyncedFlush() throws IOException {
assertEquals("should fail to sync flush with wrong id (but no docs)", engine.syncFlush(syncId + "1", wrongId),
Engine.SyncedFlushResult.COMMIT_MISMATCH);
engine.index(indexForDoc(doc));
- assertEquals("should fail to sync flush with right id but pending doc", engine.syncFlush(syncId + "2", commitID),
- Engine.SyncedFlushResult.PENDING_OPERATIONS);
+ assertEquals("should fail to sync flush with right id but pending doc",
+ engine.syncFlush(syncId + "2", commitID), Engine.SyncedFlushResult.PENDING_OPERATIONS);
commitID = engine.flush();
assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID),
Engine.SyncedFlushResult.SUCCESS);
@@ -1133,18 +1168,24 @@ public void testRenewSyncFlush() throws Exception {
InternalEngine engine =
createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null))) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
- Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null));
+ Engine.Index doc1 =
+ indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null));
engine.index(doc1);
assertEquals(engine.getLastWriteNanos(), doc1.startTime());
engine.flush();
- Engine.Index doc2 = indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null));
+ Engine.Index doc2 =
+ indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null));
engine.index(doc2);
assertEquals(engine.getLastWriteNanos(), doc2.startTime());
engine.flush();
final boolean forceMergeFlushes = randomBoolean();
- final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null);
+ final ParsedDocument parsedDoc3 =
+ testParsedDocument("3", null, testDocumentWithTextField(), B_1, null);
if (forceMergeFlushes) {
- engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false));
+ engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY,
+ System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(),
+ -1, false));
} else {
engine.index(indexForDoc(parsedDoc3));
}
@@ -1153,7 +1194,8 @@ public void testRenewSyncFlush() throws Exception {
Engine.SyncedFlushResult.SUCCESS);
assertEquals(3, engine.segments(false).size());
- engine.forceMerge(forceMergeFlushes, 1, false, false, false);
+ engine.forceMerge(forceMergeFlushes, 1, false,
+ false, false);
if (forceMergeFlushes == false) {
engine.refresh("make all segments visible");
assertEquals(4, engine.segments(false).size());
@@ -1169,7 +1211,8 @@ public void testRenewSyncFlush() throws Exception {
assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
if (randomBoolean()) {
- Engine.Index doc4 = indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null));
+ Engine.Index doc4 =
+ indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null));
engine.index(doc4);
assertEquals(engine.getLastWriteNanos(), doc4.startTime());
} else {
@@ -1178,7 +1221,8 @@ public void testRenewSyncFlush() throws Exception {
assertEquals(engine.getLastWriteNanos(), delete.startTime());
}
assertFalse(engine.tryRenewSyncCommit());
- engine.flush(false, true); // we might hit a concurrent flush from a finishing merge here - just wait if ongoing...
+ // we might hit a concurrent flush from a finishing merge here - just wait if ongoing...
+ engine.flush(false, true);
assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID));
assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
}
@@ -1191,7 +1235,8 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException {
store = createStore();
engine = createEngine(store, primaryTranslogDir, globalCheckpoint::get);
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
- ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
+ ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
+ new BytesArray("{}"), null);
engine.index(indexForDoc(doc));
globalCheckpoint.set(0L);
final Engine.CommitId commitID = engine.flush();
@@ -1219,7 +1264,8 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException {
public void testSyncedFlushVanishesOnReplay() throws IOException {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
- ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null);
+ ParsedDocument doc = testParsedDocument("1", null,
+ testDocumentWithTextField(), new BytesArray("{}"), null);
engine.index(indexForDoc(doc));
final Engine.CommitId commitID = engine.flush();
assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID),
@@ -1234,7 +1280,8 @@ public void testSyncedFlushVanishesOnReplay() throws IOException {
engine = new InternalEngine(config);
engine.initializeMaxSeqNoOfUpdatesOrDeletes();
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
- assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
+ assertNull("Sync ID must be gone since we have a document to replay",
+ engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
}
public void testVersioningNewCreate() throws IOException {
@@ -1302,7 +1349,8 @@ public void testVersionedUpdate() throws IOException {
Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
Engine.IndexResult indexResult = engine.index(create);
assertThat(indexResult.getVersion(), equalTo(1L));
- try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) {
+ try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()),
+ searcherFactory)) {
assertEquals(1, get.version());
}
@@ -1310,7 +1358,8 @@ public void testVersionedUpdate() throws IOException {
Engine.IndexResult update_1_result = engine.index(update_1);
assertThat(update_1_result.getVersion(), equalTo(2L));
- try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) {
+ try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()),
+ searcherFactory)) {
assertEquals(2, get.version());
}
@@ -1318,7 +1367,8 @@ public void testVersionedUpdate() throws IOException {
Engine.IndexResult update_2_result = engine.index(update_2);
assertThat(update_2_result.getVersion(), equalTo(3L));
- try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) {
+ try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()),
+ searcherFactory)) {
assertEquals(3, get.version());
}
@@ -1330,7 +1380,8 @@ public void testVersioningNewIndex() throws IOException {
Engine.IndexResult indexResult = engine.index(index);
assertThat(indexResult.getVersion(), equalTo(1L));
- index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), null, REPLICA, 0, -1, false);
+ index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(),
+ null, REPLICA, 0, -1, false);
indexResult = replicaEngine.index(index);
assertThat(indexResult.getVersion(), equalTo(1L));
}
@@ -1360,7 +1411,8 @@ public void testForceMergeWithoutSoftDeletes() throws IOException {
ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), B_1, null);
Engine.Index index = indexForDoc(doc);
engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get()));
- engine.forceMerge(true, 10, true, false, false); //expunge deletes
+ //expunge deletes
+ engine.forceMerge(true, 10, true, false, false);
engine.refresh("test");
assertEquals(engine.segments(true).size(), 1);
@@ -1372,7 +1424,8 @@ public void testForceMergeWithoutSoftDeletes() throws IOException {
doc = testParsedDocument(Integer.toString(1), null, testDocument(), B_1, null);
index = indexForDoc(doc);
engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get()));
- engine.forceMerge(true, 10, false, false, false); //expunge deletes
+ //expunge deletes
+ engine.forceMerge(true, 10, false, false, false);
engine.refresh("test");
assertEquals(engine.segments(true).size(), 1);
try (Engine.Searcher test = engine.acquireSearcher("test")) {
@@ -1394,7 +1447,8 @@ public void testForceMergeWithSoftDeletesRetention() throws Exception {
final MapperService mapperService = createMapperService("test");
final Set<String> liveDocs = new HashSet<>();
try (Store store = createStore();
- InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
+ InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null,
+ null, globalCheckpoint::get))) {
int numDocs = scaledRandomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) {
ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
@@ -1468,12 +1522,14 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc
final Set<String> liveDocs = new HashSet<>();
final Set<String> liveDocsWithSource = new HashSet<>();
try (Store store = createStore();
- InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null,
+ InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null,
+ null,
globalCheckpoint::get))) {
int numDocs = scaledRandomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) {
boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource);
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null,
+ useRecoverySource);
engine.index(indexForDoc(doc));
liveDocs.add(doc.id());
if (useRecoverySource == false) {
@@ -1482,7 +1538,8 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc
}
for (int i = 0; i < numDocs; i++) {
boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource);
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null,
+ useRecoverySource);
if (randomBoolean()) {
engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get()));
liveDocs.remove(doc.id());
@@ -1573,14 +1630,16 @@ public void run() {
int numDocs = randomIntBetween(1, 20);
for (int j = 0; j < numDocs; j++) {
i++;
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null);
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1,
+ null);
Engine.Index index = indexForDoc(doc);
engine.index(index);
}
engine.refresh("test");
indexed.countDown();
try {
- engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), randomBoolean());
+ engine.forceMerge(randomBoolean(), 1, false, randomBoolean(),
+ randomBoolean());
} catch (IOException e) {
return;
}
@@ -1609,11 +1668,13 @@ public void run() {
public void testVersioningCreateExistsException() throws IOException {
ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
- Engine.Index create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
+ Engine.Index create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
Engine.IndexResult indexResult = engine.index(create);
assertThat(indexResult.getVersion(), equalTo(1L));
- create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false);
+ create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED,
+ VersionType.INTERNAL, PRIMARY, 0, -1, false);
indexResult = engine.index(create);
assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
@@ -1621,13 +1682,15 @@ public void testVersioningCreateExistsException() throws IOException {
public void testOutOfOrderDocsOnReplica() throws IOException {
final List<Engine.Operation> ops = generateSingleDocHistory(true,
- randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE), 2, 2, 20, "1");
+ randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE),
+ 2, 2, 20, "1");
assertOpsOnReplica(ops, replicaEngine, true, logger);
}
public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, InterruptedException {
final List<Engine.Operation> opsDoc1 =
- generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 100, 300, "1");
+ generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL),
+ 2, 100, 300, "1");
final Engine.Operation lastOpDoc1 = opsDoc1.get(opsDoc1.size() - 1);
final String lastFieldValueDoc1;
if (lastOpDoc1 instanceof Engine.Index) {
@@ -1638,7 +1701,8 @@ public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, Interrup
lastFieldValueDoc1 = null;
}
final List<Engine.Operation> opsDoc2 =
- generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 100, 300, "2");
+ generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL),
+ 2, 100, 300, "2");
final Engine.Operation lastOpDoc2 = opsDoc2.get(opsDoc2.size() - 1);
final String lastFieldValueDoc2;
if (lastOpDoc2 instanceof Engine.Index) {
@@ -1706,12 +1770,14 @@ public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, Interrup
}
public void testInternalVersioningOnPrimary() throws IOException {
- final List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.INTERNAL, 2, 2, 20, "1");
+ final List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.INTERNAL,
+ 2, 2, 20, "1");
assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine);
}
public void testVersionOnPrimaryWithConcurrentRefresh() throws Exception {
- List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.INTERNAL, 2, 10, 100, "1");
+ List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.INTERNAL,
+ 2, 10, 100, "1");
CountDownLatch latch = new CountDownLatch(1);
AtomicBoolean running = new AtomicBoolean(true);
Thread refreshThread = new Thread(() -> {
@@ -1831,7 +1897,8 @@ public void testNonInternalVersioningOnPrimary() throws IOException {
final Set<VersionType> nonInternalVersioning = new HashSet<>(Arrays.asList(VersionType.values()));
nonInternalVersioning.remove(VersionType.INTERNAL);
final VersionType versionType = randomFrom(nonInternalVersioning);
- final List<Engine.Operation> ops = generateSingleDocHistory(false, versionType, 2, 2, 20, "1");
+ final List<Engine.Operation> ops = generateSingleDocHistory(false, versionType, 2,
+ 2, 20, "1");
final Engine.Operation lastOp = ops.get(ops.size() - 1);
final String lastFieldValue;
if (lastOp instanceof Engine.Index) {
@@ -1909,8 +1976,10 @@ public void testNonInternalVersioningOnPrimary() throws IOException {
}
public void testVersioningPromotedReplica() throws IOException {
- final List<Engine.Operation> replicaOps = generateSingleDocHistory(true, VersionType.INTERNAL, 1, 2, 20, "1");
- List<Engine.Operation> primaryOps = generateSingleDocHistory(false, VersionType.INTERNAL, 2, 2, 20, "1");
+ final List<Engine.Operation> replicaOps = generateSingleDocHistory(true, VersionType.INTERNAL, 1,
+ 2, 20, "1");
+ List<Engine.Operation> primaryOps = generateSingleDocHistory(false, VersionType.INTERNAL, 2,
+ 2, 20, "1");
Engine.Operation lastReplicaOp = replicaOps.get(replicaOps.size() - 1);
final boolean deletedOnReplica = lastReplicaOp instanceof Engine.Delete;
final long finalReplicaVersion = lastReplicaOp.version();
@@ -1930,7 +1999,8 @@ public void testVersioningPromotedReplica() throws IOException {
}
public void testConcurrentExternalVersioningOnPrimary() throws IOException, InterruptedException {
- final List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.EXTERNAL, 2, 100, 300, "1");
+ final List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.EXTERNAL, 2,
+ 100, 300, "1");
final Engine.Operation lastOp = ops.get(ops.size() - 1);
final String lastFieldValue;
if (lastOp instanceof Engine.Index) {
@@ -1983,7 +2053,8 @@ class OpAndVersion {
throw new AssertionError(e);
}
for (int op = 0; op < opsPerThread; op++) {
- try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) {
+ try (Engine.GetResult get = engine.get(new Engine.Get(true, false,
+ doc.type(), doc.id(), uidTerm), searcherFactory)) {
FieldsVisitor visitor = new FieldsVisitor(true);
get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor);
List<String> values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString()));
@@ -2025,7 +2096,8 @@ class OpAndVersion {
assertTrue(op.added + " should not exist", exists);
}
- try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) {
+ try (Engine.GetResult get = engine.get(new Engine.Get(true, false,
+ doc.type(), doc.id(), uidTerm), searcherFactory)) {
FieldsVisitor visitor = new FieldsVisitor(true);
get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor);
List<String> values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString()));
@@ -2056,7 +2128,8 @@ private static class MockAppender extends AbstractAppender {
public boolean sawIndexWriterIFDMessage;
MockAppender(final String name) throws IllegalAccessException {
- super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null);
+ super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0],
+ false, null, null), null);
}
@Override
@@ -2123,9 +2196,11 @@ public void testSeqNoAndCheckpoints() throws IOException {
try {
initialEngine = createEngine(defaultSettings, store, createTempDir(), newLogMergePolicy(), null);
- final ShardRouting primary = TestShardRouting.newShardRouting("test", shardId.id(), "node1", null, true,
+ final ShardRouting primary = TestShardRouting.newShardRouting("test",
+ shardId.id(), "node1", null, true,
ShardRoutingState.STARTED, allocationId);
- final ShardRouting replica = TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.STARTED);
+ final ShardRouting replica =
+ TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.STARTED);
ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier();
gcpTracker.updateFromMaster(1L, new HashSet<>(Arrays.asList(primary.allocationId().getId(),
replica.allocationId().getId())),
@@ -2355,7 +2430,8 @@ private static FixedBitSet getSeqNosSet(final IndexReader reader, final long hig
throw new AssertionError("Document does not have a seq number: " + docID);
}
final long seqNo = values.longValue();
- assertFalse("should not have more than one document with the same seq_no[" + seqNo + "]", bitSet.get((int) seqNo));
+ assertFalse("should not have more than one document with the same seq_no[" +
+ seqNo + "]", bitSet.get((int) seqNo));
bitSet.set((int) seqNo);
}
}
@@ -2408,10 +2484,13 @@ public void testEnableGcDeletes() throws Exception {
document.add(new TextField("value", "test1", Field.Store.YES));
ParsedDocument doc = testParsedDocument("1", null, document, B_2, null);
- engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
+ engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1,
+ VersionType.EXTERNAL,
+ Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false));
// Delete document we just added:
- engine.delete(new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ engine.delete(new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
// Get should not find the document
Engine.GetResult getResult = engine.get(newGet(true, doc), searcherFactory);
@@ -2425,14 +2504,17 @@ public void testEnableGcDeletes() throws Exception {
}
// Delete non-existent document
- engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
// Get should not find the document (we never indexed uid=2):
- getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), searcherFactory);
+ getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")),
+ searcherFactory);
assertThat(getResult.exists(), equalTo(false));
// Try to index uid=1 with a too-old version, should fail:
- Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
+ Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2,
+ VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
Engine.IndexResult indexResult = engine.index(index);
assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
@@ -2442,7 +2524,8 @@ public void testEnableGcDeletes() throws Exception {
assertThat(getResult.exists(), equalTo(false));
// Try to index uid=2 with a too-old version, should fail:
- Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
+ Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2,
+ VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false);
indexResult = engine.index(index1);
assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE));
assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
@@ -2516,15 +2599,18 @@ public void testSettings() {
public void testCurrentTranslogIDisCommitted() throws IOException {
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
try (Store store = createStore()) {
- EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
+ EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null,
+ globalCheckpoint::get);
// create
{
store.createEmpty();
final String translogUUID =
- Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
+ Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
+ SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
- ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null);
+ ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(),
+ new BytesArray("{}"), null);
Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
@@ -2564,7 +2650,8 @@ public void testCurrentTranslogIDisCommitted() throws IOException {
// open index with new tlog
{
final String translogUUID =
- Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
+ Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
+ SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
trimUnsafeCommits(config);
try (InternalEngine engine = new InternalEngine(config)) {
@@ -2589,7 +2676,8 @@ public void testCurrentTranslogIDisCommitted() throws IOException {
engine.initializeMaxSeqNoOfUpdatesOrDeletes();
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
userData = engine.getLastCommittedSegmentInfos().getUserData();
- assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY));
+ assertEquals("no changes - nothing to commit", "1",
+ userData.get(Translog.TRANSLOG_GENERATION_KEY));
assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY));
}
}
@@ -2613,7 +2701,8 @@ public void testMissingTranslog() throws IOException {
// expected
}
// when a new translog is created it should be ok
- final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm);
+ final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir,
+ SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm);
store.associateIndexWithNewTranslog(translogUUID);
EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null);
engine = new InternalEngine(config);
@@ -2626,7 +2715,8 @@ public void testTranslogReplayWithFailure() throws IOException {
final int numDocs = randomIntBetween(1, 10);
try (InternalEngine engine = createEngine(store, translogPath)) {
for (int i = 0; i < numDocs; i++) {
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(),
+ new BytesArray("{}"), null);
Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
Engine.IndexResult indexResult = engine.index(firstIndexRequest);
@@ -2695,7 +2785,8 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s
}) {
engine.initializeMaxSeqNoOfUpdatesOrDeletes();
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
- final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc1 = testParsedDocument("1", null,
+ testDocumentWithTextField(), SOURCE, null);
engine.index(indexForDoc(doc1));
globalCheckpoint.set(engine.getLocalCheckpoint());
throwErrorOnCommit.set(true);
@@ -2721,8 +2812,10 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s
public void testSkipTranslogReplay() throws IOException {
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), null,
+ testDocument(), new BytesArray("{}"), null);
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc,
+ SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
Engine.IndexResult indexResult = engine.index(firstIndexRequest);
assertThat(indexResult.getVersion(), equalTo(1L));
}
@@ -2761,8 +2854,11 @@ public void testTranslogReplay() throws IOException {
final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getLocalCheckpoint();
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(),
+ new BytesArray("{}"), null);
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc,
+ SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY,
+ System.nanoTime(), -1, false);
Engine.IndexResult indexResult = engine.index(firstIndexRequest);
assertThat(indexResult.getVersion(), equalTo(1L));
}
@@ -2772,7 +2868,8 @@ public void testTranslogReplay() throws IOException {
engine.close();
trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier));
- engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work
+ // we need to reuse the engine config unless the parser.mappingModified won't work
+ engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier));
engine.initializeMaxSeqNoOfUpdatesOrDeletes();
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
@@ -2793,8 +2890,10 @@ public void testTranslogReplay() throws IOException {
final boolean flush = randomBoolean();
int randomId = randomIntBetween(numDocs + 1, numDocs + 10);
- ParsedDocument doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
+ ParsedDocument doc =
+ testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null);
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
Engine.IndexResult indexResult = engine.index(firstIndexRequest);
assertThat(indexResult.getVersion(), equalTo(1L));
if (flush) {
@@ -2803,7 +2902,8 @@ public void testTranslogReplay() throws IOException {
}
doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null);
- Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
+ Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2,
+ VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false);
Engine.IndexResult result = engine.index(idxRequest);
engine.refresh("test");
assertThat(result.getVersion(), equalTo(2L));
@@ -2836,8 +2936,10 @@ public void testTranslogReplay() throws IOException {
public void testRecoverFromForeignTranslog() throws IOException {
final int numDocs = randomIntBetween(1, 10);
for (int i = 0; i < numDocs; i++) {
- ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
- Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
+ ParsedDocument doc =
+ testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
Engine.IndexResult index = engine.index(firstIndexRequest);
assertThat(index.getVersion(), equalTo(1L));
}
@@ -2850,7 +2952,8 @@ public void testRecoverFromForeignTranslog() throws IOException {
Translog translog = new Translog(
new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE),
badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
- translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(),
+ "{}".getBytes(Charset.forName("UTF-8"))));
assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
translog.close();
@@ -2883,7 +2986,8 @@ public void run() {
try {
switch (operation) {
case "optimize": {
- engine.forceMerge(true, 1, false, false, false);
+ engine.forceMerge(true, 1, false, false,
+ false);
break;
}
case "refresh": {
@@ -2906,7 +3010,8 @@ public void run() {
engine.close();
mergeThread.join();
logger.info("exception caught: ", exception.get());
- assertTrue("expected an Exception that signals shard is not available", TransportActions.isShardNotAvailableException(exception.get()));
+ assertTrue("expected an Exception that signals shard is not available",
+ TransportActions.isShardNotAvailableException(exception.get()));
}
/**
@@ -3098,7 +3203,8 @@ public BytesRef binaryValue() {
}
public void testDoubleDeliveryPrimary() throws IOException {
- final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
+ new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
Engine.Index operation = appendOnlyPrimary(doc, false, 1);
Engine.Index retry = appendOnlyPrimary(doc, true, 1);
if (randomBoolean()) {
@@ -3157,8 +3263,8 @@ public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException
Engine.Index operation = appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5));
Engine.Index retry = appendOnlyReplica(doc, true, 1, randomIntBetween(0, 5));
Engine.Delete delete = new Engine.Delete(operation.type(), operation.id(), operation.uid(),
- Math.max(retry.seqNo(), operation.seqNo())+1, operation.primaryTerm(), operation.version()+1, operation.versionType(),
- REPLICA, operation.startTime()+1);
+ Math.max(retry.seqNo(), operation.seqNo())+1, operation.primaryTerm(), operation.version()+1,
+ operation.versionType(), REPLICA, operation.startTime()+1);
// operations with a seq# equal or lower to the local checkpoint are not indexed to lucene
// and the version lookup is skipped
final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0;
@@ -3310,20 +3416,24 @@ public void testDoubleDeliveryReplica() throws IOException {
public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException {
- final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
+ new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
boolean isRetry = false;
long autoGeneratedIdTimestamp = 0;
- Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
+ Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
Engine.IndexResult indexResult = engine.index(index);
assertThat(indexResult.getVersion(), equalTo(1L));
- index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(),
+ null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
indexResult = replicaEngine.index(index);
assertThat(indexResult.getVersion(), equalTo(1L));
isRetry = true;
- index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY,
+ VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
indexResult = engine.index(index);
assertThat(indexResult.getVersion(), equalTo(1L));
engine.refresh("test");
@@ -3332,7 +3442,8 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep
assertEquals(1, topDocs.totalHits.value);
}
- index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(),
+ null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
indexResult = replicaEngine.index(index);
assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.SUCCESS));
replicaEngine.refresh("test");
@@ -3344,20 +3455,25 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep
public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException {
- final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
+ new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
boolean isRetry = true;
long autoGeneratedIdTimestamp = 0;
- Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO,
+ 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
Engine.IndexResult result = engine.index(firstIndexRequest);
assertThat(result.getVersion(), equalTo(1L));
- Engine.Index firstIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), firstIndexRequest.primaryTerm(), result.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ Engine.Index firstIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(),
+ firstIndexRequest.primaryTerm(), result.getVersion(), null, REPLICA, System.nanoTime(),
+ autoGeneratedIdTimestamp, isRetry);
Engine.IndexResult indexReplicaResult = replicaEngine.index(firstIndexRequestReplica);
assertThat(indexReplicaResult.getVersion(), equalTo(1L));
isRetry = false;
- Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO,
+ 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
Engine.IndexResult indexResult = engine.index(secondIndexRequest);
assertTrue(indexResult.isCreated());
engine.refresh("test");
@@ -3366,7 +3482,8 @@ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs()
assertEquals(1, topDocs.totalHits.value);
}
- Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(), result.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
+ Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(),
+ result.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry);
replicaEngine.index(secondIndexRequestReplica);
replicaEngine.refresh("test");
try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) {
@@ -3399,7 +3516,8 @@ public void testRetryConcurrently() throws InterruptedException, IOException {
List<Engine.Index> docs = new ArrayList<>();
final boolean primary = randomBoolean();
for (int i = 0; i < numDocs; i++) {
- final ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ final ParsedDocument doc = testParsedDocument(Integer.toString(i), null,
+ testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
final Engine.Index originalIndex;
final Engine.Index retryIndex;
if (primary) {
@@ -3469,9 +3587,11 @@ public void testEngineMaxTimestampIsInitialized() throws IOException {
final long timestamp2 = randomNonNegativeLong();
final long maxTimestamp12 = Math.max(timestamp1, timestamp2);
final Function<Store, EngineConfig> configSupplier =
- store -> config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get);
+ store -> config(defaultSettings, store, translogDir,
+ NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get);
try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = createEngine(configSupplier.apply(store))) {
- assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
+ assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
+ engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
engine.index(appendOnlyPrimary(doc, true, timestamp1));
@@ -3479,7 +3599,8 @@ public void testEngineMaxTimestampIsInitialized() throws IOException {
}
try (Store store = createStore(newFSDirectory(storeDir));
InternalEngine engine = new InternalEngine(configSupplier.apply(store))) {
- assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
+ assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
+ engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
engine.initializeMaxSeqNoOfUpdatesOrDeletes();
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
@@ -3492,7 +3613,8 @@ public void testEngineMaxTimestampIsInitialized() throws IOException {
}
try (Store store = createStore(newFSDirectory(storeDir))) {
if (randomBoolean() || true) {
- final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
+ final String translogUUID = Translog.createEmptyTranslog(translogDir,
+ SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
}
try (Engine engine = new InternalEngine(configSupplier.apply(store))) {
@@ -3509,7 +3631,8 @@ public void testAppendConcurrently() throws InterruptedException, IOException {
boolean primary = randomBoolean();
List<Engine.Index> docs = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
- final ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
+ final ParsedDocument doc = testParsedDocument(Integer.toString(i), null,
+ testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
Engine.Index index = primary ? appendOnlyPrimary(doc, false, i) : appendOnlyReplica(doc, false, i, i);
docs.add(index);
}
@@ -3633,7 +3756,8 @@ public void afterRefresh(boolean didRefresh) throws IOException {
}
public void testSequenceIDs() throws Exception {
- Tuple<Long, Long> seqID = getSequenceID(engine, new Engine.Get(false, false, "type", "2", newUid("1")));
+ Tuple<Long, Long> seqID = getSequenceID(engine, new Engine.Get(false, false,
+ "type", "2", newUid("1")));
// Non-existent doc returns no seqnum and no primary term
assertThat(seqID.v1(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO));
assertThat(seqID.v2(), equalTo(0L));
@@ -3679,7 +3803,8 @@ public void testSequenceIDs() throws Exception {
// we can query by the _seq_no
Engine.Searcher searchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
- MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(LongPoint.newExactQuery("_seq_no", 2), 1));
+ MatcherAssert.assertThat(searchResult,
+ EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(LongPoint.newExactQuery("_seq_no", 2), 1));
searchResult.close();
}
@@ -3695,7 +3820,8 @@ public void testLookupSeqNoByIdInLucene() throws Exception {
final ParsedDocument doc = EngineTestCase.createParsedDoc(id, null);
if (isIndexing) {
operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(),
- i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), -1, true));
+ i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(),
+ -1, true));
} else {
operations.add(new Engine.Delete(doc.type(), doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(),
i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis()));
@@ -3721,7 +3847,8 @@ public void testLookupSeqNoByIdInLucene() throws Exception {
String msg = "latestOps=" + latestOps + " op=" + id;
DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), newUid(id));
assertThat(msg, docIdAndSeqNo.seqNo, equalTo(latestOps.get(id).seqNo()));
- assertThat(msg, docIdAndSeqNo.isLive, equalTo(latestOps.get(id).operationType() == Engine.Operation.TYPE.INDEX));
+ assertThat(msg, docIdAndSeqNo.isLive,
+ equalTo(latestOps.get(id).operationType() == Engine.Operation.TYPE.INDEX));
}
assertThat(VersionsAndSeqNoResolver.loadDocIdAndVersion(
searcher.reader(), newUid("any-" + between(1, 10))), nullValue());
@@ -3805,7 +3932,9 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro
final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final List<Thread> threads = new ArrayList<>();
initialEngine =
- createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, LocalCheckpointTracker::new, null, getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint));
+ createEngine(defaultSettings, store, primaryTranslogDir,
+ newMergePolicy(), null, LocalCheckpointTracker::new, null,
+ getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint));
final InternalEngine finalInitialEngine = initialEngine;
for (int i = 0; i < docs; i++) {
final String id = Integer.toString(i);
@@ -3926,7 +4055,8 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio
}
assertThat(engine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
- try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) {
+ try (Engine.GetResult result = engine.get(new Engine.Get(true, false,
+ "type", "2", uid), searcherFactory)) {
assertThat(result.exists(), equalTo(exists));
}
}
@@ -3961,7 +4091,8 @@ protected long doGenerateSeqNoForOperation(Operation operation) {
assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1)));
assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled));
noOpEngine.noOp(
- new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason));
+ new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(),
+ randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason));
assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 2)));
assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1));
// skip to the op that we added to the translog
@@ -3983,7 +4114,8 @@ protected long doGenerateSeqNoForOperation(Operation operation) {
List<Translog.Operation> operationsFromLucene = readAllOperationsInLucene(noOpEngine, mapperService);
assertThat(operationsFromLucene, hasSize(maxSeqNo + 2 - localCheckpoint)); // fills n gaps and 2 manual no-ops.
for (int i = 0; i < operationsFromLucene.size(); i++) {
- assertThat(operationsFromLucene.get(i), equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps")));
+ assertThat(operationsFromLucene.get(i),
+ equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps")));
}
assertConsistentHistoryBetweenTranslogAndLuceneIndex(noOpEngine, mapperService);
}
@@ -4060,7 +4192,9 @@ public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierExcepti
final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final Map<Thread, CountDownLatch> threads = new LinkedHashMap<>();
actualEngine =
- createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, LocalCheckpointTracker::new, null, getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint));
+ createEngine(defaultSettings, store, primaryTranslogDir,
+ newMergePolicy(), null, LocalCheckpointTracker::new, null,
+ getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint));
final InternalEngine finalActualEngine = actualEngine;
final Translog translog = finalActualEngine.getTranslog();
final long generation = finalActualEngine.getTranslog().currentFileGeneration();
@@ -4168,7 +4302,8 @@ public void testRestoreLocalHistoryFromTranslog() throws IOException {
engineConfig = engine.config();
for (final long seqNo : seqNos) {
final String id = Long.toString(seqNo);
- final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc = testParsedDocument(id, null,
+ testDocumentWithTextField(), SOURCE, null);
engine.index(replicaIndexForDoc(doc, 1, seqNo, false));
if (rarely()) {
engine.rollTranslogGeneration();
@@ -4378,7 +4513,8 @@ public void testSeqNoGenerator() throws IOException {
SequenceNumbers.NO_OPS_PERFORMED,
SequenceNumbers.NO_OPS_PERFORMED);
final AtomicLong seqNoGenerator = new AtomicLong(seqNo);
- try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, localCheckpointTrackerSupplier,
+ try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir,
+ newMergePolicy(), null, localCheckpointTrackerSupplier,
null, (engine, operation) -> seqNoGenerator.getAndIncrement())) {
final String id = "id";
final Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE);
@@ -4451,8 +4587,8 @@ public void testKeepTranslogAfterGlobalCheckpoint() throws Exception {
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
- final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null,
- () -> globalCheckpoint.get());
+ final EngineConfig engineConfig = config(indexSettings, store, translogPath,
+ NoMergePolicy.INSTANCE, null, null, () -> globalCheckpoint.get());
try (InternalEngine engine = new InternalEngine(engineConfig) {
@Override
protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
@@ -4600,7 +4736,8 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception {
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
try (Store store = createStore();
InternalEngine engine =
- createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) {
+ createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(),
+ null, null, globalCheckpoint::get))) {
final int numDocs = scaledRandomIntBetween(10, 100);
for (int docId = 0; docId < numDocs; docId++) {
index(engine, docId);
@@ -4657,10 +4794,12 @@ public void testShouldPeriodicallyFlush() throws Exception {
assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false));
// A new engine may have more than one empty translog file - the test should account for this extra.
final Translog translog = engine.getTranslog();
- final long extraTranslogSizeInNewEngine = engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
+ final long extraTranslogSizeInNewEngine =
+ engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
int numDocs = between(10, 100);
for (int id = 0; id < numDocs; id++) {
- final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc =
+ testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null);
engine.index(indexForDoc(doc));
}
assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false));
@@ -4678,7 +4817,8 @@ public void testShouldPeriodicallyFlush() throws Exception {
assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0));
// Stale operations skipped by Lucene but added to translog - still able to flush
for (int id = 0; id < numDocs; id++) {
- final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc =
+ testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null);
final Engine.IndexResult result = engine.index(replicaIndexForDoc(doc, 1L, id, false));
assertThat(result.isCreated(), equalTo(false));
}
@@ -4695,7 +4835,8 @@ public void testShouldPeriodicallyFlush() throws Exception {
if (randomBoolean()) {
translog.rollGeneration();
}
- final ParsedDocument doc = testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc =
+ testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null);
engine.index(replicaIndexForDoc(doc, 2L, generateNewSeqNo(engine), false));
if (engine.shouldPeriodicallyFlush()) {
engine.flush();
@@ -4719,7 +4860,8 @@ public void testStressShouldPeriodicallyFlush() throws Exception {
for (int i = 0; i < numOps; i++) {
final long localCheckPoint = engine.getLocalCheckpoint();
final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5);
- final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null);
+ final ParsedDocument doc =
+ testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null);
engine.index(replicaIndexForDoc(doc, 1L, seqno, false));
if (rarely() && engine.getTranslog().shouldRollGeneration()) {
engine.rollTranslogGeneration();
@@ -4742,7 +4884,8 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup
.put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), TimeValue.timeValueMillis(1))).build();
engine.engineConfig.getIndexSettings().updateIndexMetaData(indexMetaData);
engine.onSettingsChanged();
- ParsedDocument document = testParsedDocument(Integer.toString(0), null, testDocumentWithTextField(), SOURCE, null);
+ ParsedDocument document =
+ testParsedDocument(Integer.toString(0), null, testDocumentWithTextField(), SOURCE, null);
final Engine.Index doc = new Engine.Index(newUid(document), document, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false);
// first index an append-only document and then delete it, such that we have it in the tombstones
@@ -4750,15 +4893,18 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup
engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), primaryTerm.get()));
// now index more append-only docs and refresh so we re-enable the optimization for unsafe version map
- ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null);
+ ParsedDocument document1 =
+ testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null);
engine.index(new Engine.Index(newUid(document1), document1, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false));
engine.refresh("test");
- ParsedDocument document2 = testParsedDocument(Integer.toString(2), null, testDocumentWithTextField(), SOURCE, null);
+ ParsedDocument document2 =
+ testParsedDocument(Integer.toString(2), null, testDocumentWithTextField(), SOURCE, null);
engine.index(new Engine.Index(newUid(document2), document2, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false));
engine.refresh("test");
- ParsedDocument document3 = testParsedDocument(Integer.toString(3), null, testDocumentWithTextField(), SOURCE, null);
+ ParsedDocument document3 =
+ testParsedDocument(Integer.toString(3), null, testDocumentWithTextField(), SOURCE, null);
final Engine.Index doc3 = new Engine.Index(newUid(document3), document3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false);
engine.index(doc3);
@@ -4770,14 +4916,16 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup
CountDownLatch awaitStarted = new CountDownLatch(1);
Thread thread = new Thread(() -> {
awaitStarted.countDown();
- try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(), doc3.id(), doc3.uid()),
+ try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(),
+ doc3.id(), doc3.uid()),
engine::acquireSearcher)) {
assertTrue(getResult.exists());
}
});
thread.start();
awaitStarted.await();
- try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), doc.uid()),
+ try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(),
+ doc.id(), doc.uid()),
engine::acquireSearcher)) {
assertFalse(getResult.exists());
}
@@ -4829,9 +4977,11 @@ public void testPruneOnlyDeletesAtMostLocalCheckpoint() throws Exception {
// Fill the seqno gap - should prune all tombstones.
clock.set(between(0, 100));
if (randomBoolean()) {
- engine.index(replicaIndexForDoc(testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, gapSeqNo, false));
+ engine.index(replicaIndexForDoc(testParsedDocument("d", null, testDocumentWithTextField(),
+ SOURCE, null), 1, gapSeqNo, false));
} else {
- engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), Versions.MATCH_ANY, gapSeqNo, threadPool.relativeTimeInMillis()));
+ engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), Versions.MATCH_ANY,
+ gapSeqNo, threadPool.relativeTimeInMillis()));
}
clock.set(randomLongBetween(100 + gcInterval * 4/3, Long.MAX_VALUE)); // Need a margin for gcInterval/4.
engine.refresh("test");
@@ -4851,7 +5001,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception {
latch.countDown();
final int numDocs = scaledRandomIntBetween(100, 1000);
for (int i = 0; i < numDocs; i++) {
- ParsedDocument doc = testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null);
+ ParsedDocument doc =
+ testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null);
if (randomBoolean()) {
engine.index(appendOnlyReplica(doc, randomBoolean(), 1, generateNewSeqNo(engine)));
} else {
@@ -4868,7 +5019,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception {
long maxSeqNoOfNonAppendOnly = SequenceNumbers.NO_OPS_PERFORMED;
final int numOps = scaledRandomIntBetween(100, 1000);
for (int i = 0; i < numOps; i++) {
- ParsedDocument parsedDocument = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null);
+ ParsedDocument parsedDocument =
+ testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null);
if (randomBoolean()) { // On replica - update max_seqno for non-append-only operations
final long seqno = generateNewSeqNo(engine);
final Engine.Index doc = replicaIndexForDoc(parsedDocument, 1, seqno, randomBoolean());
@@ -4883,7 +5035,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception {
if (randomBoolean()) {
engine.index(indexForDoc(parsedDocument));
} else {
- engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()), primaryTerm.get()));
+ engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(),
+ newUid(parsedDocument.id()), primaryTerm.get()));
}
}
}
@@ -4926,7 +5079,8 @@ public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception {
assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes));
// optimize for other append-only 2 (its seqno > max_seqno of non-append-only) - do not look up in version map.
- engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null, testDocumentWithTextField(), SOURCE, null),
+ engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null,
+ testDocumentWithTextField(), SOURCE, null),
false, randomNonNegativeLong(), generateNewSeqNo(engine)));
assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes));
}
@@ -4937,12 +5091,14 @@ public void testTrimUnsafeCommits() throws Exception {
final List<Long> seqNos = LongStream.rangeClosed(0, maxSeqNo).boxed().collect(Collectors.toList());
Collections.shuffle(seqNos, random());
try (Store store = createStore()) {
- EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
+ EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(),
+ null, null, globalCheckpoint::get);
final List<Long> commitMaxSeqNo = new ArrayList<>();
final long minTranslogGen;
try (InternalEngine engine = createEngine(config)) {
for (int i = 0; i < seqNos.size(); i++) {
- ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), new BytesArray("{}"), null);
+ ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(),
+ new BytesArray("{}"), null);
Engine.Index index = new Engine.Index(newUid(doc), doc, seqNos.get(i), 0,
1, null, REPLICA, System.nanoTime(), -1, false);
engine.index(index);
@@ -5047,7 +5203,8 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException {
Randomness.shuffle(operations);
Set<Long> existingSeqNos = new HashSet<>();
store = createStore();
- engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get));
+ engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null,
+ globalCheckpoint::get));
assertThat(engine.getMinRetainedSeqNo(), equalTo(0L));
long lastMinRetainedSeqNo = engine.getMinRetainedSeqNo();
for (Engine.Operation op : operations) {
@@ -5119,7 +5276,8 @@ public void testLastRefreshCheckpoint() throws Exception {
refreshThreads[i].start();
}
latch.await();
- List<Engine.Operation> ops = generateSingleDocHistory(true, VersionType.EXTERNAL, 1, 10, 1000, "1");
+ List<Engine.Operation> ops = generateSingleDocHistory(true, VersionType.EXTERNAL,
+ 1, 10, 1000, "1");
concurrentlyApplyOps(ops, engine);
done.set(true);
for (Thread thread : refreshThreads) {
@@ -5231,7 +5389,8 @@ public void testRebuildLocalCheckpointTracker() throws Exception {
final ParsedDocument doc = EngineTestCase.createParsedDoc(Integer.toString(between(1, 100)), null);
if (randomBoolean()) {
operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(),
- i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), -1, true));
+ i, null, Engine.Operation.Origin.REPLICA,
+ threadPool.relativeTimeInMillis(), -1, true));
} else if (randomBoolean()) {
operations.add(new Engine.Delete(doc.type(), doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(),
i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis()));
@@ -5244,7 +5403,8 @@ public void testRebuildLocalCheckpointTracker() throws Exception {
List<List<Engine.Operation>> commits = new ArrayList<>();
commits.add(new ArrayList<>());
try (Store store = createStore()) {
- EngineConfig config = config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get);
+ EngineConfig config = config(indexSettings, store, translogPath,
+ newMergePolicy(), null, null, globalCheckpoint::get);
final List docs;
try (InternalEngine engine = createEngine(config)) {
List<Engine.Operation> flushedOperations = new ArrayList<>();
@@ -5282,8 +5442,8 @@ public void testRebuildLocalCheckpointTracker() throws Exception {
try (InternalEngine engine = new InternalEngine(config)) { // do not recover from translog
final LocalCheckpointTracker tracker = engine.getLocalCheckpointTracker();
for (Engine.Operation op : operations) {
- assertThat("seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getCheckpoint(),
- tracker.contains(op.seqNo()), equalTo(safeCommit.contains(op)));
+ assertThat("seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() +
+ " checkpoint=" + tracker.getCheckpoint(), tracker.contains(op.seqNo()), equalTo(safeCommit.contains(op)));
}
engine.initializeMaxSeqNoOfUpdatesOrDeletes();
engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
diff --git a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java
index 4a5e303a0542e..46c9f56c33a95 100644
--- a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java
@@ -113,7 +113,8 @@ protected void doAssertLuceneQuery(BoolQueryBuilder queryBuilder, Query query, S
}
}
- private static List<BooleanClause> getBooleanClauses(List<QueryBuilder> queryBuilders, BooleanClause.Occur occur, QueryShardContext context) throws IOException {
+ private static List<BooleanClause> getBooleanClauses(List<QueryBuilder> queryBuilders,
+ BooleanClause.Occur occur, QueryShardContext context) throws IOException {
List<BooleanClause> clauses = new ArrayList<>();
for (QueryBuilder query : queryBuilders) {
Query innerQuery = query.toQuery(context);
@@ -184,11 +185,13 @@ public void testDefaultMinShouldMatch() throws Exception {
assertEquals(0, bq.getMinimumNumberShouldMatch());
// Filters have a minShouldMatch of 0/1
- ConstantScoreQuery csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().must(termQuery("foo", "bar")))).toQuery(createShardContext());
+ ConstantScoreQuery csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery()
+ .must(termQuery("foo", "bar")))).toQuery(createShardContext());
bq = (BooleanQuery) csq.getQuery();
assertEquals(0, bq.getMinimumNumberShouldMatch());
- csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().should(termQuery("foo", "bar")))).toQuery(createShardContext());
+ csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().should(termQuery("foo", "bar"))))
+ .toQuery(createShardContext());
bq = (BooleanQuery) csq.getQuery();
assertEquals(1, bq.getMinimumNumberShouldMatch());
}
@@ -411,7 +414,8 @@ public void testRewriteWithMatchNone() throws IOException {
boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.must(new TermQueryBuilder("foo","bar"));
- boolQueryBuilder.filter(new BoolQueryBuilder().should(new TermQueryBuilder("foo","bar")).filter(new MatchNoneQueryBuilder()));
+ boolQueryBuilder.filter(new BoolQueryBuilder().should(new TermQueryBuilder("foo","bar"))
+ .filter(new MatchNoneQueryBuilder()));
rewritten = Rewriteable.rewrite(boolQueryBuilder, createShardContext());
assertEquals(new MatchNoneQueryBuilder(), rewritten);
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java
index cdc65cce92708..0e0f767d5a5f1 100644
--- a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java
@@ -33,7 +33,8 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase<BoostingQueryBuilder> {
failingQueryBuilder.toQuery(shardContext));
assertThat(e.getMessage(), containsString("failed to find geo_point field [unmapped]"));
diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
index 1a4e69af25342..5615944219c91 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
@@ -233,7 +233,8 @@ protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsReque
if (request.doc() != null) {
generatedFields = generateFields(randomFields, request.doc().utf8ToString());
} else {
- generatedFields = generateFields(request.selectedFields().toArray(new String[request.selectedFields().size()]), request.id());
+ generatedFields =
+ generateFields(request.selectedFields().toArray(new String[request.selectedFields().size()]), request.id());
}
EnumSet<TermVectorsRequest.Flag> flags = EnumSet.of(TermVectorsRequest.Flag.Positions, TermVectorsRequest.Flag.Offsets);
response.setFields(generatedFields, request.selectedFields(), flags, generatedFields);
@@ -284,20 +285,24 @@ public void testValidateEmptyFields() {
public void testValidateEmptyLike() {
String[] likeTexts = randomBoolean() ? null : new String[0];
Item[] likeItems = randomBoolean() ? null : new Item[0];
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems));
+ IllegalArgumentException e =
+ expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems));
assertThat(e.getMessage(), containsString("requires either 'like' texts or items to be specified"));
}
public void testUnsupportedFields() throws IOException {
String unsupportedField = randomFrom(INT_FIELD_NAME, DOUBLE_FIELD_NAME, DATE_FIELD_NAME);
- MoreLikeThisQueryBuilder queryBuilder = new MoreLikeThisQueryBuilder(new String[] {unsupportedField}, new String[]{"some text"}, null)
+ MoreLikeThisQueryBuilder queryBuilder =
+ new MoreLikeThisQueryBuilder(new String[] {unsupportedField}, new String[]{"some text"}, null)
.failOnUnsupportedField(true);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> queryBuilder.toQuery(createShardContext()));
assertThat(e.getMessage(), containsString("more_like_this only supports text/keyword fields"));
}
public void testMoreLikeThisBuilder() throws Exception {
- Query parsedQuery = parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null).minTermFreq(1).maxQueryTerms(12)).toQuery(createShardContext());
+ Query parsedQuery =
+ parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null)
+ .minTermFreq(1).maxQueryTerms(12)).toQuery(createShardContext());
assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));
diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
index 2f69ef7674d4f..58604f7e83c47 100644
--- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
@@ -102,7 +102,8 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() {
if (randomBoolean()) {
query.slop(randomIntBetween(0, 5));
}
- if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() && (query.type() == Type.BEST_FIELDS || query.type() == Type.MOST_FIELDS)) {
+ if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() &&
+ (query.type() == Type.BEST_FIELDS || query.type() == Type.MOST_FIELDS)) {
query.fuzziness(randomFuzziness(fieldName));
}
if (randomBoolean()) {
@@ -197,7 +198,8 @@ public void testToQueryMultipleTermsBooleanQuery() throws Exception {
}
public void testToQueryMultipleFieldsDisableDismax() throws Exception {
- Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(false).toQuery(createShardContext());
+ Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(false)
+ .toQuery(createShardContext());
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
DisjunctionMaxQuery dQuery = (DisjunctionMaxQuery) query;
assertThat(dQuery.getTieBreakerMultiplier(), equalTo(1.0f));
@@ -207,7 +209,8 @@ public void testToQueryMultipleFieldsDisableDismax() throws Exception {
}
public void testToQueryMultipleFieldsDisMaxQuery() throws Exception {
- Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(true).toQuery(createShardContext());
+ Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(true)
+ .toQuery(createShardContext());
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query;
assertThat(disMaxQuery.getTieBreakerMultiplier(), equalTo(0.0f));
@@ -230,8 +233,10 @@ public void testToQueryFieldsWildcard() throws Exception {
}
public void testToQueryFieldMissing() throws Exception {
- assertThat(multiMatchQuery("test").field(MISSING_WILDCARD_FIELD_NAME).toQuery(createShardContext()), instanceOf(MatchNoDocsQuery.class));
- assertThat(multiMatchQuery("test").field(MISSING_FIELD_NAME).toQuery(createShardContext()), instanceOf(MatchNoDocsQuery.class));
+ assertThat(multiMatchQuery("test").field(MISSING_WILDCARD_FIELD_NAME).toQuery(createShardContext()),
+ instanceOf(MatchNoDocsQuery.class));
+ assertThat(multiMatchQuery("test").field(MISSING_FIELD_NAME).toQuery(createShardContext()),
+ instanceOf(MatchNoDocsQuery.class));
}
public void testFromJson() throws IOException {
@@ -333,7 +338,8 @@ public void testToFuzzyQuery() throws Exception {
qb.fuzzyTranspositions(false);
Query query = qb.toQuery(createShardContext());
- FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"), 2, 2, 5, false);
+ FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"), 2, 2,
+ 5, false);
assertEquals(expected, query);
}
@@ -346,8 +352,9 @@ public void testDefaultField() throws Exception {
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
- STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
+ newIndexMeta("index", context.getIndexSettings().getSettings(),
+ Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5")
+ .build())
);
MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello");
@@ -361,7 +368,8 @@ public void testDefaultField() throws Exception {
assertEquals(expected, query);
context.getIndexSettings().updateIndexMetaData(
- newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
+ newIndexMeta("index", context.getIndexSettings().getSettings(),
+ Settings.builder().putList("index.query.default_field",
STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build())
);
// should fail because lenient defaults to false
@@ -440,7 +448,8 @@ public void testWithStopWords() throws Exception {
)
.toQuery(createShardContext());
expected = new BooleanQuery.Builder()
- .add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f), BooleanClause.Occur.SHOULD)
+ .add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()),
+ 0f), BooleanClause.Occur.SHOULD)
.build();
assertEquals(expected, query);
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java
index 0536dae6dfa39..1ffa85de0ecdf 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java
@@ -70,7 +70,8 @@ public void testIllegalArgument() {
}
public void testDist() {
- SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), new SpanTermQueryBuilder("name2", "value2"));
+ SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"),
+ new SpanTermQueryBuilder("name2", "value2"));
assertThat(builder.pre(), equalTo(0));
assertThat(builder.post(), equalTo(0));
builder.dist(-4);
@@ -82,7 +83,8 @@ public void testDist() {
}
public void testPrePost() {
- SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), new SpanTermQueryBuilder("name2", "value2"));
+ SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"),
+ new SpanTermQueryBuilder("name2", "value2"));
assertThat(builder.pre(), equalTo(0));
assertThat(builder.post(), equalTo(0));
builder.pre(-4).post(-4);
diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java
index 8563d9883909f..9b11017e5980e 100644
--- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java
@@ -138,7 +138,8 @@ public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception {
}
@Override
- public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) {
+ public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode,
+ XFieldComparatorSource.Nested nested, boolean reverse) {
throw new UnsupportedOperationException(UNSUPPORTED);
}
@@ -228,7 +229,8 @@ public AtomicNumericFieldData loadDirect(LeafReaderContext context) throws Excep
}
@Override
- public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) {
+ public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode,
+ XFieldComparatorSource.Nested nested, boolean reverse) {
throw new UnsupportedOperationException(UNSUPPORTED);
}
@@ -246,11 +248,14 @@ public Index index() {
private static final ScoreFunction RANDOM_SCORE_FUNCTION = new RandomScoreFunction(0, 0, new IndexFieldDataStub());
private static final ScoreFunction FIELD_VALUE_FACTOR_FUNCTION = new FieldValueFactorFunction("test", 1,
FieldValueFactorFunction.Modifier.LN, 1.0, null);
- private static final ScoreFunction GAUSS_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
+ private static final ScoreFunction GAUSS_DECAY_FUNCTION =
+ new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
GaussDecayFunctionBuilder.GAUSS_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX);
- private static final ScoreFunction EXP_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
+ private static final ScoreFunction EXP_DECAY_FUNCTION =
+ new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
ExponentialDecayFunctionBuilder.EXP_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX);
- private static final ScoreFunction LIN_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
+ private static final ScoreFunction LIN_DECAY_FUNCTION =
+ new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
LinearDecayFunctionBuilder.LINEAR_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX);
private static final ScoreFunction WEIGHT_FACTOR_FUNCTION = new WeightFactorFunction(4);
private static final String TEXT = "The way out is through.";
@@ -319,7 +324,8 @@ public void testExplainFunctionScoreQuery() throws IOException {
}
public Explanation getFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction scoreFunction) throws IOException {
- FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, CombineFunction.AVG,0.0f, 100);
+ FunctionScoreQuery functionScoreQuery =
+ new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, CombineFunction.AVG,0.0f, 100);
Weight weight = searcher.createWeight(searcher.rewrite(functionScoreQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1f);
Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0);
return explanation.getDetails()[1];
@@ -370,7 +376,8 @@ public void testExplainFiltersFunctionScoreQuery() throws IOException {
checkFiltersFunctionScoreExplanation(functionExplanation, "random score function (seed: 0, field: test)", 0);
assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails().length, equalTo(0));
- checkFiltersFunctionScoreExplanation(functionExplanation, "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1);
+ checkFiltersFunctionScoreExplanation(functionExplanation,
+ "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1);
assertThat(functionExplanation.getDetails()[0].getDetails()[1].getDetails()[1].getDetails().length, equalTo(0));
checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 2);
@@ -408,7 +415,8 @@ public FunctionScoreQuery getFiltersFunctionScoreQuery(FunctionScoreQuery.ScoreM
filterFunctions[i] = new FunctionScoreQuery.FilterScoreFunction(
new TermQuery(TERM), scoreFunctions[i]);
}
- return new FunctionScoreQuery(new TermQuery(TERM), scoreMode, filterFunctions, combineFunction,Float.MAX_VALUE * -1, Float.MAX_VALUE);
+ return new FunctionScoreQuery(new TermQuery(TERM),
+ scoreMode, filterFunctions, combineFunction,Float.MAX_VALUE * -1, Float.MAX_VALUE);
}
public void checkFiltersFunctionScoreExplanation(Explanation randomExplanation, String functionExpl, int whichFunction) {
@@ -627,15 +635,19 @@ public void testFunctionScoreHashCodeAndEquals() {
float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat();
ScoreFunction function = new DummyScoreFunction(combineFunction);
- FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
- FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
+ FunctionScoreQuery q =
+ new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
+ FunctionScoreQuery q1 =
+ new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
assertEquals(q, q);
assertEquals(q.hashCode(), q.hashCode());
assertEquals(q, q1);
assertEquals(q.hashCode(), q1.hashCode());
- FunctionScoreQuery diffQuery = new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, combineFunction, minScore, maxBoost);
- FunctionScoreQuery diffMinScore = new FunctionScoreQuery(q.getSubQuery(), function, combineFunction, minScore == null ? 1.0f : null, maxBoost);
+ FunctionScoreQuery diffQuery =
+ new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, combineFunction, minScore, maxBoost);
+ FunctionScoreQuery diffMinScore =
+ new FunctionScoreQuery(q.getSubQuery(), function, combineFunction, minScore == null ? 1.0f : null, maxBoost);
ScoreFunction otherFunction = new DummyScoreFunction(combineFunction);
FunctionScoreQuery diffFunction = new FunctionScoreQuery(q.getSubQuery(), otherFunction, combineFunction, minScore, maxBoost);
FunctionScoreQuery diffMaxBoost = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")),
@@ -666,10 +678,12 @@ public void testFilterFunctionScoreHashCodeAndEquals() {
Float minScore = randomBoolean() ? null : 1.0f;
Float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat();
- FilterScoreFunction function = new FilterScoreFunction(new TermQuery(new Term("filter", "query")), scoreFunction);
+ FilterScoreFunction function =
+ new FilterScoreFunction(new TermQuery(new Term("filter", "query")), scoreFunction);
FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")),
function, combineFunction, minScore, maxBoost);
- FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
+ FunctionScoreQuery q1 =
+ new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
assertEquals(q, q);
assertEquals(q.hashCode(), q.hashCode());
assertEquals(q, q1);
@@ -684,7 +698,8 @@ public void testFilterFunctionScoreHashCodeAndEquals() {
function, combineFunction, minScore == null ? 0.9f : null, maxBoost);
FilterScoreFunction otherFunc = new FilterScoreFunction(new TermQuery(new Term("filter", "other_query")), scoreFunction);
FunctionScoreQuery diffFunc = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), randomFrom(ScoreMode.values()),
- randomBoolean() ? new ScoreFunction[] { function, otherFunc } : new ScoreFunction[] { otherFunc }, combineFunction, minScore, maxBoost);
+ randomBoolean() ? new ScoreFunction[] { function, otherFunc } :
+ new ScoreFunction[] { otherFunc }, combineFunction, minScore, maxBoost);
FunctionScoreQuery[] queries = new FunctionScoreQuery[] {
diffQuery,
diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
index 9fec336e2a33f..ee916dd4c47dd 100644
--- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
@@ -89,8 +89,10 @@ public void testGeohashCellSize() {
double equatorialDistance = 2 * Math.PI * 6378137.0;
double polarDistance = Math.PI * 6356752.314245;
assertThat(GeoUtils.geoHashCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
- assertThat(GeoUtils.geoHashCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2))));
- assertThat(GeoUtils.geoHashCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
+ assertThat(GeoUtils.geoHashCellSize(1),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2))));
+ assertThat(GeoUtils.geoHashCellSize(2),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
assertThat(GeoUtils.geoHashCellSize(3),
equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 256, 2))));
assertThat(GeoUtils.geoHashCellSize(4),
@@ -167,13 +169,20 @@ public void testQuadTreeCellHeight() {
public void testQuadTreeCellSize() {
double equatorialDistance = 2 * Math.PI * 6378137.0;
double polarDistance = Math.PI * 6356752.314245;
- assertThat(GeoUtils.quadTreeCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
- assertThat(GeoUtils.quadTreeCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2))));
- assertThat(GeoUtils.quadTreeCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2))));
- assertThat(GeoUtils.quadTreeCellSize(3), equalTo(Math.sqrt(Math.pow(polarDistance / 8, 2) + Math.pow(equatorialDistance / 8, 2))));
- assertThat(GeoUtils.quadTreeCellSize(4), equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2))));
- assertThat(GeoUtils.quadTreeCellSize(5), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
- assertThat(GeoUtils.quadTreeCellSize(6), equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(0),
+ equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(1),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(2),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(3),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 8, 2) + Math.pow(equatorialDistance / 8, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(4),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(5),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(6),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2))));
assertThat(GeoUtils.quadTreeCellSize(7),
equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 128, 2))));
assertThat(GeoUtils.quadTreeCellSize(8),
@@ -423,7 +432,8 @@ public void testParseGeoPointStringZValueError() throws IOException {
while (parser.currentToken() != Token.VALUE_STRING) {
parser.nextToken();
}
- Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false));
+ Exception e = expectThrows(ElasticsearchParseException.class,
+ () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false));
assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]"));
}
}
@@ -506,7 +516,8 @@ public void testParseGeoPointLatWrongType() throws IOException {
public void testParseGeoPointExtraField() throws IOException {
double lat = 0.0;
double lon = 0.0;
- XContentBuilder json = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("foo", true).endObject();
+ XContentBuilder json = jsonBuilder().startObject()
+ .field("lat", lat).field("lon", lon).field("foo", true).endObject();
try (XContentParser parser = createParser(json)) {
parser.nextToken();
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
@@ -518,7 +529,8 @@ public void testParseGeoPointLonLatGeoHash() throws IOException {
double lat = 0.0;
double lon = 0.0;
String geohash = "abcd";
- XContentBuilder json = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("geohash", geohash).endObject();
+ XContentBuilder json = jsonBuilder().startObject()
+ .field("lat", lat).field("lon", lon).field("geohash", geohash).endObject();
try (XContentParser parser = createParser(json)) {
parser.nextToken();
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
@@ -536,7 +548,8 @@ public void testParseGeoPointArrayTooManyValues() throws IOException {
parser.nextToken();
}
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
- assertThat(e.getMessage(), is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]"));
+ assertThat(e.getMessage(),
+ is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]"));
}
}
@@ -599,7 +612,8 @@ public void testPrefixTreeCellSizes() {
assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
assertThat(GeoUtils.geoHashLevelsForPrecision(size), equalTo(geohashPrefixTree.getLevelForDistance(degrees)));
- assertThat("width at level " + i, gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("width at level " + i,
+ gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
assertThat("height at level " + i, gNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height
/ GeoUtils.EARTH_POLAR_DISTANCE));
@@ -620,7 +634,8 @@ public void testPrefixTreeCellSizes() {
assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
assertThat(GeoUtils.quadTreeLevelsForPrecision(size), equalTo(quadPrefixTree.getLevelForDistance(degrees)));
- assertThat("width at level " + i, qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("width at level " + i,
+ qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
assertThat("height at level " + i, qNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height
/ GeoUtils.EARTH_POLAR_DISTANCE));
@@ -629,11 +644,16 @@ public void testPrefixTreeCellSizes() {
}
public void testParseGeoPointGeohashPositions() throws IOException {
- assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_LEFT), new GeoPoint(42.890625, -71.71875));
- assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_RIGHT), new GeoPoint(42.890625, -71.3671875));
- assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.71484375, -71.71875));
- assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.BOTTOM_RIGHT), new GeoPoint(42.71484375, -71.3671875));
- assertNormalizedPoint(parseGeohash("drtk", GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.890625, -71.3671875));
+ assertNormalizedPoint(parseGeohash("drt5",
+ GeoUtils.EffectivePoint.TOP_LEFT), new GeoPoint(42.890625, -71.71875));
+ assertNormalizedPoint(parseGeohash("drt5",
+ GeoUtils.EffectivePoint.TOP_RIGHT), new GeoPoint(42.890625, -71.3671875));
+ assertNormalizedPoint(parseGeohash("drt5",
+ GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.71484375, -71.71875));
+ assertNormalizedPoint(parseGeohash("drt5",
+ GeoUtils.EffectivePoint.BOTTOM_RIGHT), new GeoPoint(42.71484375, -71.3671875));
+ assertNormalizedPoint(parseGeohash("drtk",
+ GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.890625, -71.3671875));
}
private GeoPoint parseGeohash(String geohash, GeoUtils.EffectivePoint effectivePoint) throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java
index f64a9e38b871a..6fb77bfb37f7a 100644
--- a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java
+++ b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java
@@ -221,8 +221,10 @@ public void testNestedSorting() throws Exception {
IndexSearcher searcher = new IndexSearcher(directoryReader);
Query parentFilter = new TermQuery(new Term("__type", "parent"));
Query childFilter = Queries.not(parentFilter);
- XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
- ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
+ XFieldComparatorSource nestedComparatorSource =
+ createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
+ ToParentBlockJoinQuery query =
+ new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopFieldDocs topDocs = searcher.search(query, 5, sort);
@@ -255,7 +257,8 @@ public void testNestedSorting() throws Exception {
assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
childFilter = new TermQuery(new Term("filter_1", "T"));
- nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
+ nestedComparatorSource =
+ createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
query = new ToParentBlockJoinQuery(
new ConstantScoreQuery(childFilter),
new QueryBitSetProducer(parentFilter),
@@ -291,7 +294,8 @@ public void testNestedSorting() throws Exception {
assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
- nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter));
+ nestedComparatorSource =
+ createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter));
sort = new Sort(new SortField("field2", nestedComparatorSource, true));
topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
assertThat(topDocs.totalHits.value, equalTo(8L));
@@ -307,7 +311,8 @@ public void testNestedSorting() throws Exception {
assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(8));
- nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
+ nestedComparatorSource =
+ createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
sort = new Sort(new SortField("field2", nestedComparatorSource));
topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
assertThat(topDocs.totalHits.value, equalTo(8L));
@@ -332,8 +337,10 @@ public void testNestedSorting() throws Exception {
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException {
MultiValueMode sortMode = MultiValueMode.AVG;
Query childFilter = Queries.not(parentFilter);
- XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
- Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
+ XFieldComparatorSource nestedComparatorSource =
+ createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
+ Query query =
+ new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopDocs topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));
@@ -352,6 +359,7 @@ protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) th
protected abstract IndexableField createField(String name, int value);
- protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested);
+ protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
+ Object missingValue, Nested nested);
}
diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
index 93945231e2b6f..3b2929796333e 100644
--- a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
@@ -51,7 +51,8 @@ protected String getFieldDataType() {
}
@Override
- protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
+ protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
+ Object missingValue, Nested nested) {
IndexNumericFieldData fieldData = getForField(fieldName);
return new DoubleValuesComparatorSource(fieldData, missingValue, sortMode, nested);
}
@@ -65,8 +66,10 @@ protected IndexableField createField(String name, int value) {
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException {
MultiValueMode sortMode = MultiValueMode.AVG;
Query childFilter = Queries.not(parentFilter);
- XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
- Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
+ XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127,
+ createNested(searcher, parentFilter, childFilter));
+ Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter),
+ new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopDocs topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));
diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
index 2d1ffb1e1a344..bf1a28889403c 100644
--- a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
@@ -51,7 +51,8 @@ protected String getFieldDataType() {
}
@Override
- protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
+ protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
+ Object missingValue, Nested nested) {
IndexNumericFieldData fieldData = getForField(fieldName);
return new FloatValuesComparatorSource(fieldData, missingValue, sortMode, nested);
}
@@ -61,11 +62,14 @@ protected IndexableField createField(String name, int value) {
return new SortedNumericDocValuesField(name, NumericUtils.floatToSortableInt(value));
}
- protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher,
+ IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
MultiValueMode sortMode = MultiValueMode.AVG;
Query childFilter = Queries.not(parentFilter);
- XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
- Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
+ XFieldComparatorSource nestedComparatorSource =
+ createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
+ Query query =
+ new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopDocs topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));
diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
index c8f2a5f9dfac3..1e271c47d6f07 100644
--- a/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
@@ -34,7 +34,8 @@ protected String getFieldDataType() {
}
@Override
- protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
+ protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
+ Object missingValue, Nested nested) {
IndexNumericFieldData fieldData = getForField(fieldName);
return new LongValuesComparatorSource(fieldData, missingValue, sortMode, nested);
}
diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
index 0bee6eeb6ed12..0dccf5937dca7 100644
--- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
@@ -129,7 +129,8 @@ public void testDuel() throws Exception {
searcher.getIndexReader().close();
}
- private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData<?> indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException {
+ private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData<?> indexFieldData, String missingValue,
+ MultiValueMode sortMode, int n, boolean reverse) throws IOException {
Query parentFilter = new TermQuery(new Term("__type", "parent"));
Query childFilter = new TermQuery(new Term("__type", "child"));
SortField sortField = indexFieldData.sortField(missingValue, sortMode, createNested(searcher, parentFilter, childFilter), reverse);
@@ -299,8 +300,10 @@ public void testNestedSorting() throws Exception {
PagedBytesIndexFieldData indexFieldData = getForField("field2");
Query parentFilter = new TermQuery(new Term("__type", "parent"));
Query childFilter = Queries.not(parentFilter);
- BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
- ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
+ BytesRefFieldComparatorSource nestedComparatorSource =
+ new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
+ ToParentBlockJoinQuery query =
+ new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopFieldDocs topDocs = searcher.search(query, 5, sort);
@@ -318,7 +321,8 @@ public void testNestedSorting() throws Exception {
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i"));
sortMode = MultiValueMode.MAX;
- nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
+ nestedComparatorSource =
+ new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
sort = new Sort(new SortField("field2", nestedComparatorSource, true));
topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));
@@ -339,7 +343,8 @@ public void testNestedSorting() throws Exception {
bq.add(parentFilter, Occur.MUST_NOT);
bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST);
childFilter = bq.build();
- nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
+ nestedComparatorSource =
+ new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
query = new ToParentBlockJoinQuery(
new ConstantScoreQuery(childFilter),
new QueryBitSetProducer(parentFilter),
@@ -707,7 +712,8 @@ public void testMultiLevelNestedSorting() throws IOException {
.setFilter(queryBuilder)
.setNestedSort(new NestedSortBuilder("chapters.paragraphs"))
);
- topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
+ topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
+ sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
@@ -715,7 +721,8 @@ public void testMultiLevelNestedSorting() throws IOException {
assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(87L));
sortBuilder.order(SortOrder.DESC);
- topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
+ topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
+ sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
@@ -735,7 +742,8 @@ public void testMultiLevelNestedSorting() throws IOException {
.setFilter(new RangeQueryBuilder("chapters.paragraphs.word_count").from(80L))
)
);
- topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
+ topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
+ sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
@@ -743,7 +751,8 @@ public void testMultiLevelNestedSorting() throws IOException {
assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(Long.MAX_VALUE));
sortBuilder.order(SortOrder.DESC);
- topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
+ topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
+ sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
index 579764d671cba..4042d33568d6a 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -227,10 +227,13 @@ public void testUpdatePriority() {
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 200)));
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
- assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
- client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
+ assertEquals(200,
+ indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
.build()).get();
- assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
+ assertEquals(400,
+ indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
}
public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
@@ -242,7 +245,8 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
.build();
createIndex("test", idxSettings);
ensureGreen("test");
- client().prepareIndex("test", "bar", "1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
+ client().prepareIndex("test", "bar", "1")
+ .setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
SearchResponse response = client().prepareSearch("test").get();
assertHitCount(response, 1L);
client().admin().indices().prepareDelete("test").get();
@@ -513,7 +517,8 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable {
IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
client().prepareDelete("test", "test", "0").get();
- client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
+ client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON)
+ .setRefreshPolicy(IMMEDIATE).get();
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
shard.close("simon says", false);
@@ -579,14 +584,17 @@ public void testCircuitBreakerIncrementedByIndexShard() throws Exception {
.setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get();
// Generate a couple of segments
- client().prepareIndex("test", "_doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
- .setRefreshPolicy(IMMEDIATE).get();
+ client().prepareIndex("test", "_doc", "1")
+ .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
+ .setRefreshPolicy(IMMEDIATE).get();
// Use routing so 2 documents are guaranteed to be on the same shard
String routing = randomAlphaOfLength(5);
- client().prepareIndex("test", "_doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
- .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
- client().prepareIndex("test", "_doc", "3").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
- .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
+ client().prepareIndex("test", "_doc", "2")
+ .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
+ .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
+ client().prepareIndex("test", "_doc", "3")
+ .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
+ .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
checkAccountingBreaker();
// Test that force merging causes the breaker to be correctly adjusted
@@ -600,7 +608,8 @@ public void testCircuitBreakerIncrementedByIndexShard() throws Exception {
// Test that we're now above the parent limit due to the segments
Exception e = expectThrows(Exception.class,
- () -> client().prepareSearch("test").addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get());
+ () -> client().prepareSearch("test")
+ .addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get());
logger.info("--> got: {}", ExceptionsHelper.detailedMessage(e));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("[parent] Data too large, data for []"));
@@ -631,9 +640,10 @@ public static final IndexShard newIndexShard(IndexService indexService, IndexSha
CircuitBreakerService cbs, IndexingOperationListener... listeners) throws IOException {
ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(),
- shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(),
- shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
- indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners), () -> {}, cbs);
+ shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(),
+ indexService.similarityService(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
+ indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners),
+ () -> {}, cbs);
return newShard;
}
@@ -739,7 +749,9 @@ public void testPendingRefreshWithIntervalChange() throws InterruptedException {
}
public void testGlobalCheckpointListeners() throws Exception {
- createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
+ createIndex("test", Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService test = indicesService.indexService(resolveIndex("test"));
@@ -785,7 +797,9 @@ public void testGlobalCheckpointListeners() throws Exception {
}
public void testGlobalCheckpointListenerTimeout() throws InterruptedException {
- createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
+ createIndex("test", Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService test = indicesService.indexService(resolveIndex("test"));
@@ -812,7 +826,9 @@ public void testGlobalCheckpointListenerTimeout() throws InterruptedException {
}
public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Exception {
- createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
+ createIndex("test", Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
.put("index.refresh_interval", -1).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
@@ -838,7 +854,8 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti
}
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
- assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs, (long) searcher.reader().numDocs(), equalTo(numDocs + moreDocs));
+ assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs,
+ (long) searcher.reader().numDocs(), equalTo(numDocs + moreDocs));
}
assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs,
client().search(countRequest).actionGet().getHits().totalHits, equalTo(numDocs + moreDocs));
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 23380f9c171f8..8eede7542bd75 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -613,7 +613,8 @@ public void onFailure(Exception e) {
fail();
}
},
- ThreadPool.Names.WRITE, "")).getMessage(), containsString("in primary mode cannot be a replication target"));
+ ThreadPool.Names.WRITE, "")).getMessage(),
+ containsString("in primary mode cannot be a replication target"));
}
closeShards(indexShard);
@@ -720,7 +721,8 @@ public void onFailure(Exception e) {
assertTrue(onFailure.get());
assertThat(onFailureException.get(), instanceOf(IllegalStateException.class));
assertThat(
- onFailureException.get(), hasToString(containsString("operation primary term [" + (primaryTerm - 1) + "] is too old")));
+ onFailureException.get(),
+ hasToString(containsString("operation primary term [" + (primaryTerm - 1) + "] is too old")));
}
{
@@ -869,7 +871,8 @@ public void testGlobalCheckpointSync() throws IOException {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
- final IndexMetaData.Builder indexMetadata = IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1);
+ final IndexMetaData.Builder indexMetadata =
+ IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1);
final AtomicBoolean synced = new AtomicBoolean();
final IndexShard primaryShard =
newShard(shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), () -> synced.set(true));
@@ -1237,7 +1240,8 @@ public void testShardStatsWithFailures() throws IOException {
allowShardFailures();
final ShardId shardId = new ShardId("index", "_na_", 0);
final ShardRouting shardRouting =
- newShardRouting(shardId, "node", true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
+ newShardRouting(shardId, "node", true,
+ ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
@@ -1771,7 +1775,8 @@ public void testPrimaryHandOffUpdatesLocalCheckpoint() throws IOException {
for (int i = 0; i < totalOps; i++) {
indexDoc(primarySource, "_doc", Integer.toString(i));
}
- IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1));
+ IndexShardTestCase.updateRoutingEntry(primarySource,
+ primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1));
final IndexShard primaryTarget = newShard(primarySource.routingEntry().getTargetRelocatingShard());
updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetaData());
recoverReplica(primaryTarget, primarySource, true);
@@ -1794,7 +1799,8 @@ public void testRecoverFromStoreWithNoOps() throws IOException {
updateMappings(otherShard, shard.indexSettings().getIndexMetaData());
SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "_doc", "1",
new BytesArray("{}"), XContentType.JSON);
- otherShard.applyIndexOperationOnReplica(1, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
+ otherShard.applyIndexOperationOnReplica(1, 1,
+ IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
final ShardRouting primaryShardRouting = shard.routingEntry();
IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting,
@@ -1880,7 +1886,8 @@ public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception {
assertTrue(ex.getMessage().contains("failed to fetch index version after copying it over"));
}
- routing = ShardRoutingHelper.moveToUnassigned(routing, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so"));
+ routing = ShardRoutingHelper.moveToUnassigned(routing,
+ new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so"));
routing = ShardRoutingHelper.initialize(routing, newShard.routingEntry().currentNodeId());
assertTrue("it's already recovering, we should ignore new ones", newShard.ignoreRecoveryAttempt());
try {
@@ -2019,7 +2026,8 @@ public void testSearcherWrapperIsUsed() throws IOException {
shard.refresh("test");
try (Engine.GetResult getResult = shard
- .get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
+ .get(new Engine.Get(false, false, "test", "1",
+ new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
assertTrue(getResult.exists());
assertNotNull(getResult.searcher());
}
@@ -2060,7 +2068,8 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
assertEquals(search.totalHits.value, 1);
}
try (Engine.GetResult getResult = newShard
- .get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
+ .get(new Engine.Get(false, false, "test", "1",
+ new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
assertTrue(getResult.exists());
assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader
assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader);
@@ -2476,7 +2485,8 @@ public void testRecoverFromLocalShard() throws IOException {
}
assertThat(requestedMappingUpdates, hasKey("_doc"));
- assertThat(requestedMappingUpdates.get("_doc").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
+ assertThat(requestedMappingUpdates.get("_doc").get().source().string(),
+ equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
closeShards(sourceShard, targetShard);
}
@@ -2843,7 +2853,8 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception {
.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum")))
.build();
final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData,
- null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER);
+ null, null, indexShard.engineFactory,
+ indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER);
Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata();
assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1);
@@ -2913,7 +2924,8 @@ private Result indexOnReplicaWithGaps(
final String id = Integer.toString(i);
SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", id,
new BytesArray("{}"), XContentType.JSON);
- indexShard.applyIndexOperationOnReplica(i, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
+ indexShard.applyIndexOperationOnReplica(i, 1,
+ IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
if (!gap && i == localCheckpoint + 1) {
localCheckpoint++;
}
@@ -3346,14 +3358,16 @@ public void testFlushOnInactive() throws Exception {
.putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1).build();
- ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState
- .INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
+ ShardRouting shardRouting =
+ TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true,
+ ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
final ShardId shardId = shardRouting.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
AtomicBoolean markedInactive = new AtomicBoolean();
AtomicReference primaryRef = new AtomicReference<>();
- IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, new InternalEngineFactory(), () -> {
+ IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null,
+ new InternalEngineFactory(), () -> {
}, new IndexEventListener() {
@Override
public void onShardInactive(IndexShard indexShard) {
diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java
index fda2f8ef7d039..c666c63a4b4e6 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java
@@ -43,8 +43,10 @@ public void testLoadShardPath() throws IOException {
ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0);
Path[] paths = env.availableShardPaths(shardId);
Path path = randomFrom(paths);
- ShardStateMetaData.FORMAT.write(new ShardStateMetaData(true, "0xDEADBEEF", AllocationId.newInitializing()), path);
- ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(true, "0xDEADBEEF",
+ AllocationId.newInitializing()), path);
+ ShardPath shardPath =
+ ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
assertEquals(path, shardPath.getDataPath());
assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID());
assertEquals("foo", shardPath.getShardId().getIndexName());
@@ -87,7 +89,8 @@ public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException {
public void testIllegalCustomDataPath() {
Index index = new Index("foo", "foo");
final Path path = createTempDir().resolve(index.getUUID()).resolve("0");
- Exception e = expectThrows(IllegalArgumentException.class, () -> new ShardPath(true, path, path, new ShardId(index, 0)));
+ Exception e = expectThrows(IllegalArgumentException.class, () ->
+ new ShardPath(true, path, path, new ShardId(index, 0)));
assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths"));
}
diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
index 14ef65c368cf6..9fc83cd39de06 100644
--- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
+++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
@@ -146,7 +146,8 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
- .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
+ // no translog based flush - it might change the .liv / segments.N files
+ .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
));
ensureGreen();
disableAllocation("test");
@@ -222,7 +223,8 @@ public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard,
}
};
- for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
+ for (MockIndexEventListener.TestEventListener eventListener :
+ internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
eventListener.setNewDelegate(listener);
}
try {
@@ -230,7 +232,8 @@ public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard,
latch.await();
assertThat(exception, empty());
} finally {
- for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
+ for (MockIndexEventListener.TestEventListener eventListener :
+ internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
eventListener.setNewDelegate(null);
}
}
@@ -248,7 +251,8 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
- .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
+ // no translog based flush - it might change the .liv / segments.N files
+ .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
));
ensureGreen();
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
@@ -284,7 +288,8 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted
}
assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
ClusterState state = client().admin().cluster().prepareState().get().getState();
- GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
+ GroupShardsIterator<ShardIterator> shardIterators =
+ state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
for (ShardIterator iterator : shardIterators) {
ShardRouting routing;
while ((routing = iterator.nextOrNull()) != null) {
@@ -338,8 +343,10 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionExc
final AtomicBoolean corrupt = new AtomicBoolean(true);
final CountDownLatch hasCorrupted = new CountDownLatch(1);
for (NodeStats dataNode : dataNodeStats) {
- MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
- mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> {
+ MockTransportService mockTransportService =
+ ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+ mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
+ (connection, requestId, action, request, options) -> {
if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes;
@@ -353,7 +360,8 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionExc
Settings build = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
- .put("index.routing.allocation.include._name", primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build();
+ .put("index.routing.allocation.include._name",
+ primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build();
client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
client().admin().cluster().prepareReroute().get();
hasCorrupted.await();
@@ -369,7 +377,9 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, Interrupte
int numDocs = scaledRandomIntBetween(100, 1000);
internalCluster().ensureAtLeastNumDataNodes(2);
if (cluster().numDataNodes() < 3) {
- internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
+ internalCluster().startNode(Settings.builder()
+ .put(Node.NODE_DATA_SETTING.getKey(), true)
+ .put(Node.NODE_MASTER_SETTING.getKey(), false));
}
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
List<NodeStats> dataNodeStats = new ArrayList<>();
@@ -406,14 +416,17 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, Interrupte
assertHitCount(countResponse, numDocs);
final boolean truncate = randomBoolean();
for (NodeStats dataNode : dataNodeStats) {
- MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
- mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> {
+ MockTransportService mockTransportService =
+ ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+ mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
+ (connection, requestId, action, request, options) -> {
if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
if (truncate && req.length() > 1) {
BytesRef bytesRef = req.content().toBytesRef();
BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
- request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
+ request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(),
+ array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
} else {
assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!";
final byte[] array = req.content().toBytesRef().bytes;
@@ -466,12 +479,16 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I
int numDocs = scaledRandomIntBetween(100, 1000);
internalCluster().ensureAtLeastNumDataNodes(2);
- assertAcked(prepareCreate("test").setSettings(Settings.builder()
- .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
- .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
- .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
- ));
+ assertAcked(
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
+ .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+ // no checkindex - we corrupt shards on purpose
+ .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
+ // no translog based flush - it might change the .liv / segments.N files
+ .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+ )
+ );
ensureGreen();
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < builders.length; i++) {
@@ -496,10 +513,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
logger.info("--> snapshot");
- final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
- .setWaitForCompletion(true)
- .setIndices("test")
- .get();
+ final CreateSnapshotResponse createSnapshotResponse =
+ client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(true)
+ .setIndices("test")
+ .get();
final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state();
logger.info("--> snapshot terminated with state " + snapshotState);
final List files = listShardFiles(shardRouting);
@@ -623,7 +641,8 @@ private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFile
Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
for (FsInfo.Path info : nodeStatses.getNodes().get(0).getFs()) {
String path = info.getPath();
- Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
+ Path file = PathUtils.get(path)
+ .resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
if (Files.exists(file)) { // multi data path might only have one path in use
try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
for (Path item : stream) {
@@ -666,8 +685,16 @@ private void pruneOldDeleteGenerations(Set<Path> files) {
final String newSegmentName = IndexFileNames.parseSegmentName(current.getFileName().toString());
final String oldSegmentName = IndexFileNames.parseSegmentName(last.getFileName().toString());
if (newSegmentName.equals(oldSegmentName)) {
- int oldGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""), Character.MAX_RADIX);
- int newGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""), Character.MAX_RADIX);
+ int oldGen =
+ Integer.parseInt(
+ IndexFileNames.stripExtension(
+ IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""),
+ Character.MAX_RADIX
+ );
+ int newGen = Integer.parseInt(
+ IndexFileNames.stripExtension(
+ IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""),
+ Character.MAX_RADIX);
if (newGen > oldGen) {
files.remove(last);
} else {
diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java
index 71284792a6817..7936e8efd5624 100644
--- a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java
+++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java
@@ -91,7 +91,8 @@ public void testCorruptTranslogFiles() throws Exception {
// Restart the single node
internalCluster().fullRestart();
- client().admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get();
+ client().admin().cluster().prepareHealth().setWaitForYellowStatus().
+ setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get();
try {
client().prepareSearch("test").setQuery(matchAllQuery()).get();
@@ -130,13 +131,15 @@ private void corruptRandomTranslogFile() throws IOException {
/** Disables translog flushing for the specified index */
private static void disableTranslogFlush(String index) {
- Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build();
+ Settings settings = Settings.builder()
+ .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}
/** Enables translog flushing for the specified index */
private static void enableTranslogFlush(String index) {
- Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build();
+ Settings settings = Settings.builder()
+ .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}
}
diff --git a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java
index 6fa32df288512..155473c83cf30 100644
--- a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java
+++ b/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java
@@ -60,7 +60,8 @@ private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingV
}
Settings settings = settingsBuilder.build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
- FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
+ FsDirectoryService service = new FsDirectoryService(indexSettings, null,
+ new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
switch (type) {
case NIOFS:
diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java
index 6546e6ebc8c61..7cf14bf84e331 100644
--- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java
+++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java
@@ -193,9 +193,10 @@ public void testVerifyingIndexOutput() throws IOException {
public void testVerifyingIndexOutputOnEmptyFile() throws IOException {
Directory dir = newDirectory();
- IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0),
- MIN_SUPPORTED_LUCENE_VERSION),
- dir.createOutput("foo1.bar", IOContext.DEFAULT));
+ IndexOutput verifyingOutput =
+ new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0),
+ MIN_SUPPORTED_LUCENE_VERSION),
+ dir.createOutput("foo1.bar", IOContext.DEFAULT));
try {
Store.verify(verifyingOutput);
fail("should be a corrupted index");
@@ -296,13 +297,15 @@ public void testNewChecksums() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
// set default codec - all segments need checksums
- IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
+ IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(),
+ new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
int docs = 1 + random().nextInt(100);
for (int i = 0; i < docs; i++) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
@@ -311,7 +314,8 @@ public void testNewChecksums() throws IOException {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
@@ -459,10 +463,13 @@ public void assertDeleteContent(Store store, Directory dir) throws IOException {
public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
for (String file : store.directory().listAll()) {
- if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
- assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
+ if (!IndexWriter.WRITE_LOCK_NAME.equals(file) &&
+ !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
+ assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " +
+ store.directory().listAll().length, metadata.asMap().containsKey(file));
} else {
- assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
+ assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " +
+ store.directory().listAll().length, metadata.asMap().containsKey(file));
}
}
}
@@ -473,7 +480,8 @@ public void testRecoveryDiff() throws IOException, InterruptedException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
docs.add(doc);
}
@@ -595,14 +603,17 @@ public void testRecoveryDiff() throws IOException, InterruptedException {
Store.MetadataSnapshot newCommitMetaData = store.getMetadata(null);
Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
if (delFile != null) {
- assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
+ assertThat(newCommitDiff.identical.size(),
+ equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
} else {
- assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
+ assertThat(newCommitDiff.identical.size(),
+ equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
assertThat(newCommitDiff.different.size(), equalTo(0));
- assertThat(newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different
+ assertThat(newCommitDiff.missing.size(),
+ equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different
}
deleteContent(store.directory());
@@ -613,7 +624,8 @@ public void testCleanupFromSnapshot() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
// this time random codec....
- IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
+ IndexWriterConfig indexWriterConfig =
+ newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
// we keep all commits and that allows us clean based on multiple snapshots
indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
@@ -626,7 +638,8 @@ public void testCleanupFromSnapshot() throws IOException {
}
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
@@ -635,7 +648,8 @@ public void testCleanupFromSnapshot() throws IOException {
writer.commit();
Document doc = new Document();
doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
@@ -647,7 +661,8 @@ public void testCleanupFromSnapshot() throws IOException {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
@@ -695,7 +710,8 @@ public void testCleanupFromSnapshot() throws IOException {
}
public void testOnCloseCallback() throws IOException {
- final ShardId shardId = new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100));
+ final ShardId shardId =
+ new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100));
final AtomicInteger count = new AtomicInteger(0);
final ShardLock lock = new DummyShardLock(shardId);
@@ -797,8 +813,10 @@ public void testMetadataSnapshotStreaming() throws Exception {
}
protected Store.MetadataSnapshot createMetaDataSnapshot() {
- StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
- StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
+ StoreFileMetaData storeFileMetaData1 =
+ new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
+ StoreFileMetaData storeFileMetaData2 =
+ new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
Map<String, StoreFileMetaData> storeFileMetaDataMap = new HashMap<>();
storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1);
storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2);
@@ -839,7 +857,9 @@ public void testUserDataRead() throws IOException {
public void testStreamStoreFilesMetaData() throws Exception {
Store.MetadataSnapshot metadataSnapshot = createMetaDataSnapshot();
- TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData = new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0),metadataSnapshot);
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData =
+ new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0),
+ metadataSnapshot);
ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
org.elasticsearch.Version targetNodeVersion = randomVersion(random());
@@ -848,7 +868,8 @@ public void testStreamStoreFilesMetaData() throws Exception {
ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
in.setVersion(targetNodeVersion);
- TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in);
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData =
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in);
Iterator<StoreFileMetaData> outFiles = outStoreFileMetaData.iterator();
for (StoreFileMetaData inFile : inStoreFileMetaData) {
assertThat(inFile.name(), equalTo(outFiles.next().name()));
@@ -867,7 +888,8 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
- doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body",
+ TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
docs.add(doc);
}
@@ -893,7 +915,8 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {
// expected
}
assertTrue(store.isMarkedCorrupted());
- Lucene.cleanLuceneIndex(store.directory()); // we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call
+ // we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call
+ Lucene.cleanLuceneIndex(store.directory());
store.close();
}
@@ -962,7 +985,8 @@ public void testCanReadOldCorruptionMarker() throws IOException {
store.failIfCorrupted();
fail("should be corrupted");
} catch (CorruptIndexException e) {
- assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +"] caused by: CorruptIndexException[foo (resource=bar)]"));
+ assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +
+ "] caused by: CorruptIndexException[foo (resource=bar)]"));
assertTrue(e.getMessage().contains(ExceptionsHelper.stackTrace(exception)));
}
@@ -977,7 +1001,8 @@ public void testCanReadOldCorruptionMarker() throws IOException {
store.failIfCorrupted();
fail("should be corrupted");
} catch (CorruptIndexException e) {
- assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + "] caused by: CorruptIndexException[foo (resource=bar)]"));
+ assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +
+ "] caused by: CorruptIndexException[foo (resource=bar)]"));
assertFalse(e.getMessage().contains(ExceptionsHelper.stackTrace(exception)));
}
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 19c25fed0725d..915cbdd260cef 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -524,7 +524,8 @@ public void testUncommittedOperations() throws Exception {
}
public void testTotalTests() {
- final TranslogStats total = new TranslogStats(0, 0, 0, 0, 1);
+ final TranslogStats total =
+ new TranslogStats(0, 0, 0, 0, 1);
final int n = randomIntBetween(0, 16);
final List<TranslogStats> statsList = new ArrayList<>(n);
for (int i = 0; i < n; i++) {
@@ -552,21 +553,27 @@ public void testTotalTests() {
}
public void testNegativeNumberOfOperations() {
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1, 1));
+ IllegalArgumentException e =
+ expectThrows(IllegalArgumentException.class,
+ () -> new TranslogStats(-1, 1, 1, 1, 1));
assertThat(e, hasToString(containsString("numberOfOperations must be >= 0")));
- e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1, 1));
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new TranslogStats(1, 1, -1, 1, 1));
assertThat(e, hasToString(containsString("uncommittedOperations must be >= 0")));
}
public void testNegativeSizeInBytes() {
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1));
+ IllegalArgumentException e =
+ expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1));
assertThat(e, hasToString(containsString("translogSizeInBytes must be >= 0")));
- e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1, 1));
+ e = expectThrows(IllegalArgumentException.class,
+ () -> new TranslogStats(1, 1, 1, -1, 1));
assertThat(e, hasToString(containsString("uncommittedSizeInBytes must be >= 0")));
}
public void testOldestEntryInSeconds() {
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1));
+ IllegalArgumentException e =
+ expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1));
assertThat(e, hasToString(containsString("earliestLastModifiedAge must be >= 0")));
}
@@ -687,7 +694,8 @@ public void testSeqNoFilterSnapshot() throws Exception {
List<Long> batch = LongStream.rangeClosed(0, between(0, 100)).boxed().collect(Collectors.toList());
Randomness.shuffle(batch);
for (long seqNo : batch) {
- Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
+ Translog.Index op =
+ new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
translog.add(op);
}
translog.rollGeneration();
@@ -767,7 +775,8 @@ public void testConcurrentWritesWithVaryingSize() throws Throwable {
final CountDownLatch downLatch = new CountDownLatch(1);
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
- threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
+ threads[i] =
+ new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
threads[i].setDaemon(true);
threads[i].start();
}
@@ -832,7 +841,9 @@ public void testTranslogCorruption() throws Exception {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
String ascii = randomAlphaOfLengthBetween(1, 50);
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
+ locations.add(
+ translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))
+ );
}
translog.close();
@@ -859,7 +870,9 @@ public void testTruncatedTranslogs() throws Exception {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
String ascii = randomAlphaOfLengthBetween(1, 50);
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
+ locations.add(translog.add(
+ new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))
+ );
}
translog.sync();
@@ -1118,13 +1131,16 @@ public void testSyncUpTo() throws IOException {
for (int op = 0; op < translogOperations; op++) {
int seqNo = ++count;
final Translog.Location location =
- translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(),
+ Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(location));
- assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
+ // we are the last location so everything should be synced
+ assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded());
seqNo = ++count;
- translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(),
+ Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
assertTrue("one pending operation", translog.syncNeeded());
assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now
assertTrue("we only synced a previous operation yet", translog.syncNeeded());
@@ -1153,17 +1169,20 @@ public void testSyncUpToStream() throws IOException {
rollAndCommit(translog); // do this first so that there is at least one pending tlog entry
}
final Translog.Location location =
- translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
locations.add(location);
}
Collections.shuffle(locations, random());
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
- assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
+ // we are the last location so everything should be synced
+ assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded());
} else if (rarely()) {
rollAndCommit(translog);
- assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now
+ // not syncing now
+ assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream()));
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
} else {
translog.sync();
@@ -1181,7 +1200,8 @@ public void testLocationComparison() throws IOException {
int count = 0;
for (int op = 0; op < translogOperations; op++) {
locations.add(
- translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
+ translog.add(new Translog.Index("test", "" + op, op,
+ primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
if (rarely() && translogOperations > op + 1) {
rollAndCommit(translog);
}
@@ -1218,7 +1238,8 @@ public void testBasicCheckpoint() throws IOException {
int lastSynced = -1;
long lastSyncedGlobalCheckpoint = globalCheckpoint.get();
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (randomBoolean()) {
globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16));
}
@@ -1233,7 +1254,8 @@ public void testBasicCheckpoint() throws IOException {
Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME));
- try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
+ try (TranslogReader reader =
+ translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
assertEquals(lastSynced + 1, reader.totalOperations());
TranslogSnapshot snapshot = reader.newSnapshot();
@@ -1276,7 +1298,8 @@ public void testTranslogWriter() throws IOException {
}
writer.sync();
- final BaseTranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
+ final BaseTranslogReader reader = randomBoolean() ? writer :
+ translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
for (int i = 0; i < numOps; i++) {
ByteBuffer buffer = ByteBuffer.allocate(4);
reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
@@ -1354,7 +1377,8 @@ public void testBasicRecovery() throws IOException {
int minUncommittedOp = -1;
final boolean commitOften = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
final boolean commit = commitOften ? frequently() : rarely();
if (commit && op < translogOperations - 1) {
rollAndCommit(translog);
@@ -1375,8 +1399,10 @@ public void testBasicRecovery() throws IOException {
assertNull(snapshot.next());
}
} else {
- translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
- assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
+ translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(),
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+ assertEquals("lastCommitted must be 1 less than current",
+ translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) {
for (int i = minUncommittedOp; i < translogOperations; i++) {
@@ -1397,7 +1423,8 @@ public void testRecoveryUncommitted() throws IOException {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@@ -1414,9 +1441,11 @@ public void testRecoveryUncommitted() throws IOException {
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
- try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
- assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
+ assertEquals("lastCommitted must be 2 less than current - we never finished the commit",
+ translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
int upTo = sync ? translogOperations : prepareOp;
@@ -1428,7 +1457,8 @@ public void testRecoveryUncommitted() throws IOException {
}
}
if (randomBoolean()) { // recover twice
- try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
@@ -1438,7 +1468,8 @@ public void testRecoveryUncommitted() throws IOException {
for (int i = 0; i < upTo; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
- assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
+ assertEquals("payload mismatch, synced: " + sync, i,
+ Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
}
@@ -1453,7 +1484,8 @@ public void testRecoveryUncommittedFileExists() throws IOException {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@@ -1474,9 +1506,11 @@ public void testRecoveryUncommittedFileExists() throws IOException {
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
- try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
- assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
+ assertEquals("lastCommitted must be 2 less than current - we never finished the commit",
+ translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
int upTo = sync ? translogOperations : prepareOp;
@@ -1489,7 +1523,8 @@ public void testRecoveryUncommittedFileExists() throws IOException {
}
if (randomBoolean()) { // recover twice
- try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
@@ -1499,7 +1534,8 @@ public void testRecoveryUncommittedFileExists() throws IOException {
for (int i = 0; i < upTo; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
- assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
+ assertEquals("payload mismatch, synced: " + sync, i,
+ Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
}
@@ -1513,7 +1549,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op,
+ primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@@ -1528,21 +1565,28 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
- Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.NO_OPS_PERFORMED, 0);
- Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
+ Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0,
+ SequenceNumbers.NO_OPS_PERFORMED, 0);
+ Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)),
+ corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
- try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog ignored = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
fail("corrupted");
} catch (IllegalStateException ex) {
assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3025, " +
- "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} but got: Checkpoint{offset=0, numOps=0, " +
- "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage());
- }
- Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
- try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} " +
+ "but got: Checkpoint{offset=0, numOps=0, generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, " +
+ "minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage());
+ }
+ Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)),
+ read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
+ try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
- assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
+ assertEquals("lastCommitted must be 2 less than current - we never finished the commit",
+ translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
int upTo = sync ? translogOperations : prepareOp;
@@ -1560,7 +1604,8 @@ public void testSnapshotFromStreamInput() throws IOException {
List<Translog.Operation> ops = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
- Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")));
+ Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")));
ops.add(test);
}
Translog.writeOperations(out, ops);
@@ -1687,7 +1732,8 @@ public void testRandomExceptionsOnTrimOperations( ) throws Exception {
TranslogConfig config = getTranslogConfig(tempDir);
List<FileChannel> fileChannels = new ArrayList<>();
final Translog failableTLog =
- getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy(), fileChannels);
+ getFailableTranslog(fail, config, randomBoolean(),
+ false, null, createTranslogDeletionPolicy(), fileChannels);
IOException expectedException = null;
int translogOperations = 0;
@@ -1761,8 +1807,10 @@ public void testLocationHashCodeEquals() throws IOException {
int translogOperations = randomIntBetween(10, 100);
try (Translog translog2 = create(createTempDir())) {
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
- locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
}
int iters = randomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
@@ -1788,7 +1836,8 @@ public void testOpenForeignTranslog() throws IOException {
int translogOperations = randomIntBetween(1, 10);
int firstUncommitted = 0;
for (int op = 0; op < translogOperations; op++) {
- locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (randomBoolean()) {
rollAndCommit(translog);
firstUncommitted = op + 1;
@@ -1820,10 +1869,12 @@ public void testOpenForeignTranslog() throws IOException {
}
public void testFailOnClosedWrite() throws IOException {
- translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(),
+ Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
- translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(),
+ Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
fail("closed");
} catch (AlreadyClosedException ex) {
// all is well
@@ -1843,7 +1894,8 @@ public void testCloseConcurrently() throws Throwable {
final AtomicLong seqNoGenerator = new AtomicLong();
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
- threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
+ threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId,
+ writtenOperations, seqNoGenerator, threadExceptions);
threads[i].setDaemon(true);
threads[i].start();
}
@@ -1941,7 +1993,8 @@ public void testFailFlush() throws IOException {
while (failed == false) {
try {
locations.add(translog.add(
- new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
+ new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(),
+ Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
translog.sync();
opsSynced++;
} catch (MockDirectoryWrapper.FakeIOException ex) {
@@ -1962,7 +2015,8 @@ public void testFailFlush() throws IOException {
if (randomBoolean()) {
try {
locations.add(translog.add(
- new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
+ new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(),
+ Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
fail("we are already closed");
} catch (AlreadyClosedException ex) {
assertNotNull(ex.getCause());
@@ -1996,14 +2050,17 @@ public void testFailFlush() throws IOException {
translog.close(); // we are closed
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
- try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
- assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
+ try (Translog tlog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ assertEquals("lastCommitted must be 1 less than current",
+ translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
assertEquals(opsSynced, snapshot.totalOperations());
for (int i = 0; i < opsSynced; i++) {
- assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1,
+ assertEquals("expected operation" + i + " to be in the previous translog but wasn't",
+ tlog.currentFileGeneration() - 1,
locations.get(i).generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
@@ -2019,11 +2076,13 @@ public void testTranslogOpsCountIsCorrect() throws IOException {
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
locations.add(translog.add(
- new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
+ new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(),
+ lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
try (Translog.Snapshot snapshot = this.translog.newSnapshot()) {
assertEquals(opsAdded + 1, snapshot.totalOperations());
for (int i = 0; i < opsAdded; i++) {
- assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(),
+ assertEquals("expected operation" + i + " to be in the current translog but wasn't",
+ translog.currentFileGeneration(),
locations.get(i).generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
@@ -2036,13 +2095,16 @@ public void testTragicEventCanBeAnyException() throws IOException {
Path tempDir = createTempDir();
final FailSwitch fail = new FailSwitch();
TranslogConfig config = getTranslogConfig(tempDir);
- Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy());
+ Translog translog = getFailableTranslog(fail, config, false, true, null,
+ createTranslogDeletionPolicy());
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly
- translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(),
+ lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
fail.failAlways();
try {
Translog.Location location = translog.add(
- new Translog.Index("test", "2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
+ new Translog.Index("test", "2", 1, primaryTerm.get(),
+ lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
translog.ensureSynced(location);
} else {
@@ -2076,7 +2138,8 @@ public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException,
List<LocationOperation> writtenOperations = Collections.synchronizedList(new ArrayList<>());
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
- threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, seqNoGenerator, threadExceptions) {
+ threads[i] = new TranslogThread(translog, downLatch, 200, threadId,
+ writtenOperations, seqNoGenerator, threadExceptions) {
@Override
protected Translog.Location add(Translog.Operation op) throws IOException {
Translog.Location add = super.add(op);
@@ -2132,7 +2195,8 @@ protected void afterAdd() throws IOException {
}
}
try (Translog tlog =
- new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+ new Translog(config, translogUUID, createTranslogDeletionPolicy(),
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
Translog.Snapshot snapshot = tlog.newSnapshot()) {
if (writtenOperations.size() != snapshot.totalOperations()) {
for (int i = 0; i < threadCount; i++) {
@@ -2143,7 +2207,8 @@ protected void afterAdd() throws IOException {
}
assertEquals(writtenOperations.size(), snapshot.totalOperations());
for (int i = 0; i < writtenOperations.size(); i++) {
- assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation);
+ assertEquals("expected operation" + i + " to be in the previous translog but wasn't",
+ tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals(next, writtenOperations.get(i).operation);
@@ -2159,7 +2224,8 @@ protected void afterAdd() throws IOException {
public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
- translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2167,7 +2233,8 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
translog.rollGeneration();
long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
- translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2178,7 +2245,8 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
- translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+ translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
assertThat(translog.getMinFileGeneration(), equalTo(1L));
// no trimming done yet, just recovered
for (long gen = 1; gen < translog.currentFileGeneration(); gen++) {
@@ -2209,7 +2277,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException {
translogUUID = translog.getTranslogUUID();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
- translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2217,7 +2286,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException {
translog.rollGeneration();
comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
- translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
+ Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2234,7 +2304,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException {
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
- try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
// we don't know when things broke exactly
assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L));
assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration));
@@ -2300,8 +2371,10 @@ private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig
}
boolean success = false;
try {
- final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
- ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
+ // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
+ final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp");
+ ThrowingFileChannel throwingFileChannel =
+ new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
success = true;
return throwingFileChannel;
} finally {
@@ -2337,7 +2410,8 @@ public static class ThrowingFileChannel extends FilterFileChannel {
private final boolean partialWrite;
private final boolean throwUnknownException;
- public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
+ public ThrowingFileChannel(FailSwitch fail, boolean partialWrite,
+ boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
super(delegate);
this.fail = fail;
this.partialWrite = partialWrite;
@@ -2426,7 +2500,8 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException {
translog.add(new Translog.Index("test", "boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
- new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) {
+ new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(),
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) {
@Override
protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint)
throws IOException {
@@ -2441,7 +2516,8 @@ protected TranslogWriter createWriter(long fileGeneration, long initialMinTransl
}
public void testRecoverWithUnbackedNextGen() throws IOException {
- translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(),
+ Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
@@ -2457,7 +2533,8 @@ public void testRecoverWithUnbackedNextGen() throws IOException {
assertNotNull("operation 1 must be non-null", op);
assertEquals("payload mismatch for operation 1", 1, Integer.parseInt(op.getSource().source.utf8ToString()));
- tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
+ tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(),
+ Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
}
try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
@@ -2466,16 +2543,19 @@ public void testRecoverWithUnbackedNextGen() throws IOException {
Translog.Operation secondOp = snapshot.next();
assertNotNull("operation 2 must be non-null", secondOp);
- assertEquals("payload mismatch for operation 2", Integer.parseInt(secondOp.getSource().source.utf8ToString()), 2);
+ assertEquals("payload mismatch for operation 2",
+ Integer.parseInt(secondOp.getSource().source.utf8ToString()), 2);
Translog.Operation firstOp = snapshot.next();
assertNotNull("operation 1 must be non-null", firstOp);
- assertEquals("payload mismatch for operation 1", Integer.parseInt(firstOp.getSource().source.utf8ToString()), 1);
+ assertEquals("payload mismatch for operation 1",
+ Integer.parseInt(firstOp.getSource().source.utf8ToString()), 1);
}
}
public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
- translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(),
+ Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
@@ -2483,13 +2563,15 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
// don't copy the new file
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
- TranslogException ex = expectThrows(TranslogException.class, () -> new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get));
+ TranslogException ex = expectThrows(TranslogException.class, () -> new Translog(config, translog.getTranslogUUID(),
+ translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get));
assertEquals(ex.getMessage(), "failed to create new translog file");
assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class);
}
public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
- translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
+ translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(),
+ Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
@@ -2501,7 +2583,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
// we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
- try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
+ try (Translog tlog = new Translog(config, translogUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
for (int i = 0; i < 1; i++) {
@@ -2510,7 +2593,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
- tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
+ tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(),
+ Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
TranslogException ex = expectThrows(TranslogException.class,
@@ -2541,12 +2625,14 @@ public void testWithRandomException() throws IOException {
String generationUUID = null;
try {
boolean committing = false;
- final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, createTranslogDeletionPolicy());
+ final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false,
+ generationUUID, createTranslogDeletionPolicy());
try {
LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
String doc = lineFileDocs.nextDoc().toString();
- failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), doc.getBytes(Charset.forName("UTF-8"))));
+ failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(),
+ doc.getBytes(Charset.forName("UTF-8"))));
unsynced.add(doc);
if (randomBoolean()) {
failableTLog.sync();
@@ -2554,7 +2640,8 @@ public void testWithRandomException() throws IOException {
unsynced.clear();
}
if (randomFloat() < 0.1) {
- failableTLog.sync(); // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails
+ // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails
+ failableTLog.sync();
syncedDocs.addAll(unsynced);
unsynced.clear();
failableTLog.rollGeneration();
@@ -2620,9 +2707,11 @@ public void testWithRandomException() throws IOException {
deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
if (generationUUID == null) {
// we never managed to successfully create a translog, make it
- generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
+ generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(),
+ SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
}
- try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+ try (Translog translog = new Translog(config, generationUUID, deletionPolicy,
+ () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
Translog.Snapshot snapshot = translog.newSnapshotFromGen(
new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) {
assertEquals(syncedDocs.size(), snapshot.totalOperations());
@@ -2655,7 +2744,8 @@ private Checkpoint randomCheckpoint() {
public void testCheckpointOnDiskFull() throws IOException {
final Checkpoint checkpoint = randomCheckpoint();
Path tempDir = createTempDir();
- Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
+ Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint,
+ StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final Checkpoint checkpoint2 = randomCheckpoint();
try {
Checkpoint.write((p, o) -> {
@@ -2719,7 +2809,8 @@ public void testTranslogOpSerialization() throws Exception {
document.add(seqID.seqNo);
document.add(seqID.seqNoDocValue);
document.add(seqID.primaryTerm);
- ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, XContentType.JSON,
+ ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null,
+ Arrays.asList(document), B_1, XContentType.JSON,
null);
Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
@@ -2948,7 +3039,8 @@ public void testSnapshotReadOperationInReverse() throws Exception {
for (int gen = 0; gen < generations; gen++) {
final int operations = randomIntBetween(1, 100);
for (int i = 0; i < operations; i++) {
- Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1});
+ Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10),
+ seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1});
translog.add(op);
views.peek().add(op);
}
@@ -2973,7 +3065,8 @@ public void testSnapshotDedupOperations() throws Exception {
List<Long> batch = LongStream.rangeClosed(0, between(0, 500)).boxed().collect(Collectors.toList());
Randomness.shuffle(batch);
for (Long seqNo : batch) {
- Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
+ Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10),
+ seqNo, primaryTerm.get(), new byte[]{1});
translog.add(op);
latestOperations.put(op.seqNo(), op);
}
@@ -3006,7 +3099,8 @@ public void testCloseSnapshotTwice() throws Exception {
public void testTranslogCloseInvariant() throws IOException {
assumeTrue("test only works with assertions enabled", Assertions.ENABLED);
class MisbehavingTranslog extends Translog {
- MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException {
+ MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy,
+ LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException {
super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier);
}
@@ -3035,7 +3129,8 @@ void callCloseOnTragicEvent() {
final TranslogConfig translogConfig = getTranslogConfig(path);
final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings());
final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
- MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get);
+ MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy,
+ () -> globalCheckpoint.get(), primaryTerm::get);
expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseDirectly());
expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtils());