Skip to content

Commit

Permalink
Merge branch '5.6' into static-deprecation-logger
Browse files Browse the repository at this point in the history
* 5.6:
  Allow not configure logging without config
  Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS (elastic#26127)
  Update reference from DateHistogram to Histogram (elastic#26169)
  • Loading branch information
jasontedor committed Aug 14, 2017
2 parents 9f5ffe7 + 54620ff commit 476e641
Show file tree
Hide file tree
Showing 11 changed files with 140 additions and 17 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,15 @@ static int main(final String[] args, final Elasticsearch elasticsearch, final Te
return elasticsearch.main(args, terminal);
}

@Override
protected boolean shouldConfigureLoggingWithoutConfig() {
/*
 * If we allow logging to be configured without a config before we are ready to read the log4j2.properties file, then we will
 * fail to detect uses of logging that occur before logging is properly configured.
 */
return false;
}

@Override
protected void execute(Terminal terminal, OptionSet options, Environment env) throws UserException {
if (options.nonOptionArguments().isEmpty() == false) {
Expand Down
20 changes: 16 additions & 4 deletions core/src/main/java/org/elasticsearch/cli/Command.java
Original file line number Diff line number Diff line change
Expand Up @@ -79,10 +79,12 @@ public final int main(String[] args, Terminal terminal) throws Exception {
Runtime.getRuntime().addShutdownHook(shutdownHookThread.get());
}

// initialize default for es.logger.level because we will not read the log4j2.properties
final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
LogConfigurator.configureWithoutConfig(settings);
if (shouldConfigureLoggingWithoutConfig()) {
// initialize default for es.logger.level because we will not read the log4j2.properties
final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
LogConfigurator.configureWithoutConfig(settings);
}

try {
mainWithoutErrorHandling(args, terminal);
Expand All @@ -100,6 +102,16 @@ public final int main(String[] args, Terminal terminal) throws Exception {
return ExitCodes.OK;
}

/**
 * Indicate whether or not logging should be configured without reading a log4j2.properties file. Most commands should do
 * this because we do not ship a log4j2.properties configuration for CLI tools; only commands that configure logging on
 * their own (by reading a log4j2.properties file) should override this to return false.
 *
 * @return true if logging should be configured without reading a log4j2.properties file
 */
protected boolean shouldConfigureLoggingWithoutConfig() {
return true;
}

/**
* Executes the command, but all errors are thrown.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -241,6 +241,8 @@ public ShardSnapshotStatus(String nodeId, State state, String reason) {
this.nodeId = nodeId;
this.state = state;
this.reason = reason;
// If the state is failed we have to have a reason for this failure
assert state.failed() == false || reason != null;
}

public ShardSnapshotStatus(StreamInput in) throws IOException {
Expand Down Expand Up @@ -403,7 +405,9 @@ public SnapshotsInProgress(StreamInput in) throws IOException {
ShardId shardId = ShardId.readShardId(in);
String nodeId = in.readOptionalString();
State shardState = State.fromValue(in.readByte());
builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
// Workaround for https://github.com/elastic/elasticsearch/issues/25878
String reason = shardState.failed() ? "" : null;
builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState, reason));
}
long repositoryStateId = UNDEFINED_REPOSITORY_STATE_ID;
if (in.getVersion().onOrAfter(REPOSITORY_ID_INTRODUCED_VERSION)) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ public SnapshotShardFailure(@Nullable String nodeId, ShardId shardId, String rea
this.nodeId = nodeId;
this.shardId = shardId;
this.reason = reason;
assert reason != null;
status = RestStatus.INTERNAL_SERVER_ERROR;
}

Expand Down Expand Up @@ -192,7 +193,9 @@ public static SnapshotShardFailure fromXContent(XContentParser parser) throws IO
} else if ("node_id".equals(currentFieldName)) {
snapshotShardFailure.nodeId = parser.text();
} else if ("reason".equals(currentFieldName)) {
snapshotShardFailure.reason = parser.text();
// Workaround for https://github.com/elastic/elasticsearch/issues/25878
// Some old snapshot might still have null in shard failure reasons
snapshotShardFailure.reason = parser.textOrNull();
} else if ("shard_id".equals(currentFieldName)) {
shardId = parser.intValue();
} else if ("status".equals(currentFieldName)) {
Expand All @@ -215,6 +218,11 @@ public static SnapshotShardFailure fromXContent(XContentParser parser) throws IO
throw new ElasticsearchParseException("index shard was not set");
}
snapshotShardFailure.shardId = new ShardId(index, index_uuid, shardId);
// Workaround for https://github.com/elastic/elasticsearch/issues/25878
// Some old snapshot might still have null in shard failure reasons
if (snapshotShardFailure.reason == null) {
snapshotShardFailure.reason = "";
}
return snapshotShardFailure;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1128,7 +1128,8 @@ public ClusterState execute(ClusterState currentState) throws Exception {
for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : snapshotEntry.shards()) {
ShardSnapshotStatus status = shardEntry.value;
if (!status.state().completed()) {
shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED));
shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED,
"aborted by snapshot deletion"));
} else {
shardsBuilder.put(shardEntry.key, status);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,12 +57,12 @@ public void testWaitingIndices() {
// test more than one waiting shard in an index
shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING));
shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING));
shards.put(new ShardId(idx1Name, idx1UUID, 2), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState()));
shards.put(new ShardId(idx1Name, idx1UUID, 2), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), ""));
// test exactly one waiting shard in an index
shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING));
shards.put(new ShardId(idx2Name, idx2UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState()));
shards.put(new ShardId(idx2Name, idx2UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), ""));
// test no waiting shards in an index
shards.put(new ShardId(idx3Name, idx3UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState()));
shards.put(new ShardId(idx3Name, idx3UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), ""));
Entry entry = new Entry(snapshot, randomBoolean(), randomBoolean(), State.INIT,
indices, System.currentTimeMillis(), randomLong(), shards.build());

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,13 +141,20 @@ public SnapshotInfo waitForCompletion(String repository, String snapshotName, Ti
return null;
}

public static String blockMasterFromFinalizingSnapshot(final String repositoryName) {
public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) {
final String masterName = internalCluster().getMasterName();
((MockRepository)internalCluster().getInstance(RepositoriesService.class, masterName)
.repository(repositoryName)).setBlockOnWriteIndexFile(true);
return masterName;
}

/**
 * Configures the mock repository on the current master node to block when it writes snap- files, and to fail
 * that write with an exception once unblocked.
 *
 * @param repositoryName the name of the repository to block
 * @return the name of the current master node
 */
public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) {
    final String masterName = internalCluster().getMasterName();
    final RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, masterName);
    final MockRepository masterRepository = (MockRepository) repositoriesService.repository(repositoryName);
    masterRepository.setBlockAndFailOnWriteSnapFiles(true);
    return masterName;
}

public static String blockNodeWithIndex(final String repositoryName, final String indexName) {
for(String node : internalCluster().nodesInclude(indexName)) {
((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository(repositoryName))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -798,6 +798,67 @@ public void testMasterShutdownDuringSnapshot() throws Exception {
assertEquals(0, snapshotInfo.failedShards());
}


/**
 * Tests that when both the data node holding the snapshotted shards and the master node coordinating the snapshot
 * are stopped while a snapshot is in flight, the snapshot still completes in a consistent PARTIAL state and every
 * recorded shard failure carries a non-null reason.
 */
public void testMasterAndDataShutdownDuringSnapshot() throws Exception {
logger.info("--> starting three master nodes and two data nodes");
internalCluster().startMasterOnlyNodes(3);
internalCluster().startDataOnlyNodes(2);

final Client client = client();

logger.info("--> creating repository");
assertAcked(client.admin().cluster().preparePutRepository("test-repo")
.setType("mock").setSettings(Settings.builder()
.put("location", randomRepoPath())
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

// single index, no replicas, so stopping the one data node that holds it guarantees shard failures
assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
.put("number_of_replicas", 0)));
ensureGreen();

logger.info("--> indexing some data");
final int numdocs = randomIntBetween(10, 100);
IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
}
indexRandom(true, builders);
flushAndRefresh();

final int numberOfShards = getNumShards("test-idx").numPrimaries;
logger.info("number of shards: {}", numberOfShards);

// block the master on writing the snap- file (it will fail once unblocked) and block the data node that
// holds the index, so the snapshot is still in flight when both nodes are stopped below
final String masterNode = blockMasterFromFinalizingSnapshotOnSnapFile("test-repo");
final String dataNode = blockNodeWithIndex("test-repo", "test-idx");

// issue the snapshot through a data node client so it survives the master shutdown
dataNodeClient().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();

logger.info("--> stopping data node {}", dataNode);
stopNode(dataNode);
logger.info("--> stopping master node {} ", masterNode);
internalCluster().stopCurrentMasterNode();

logger.info("--> wait until the snapshot is done");

assertBusy(() -> {
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
assertTrue(snapshotInfo.state().completed());
}, 1, TimeUnit.MINUTES);

logger.info("--> verify that snapshot was partial");

// the snapshot must be PARTIAL (some shards failed) and every shard failure must have a non-null reason;
// see https://github.com/elastic/elasticsearch/issues/25878
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
assertEquals(SnapshotState.PARTIAL, snapshotInfo.state());
assertNotEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards());
assertThat(snapshotInfo.failedShards(), greaterThan(0));
for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
assertNotNull(failure.reason());
}
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25281")
public void testMasterShutdownDuringFailedSnapshot() throws Exception {
logger.info("--> starting two master nodes and two data nodes");
Expand Down Expand Up @@ -831,7 +892,7 @@ public void testMasterShutdownDuringFailedSnapshot() throws Exception {
assertEquals(ClusterHealthStatus.RED, client().admin().cluster().prepareHealth().get().getStatus()),
30, TimeUnit.SECONDS);

final String masterNode = blockMasterFromFinalizingSnapshot("test-repo");
final String masterNode = blockMasterFromFinalizingSnapshotOnIndexFile("test-repo");

logger.info("--> snapshot");
client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2293,9 +2293,9 @@ public void testDeleteOrphanSnapshot() throws Exception {
public ClusterState execute(ClusterState currentState) {
// Simulate orphan snapshot
ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableOpenMap.builder();
shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED));
shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED));
shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED));
shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted"));
shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted"));
shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted"));
List<Entry> entries = new ArrayList<>();
entries.add(new Entry(new Snapshot(repositoryName,
createSnapshotResponse.getSnapshotInfo().snapshotId()),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,9 @@ public long getFailureCount() {
* finalization of a snapshot, while permitting other IO operations to proceed unblocked. */
private volatile boolean blockOnWriteIndexFile;

/** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a died master node */
private volatile boolean blockAndFailOnWriteSnapFile;

private volatile boolean atomicMove;

private volatile boolean blocked = false;
Expand All @@ -118,6 +121,7 @@ public MockRepository(RepositoryMetaData metadata, Environment environment,
blockOnControlFiles = metadata.settings().getAsBoolean("block_on_control", false);
blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false);
blockOnInitialization = metadata.settings().getAsBoolean("block_on_init", false);
blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false);
randomPrefix = metadata.settings().get("random", "default");
waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L);
atomicMove = metadata.settings().getAsBoolean("atomic_move", true);
Expand Down Expand Up @@ -168,13 +172,18 @@ public synchronized void unblock() {
blockOnControlFiles = false;
blockOnInitialization = false;
blockOnWriteIndexFile = false;
blockAndFailOnWriteSnapFile = false;
this.notifyAll();
}

/** Sets the flag that makes blob operations on data files block (see blockExecution). */
public void blockOnDataFiles(boolean blocked) {
blockOnDataFiles = blocked;
}

/** Sets the flag that makes writes of snap- files block and then fail with an exception when unblocked. */
public void setBlockAndFailOnWriteSnapFiles(boolean blocked) {
blockAndFailOnWriteSnapFile = blocked;
}

/** Sets the flag that makes writes of the repository index file block, while other I/O proceeds unblocked. */
public void setBlockOnWriteIndexFile(boolean blocked) {
blockOnWriteIndexFile = blocked;
}
Expand All @@ -187,7 +196,8 @@ private synchronized boolean blockExecution() {
logger.debug("Blocking execution");
boolean wasBlocked = false;
try {
while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization || blockOnWriteIndexFile) {
while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization || blockOnWriteIndexFile ||
blockAndFailOnWriteSnapFile) {
blocked = true;
this.wait();
wasBlocked = true;
Expand Down Expand Up @@ -266,6 +276,8 @@ private void maybeIOExceptionOrBlock(String blobName) throws IOException {
throw new IOException("Random IOException");
} else if (blockOnControlFiles) {
blockExecutionAndMaybeWait(blobName);
} else if (blobName.startsWith("snap-") && blockAndFailOnWriteSnapFile) {
blockExecutionAndFail(blobName);
}
}
}
Expand All @@ -283,6 +295,15 @@ private void blockExecutionAndMaybeWait(final String blobName) {
}
}

/**
 * Blocks an I/O operation on the given blob and, once unblocked, fails it by throwing an {@link IOException}.
 *
 * @param blobName the name of the blob whose operation is blocked and then failed
 * @throws IOException always, after the block is released
 */
private void blockExecutionAndFail(final String blobName) throws IOException {
logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
blockExecution();
throw new IOException("exception after block");
}

/** Wraps the given delegate container so that its I/O operations can be blocked or failed for testing. */
MockBlobContainer(BlobContainer delegate) {
super(delegate);
}
Expand Down
2 changes: 1 addition & 1 deletion docs/java-api/search.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ SearchResponse sr = client.prepareSearch()
// Get your facet results
Terms agg1 = sr.getAggregations().get("agg1");
DateHistogram agg2 = sr.getAggregations().get("agg2");
Histogram agg2 = sr.getAggregations().get("agg2");
--------------------------------------------------

See <<java-aggs,Aggregations Java API>>
Expand Down

0 comments on commit 476e641

Please sign in to comment.