[ML] Prefer cluster state config to index documents (#36014)
Flipping the change in #35940
davidkyle authored Nov 29, 2018
1 parent 4d6f556 commit 4e98158
Showing 12 changed files with 307 additions and 274 deletions.
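
The rule in the title is the one the diffs below implement: while ML job and datafeed configurations migrate from cluster state to index documents, a configuration still present in cluster state takes precedence, and the index is consulted only when cluster state has nothing. Before this commit the order was reversed, with the index tried first and a ResourceNotFoundException triggering the cluster state fallback. A minimal, self-contained sketch of the preferred lookup order, using illustrative names rather than the Elasticsearch API:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Illustrative only: resolve a config from cluster state first, then the index.
class ConfigResolver {
    private final Map<String, String> clusterStateConfigs = new HashMap<>(); // un-migrated
    private final Map<String, String> indexConfigs = new HashMap<>();        // migrated

    Optional<String> resolve(String id) {
        String config = clusterStateConfigs.get(id);
        if (config != null) {
            return Optional.of(config);                       // cluster state config wins
        }
        return Optional.ofNullable(indexConfigs.get(id));     // fall back to the index
    }
}

Checking cluster state up front also removes the exception-driven control flow visible in the deleted lines below, where a not-found error from the index was the trigger for trying cluster state.
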
@@ -91,6 +91,8 @@ public final class Messages {
public static final String JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT = "Job memory status changed to hard_limit at {0}; adjust the " +
"analysis_limits.model_memory_limit setting to ensure all data is analyzed";

public static final String JOB_CANNOT_CLOSE_BECAUSE_DATAFEED = "cannot close job datafeed [{0}] hasn''t been stopped";

public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES = "categorization_filters contain duplicates";
public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY =
"categorization_filters are not allowed to contain empty strings";
@@ -28,9 +28,11 @@
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.MlTasks;
import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskState;
@@ -46,12 +48,13 @@
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;

public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJobAction.JobTask, CloseJobAction.Request,
CloseJobAction.Response, CloseJobAction.Response> {
@@ -112,7 +115,7 @@ protected void doExecute(Task task, CloseJobAction.Request request, ActionListen
PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
jobManager.expandJobIds(request.getJobId(), request.allowNoJobs(), ActionListener.wrap(
expandedJobIds -> {
validate(expandedJobIds, request.isForce(), tasksMetaData, ActionListener.wrap(
validate(expandedJobIds, request.isForce(), MlMetadata.getMlMetadata(state), tasksMetaData, ActionListener.wrap(
response -> {
request.setOpenJobIds(response.openJobIds.toArray(new String[0]));
if (response.openJobIds.isEmpty() && response.closingJobIds.isEmpty()) {
@@ -173,13 +176,14 @@ class OpenAndClosingIds {
*
* @param expandedJobIds The job ids
* @param forceClose Force close the job(s)
* @param mlMetadata The ML metadata for un-migrated jobs
* @param tasksMetaData Persistent tasks
* @param listener Resolved job Ids listener
*/
void validate(Collection<String> expandedJobIds, boolean forceClose, PersistentTasksCustomMetaData tasksMetaData,
ActionListener<OpenAndClosingIds> listener) {
void validate(Collection<String> expandedJobIds, boolean forceClose, MlMetadata mlMetadata,
PersistentTasksCustomMetaData tasksMetaData, ActionListener<OpenAndClosingIds> listener) {

checkDatafeedsHaveStopped(expandedJobIds, tasksMetaData, ActionListener.wrap(
checkDatafeedsHaveStopped(expandedJobIds, tasksMetaData, mlMetadata, ActionListener.wrap(
response -> {
OpenAndClosingIds ids = new OpenAndClosingIds();
List<String> failedJobs = new ArrayList<>();
@@ -209,14 +213,27 @@ void validate(Collection<String> expandedJobIds, boolean forceClose, PersistentT
}

void checkDatafeedsHaveStopped(Collection<String> jobIds, PersistentTasksCustomMetaData tasksMetaData,
ActionListener<Boolean> listener) {
MlMetadata mlMetadata, ActionListener<Boolean> listener) {

for (String jobId: jobIds) {
Optional<DatafeedConfig> datafeed = mlMetadata.getDatafeedByJobId(jobId);
if (datafeed.isPresent()) {
DatafeedState datafeedState = MlTasks.getDatafeedState(datafeed.get().getId(), tasksMetaData);
if (datafeedState != DatafeedState.STOPPED) {
listener.onFailure(
ExceptionsHelper.conflictStatusException(
Messages.getMessage(Messages.JOB_CANNOT_CLOSE_BECAUSE_DATAFEED, datafeed.get().getId())));
return;
}
}
}
datafeedConfigProvider.findDatafeedsForJobIds(jobIds, ActionListener.wrap(
datafeedIds -> {
for (String datafeedId : datafeedIds) {
DatafeedState datafeedState = MlTasks.getDatafeedState(datafeedId, tasksMetaData);
if (datafeedState != DatafeedState.STOPPED) {
listener.onFailure(ExceptionsHelper.conflictStatusException(
"cannot close job datafeed [{}] hasn't been stopped", datafeedId));
Messages.getMessage(Messages.JOB_CANNOT_CLOSE_BECAUSE_DATAFEED, datafeedId)));
return;
}
}
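
checkDatafeedsHaveStopped now works in two phases: datafeeds whose configs are still in cluster state (via MlMetadata.getDatafeedByJobId) are checked synchronously first, failing fast on the first one that is not STOPPED, and only then are the index-backed configs looked up asynchronously and given the same check. A compact sketch of the fail-fast shape, with hypothetical types standing in for the persistent-task lookup:

import java.util.Collection;
import java.util.Map;
import java.util.function.Consumer;

// Illustrative only: fail fast on the first datafeed that is still running.
class DatafeedStopCheck {
    enum State { STARTED, STOPPED }

    static void check(Collection<String> datafeedIds, Map<String, State> states,
                      Consumer<String> onConflict, Runnable onAllStopped) {
        for (String datafeedId : datafeedIds) {
            if (states.getOrDefault(datafeedId, State.STOPPED) != State.STOPPED) {
                onConflict.accept(datafeedId); // first running datafeed aborts the close
                return;
            }
        }
        onAllStopped.run(); // the real code then checks index-backed datafeeds asynchronously
    }
}
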
@@ -131,21 +131,15 @@ private void deleteDatafeedConfig(DeleteDatafeedAction.Request request, ClusterS
return;
}


datafeedConfigProvider.deleteDatafeedConfig(request.getDatafeedId(), ActionListener.wrap(
deleteResponse -> listener.onResponse(new AcknowledgedResponse(true)),
e -> {
if (e.getClass() == ResourceNotFoundException.class) {
// is the datafeed in the clusterstate
MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
if (mlMetadata.getDatafeed(request.getDatafeedId()) != null) {
deleteDatafeedFromMetadata(request, listener);
return;
}
}
listener.onFailure(e);
}
));
MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
if (mlMetadata.getDatafeed(request.getDatafeedId()) != null) {
deleteDatafeedFromMetadata(request, listener);
} else {
datafeedConfigProvider.deleteDatafeedConfig(request.getDatafeedId(), ActionListener.wrap(
deleteResponse -> listener.onResponse(new AcknowledgedResponse(true)),
listener::onFailure
));
}
}

private void deleteDatafeedFromMetadata(DeleteDatafeedAction.Request request, ActionListener<AcknowledgedResponse> listener) {
@@ -73,7 +73,6 @@ protected AcknowledgedResponse newResponse() {
@Override
protected void masterOperation(FinalizeJobExecutionAction.Request request, ClusterState state,
ActionListener<AcknowledgedResponse> listener) {

MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
Set<String> jobsInClusterState = Arrays.stream(request.getJobIds())
.filter(id -> mlMetadata.getJobs().containsKey(id))
@@ -83,14 +82,19 @@ protected void masterOperation(FinalizeJobExecutionAction.Request request, Clust
finalizeIndexJobs(Arrays.asList(request.getJobIds()), listener);
} else {
ActionListener<AcknowledgedResponse> finalizeClusterStateJobsListener = ActionListener.wrap(
ack -> finalizeClusterStateJobs(jobsInClusterState, listener),
ack -> {
Set<String> jobsInIndex = new HashSet<>(Arrays.asList(request.getJobIds()));
jobsInIndex.removeAll(jobsInClusterState);
if (jobsInIndex.isEmpty()) {
listener.onResponse(ack);
} else {
finalizeIndexJobs(jobsInIndex, listener);
}
},
listener::onFailure
);

Set<String> jobsInIndex = new HashSet<>(Arrays.asList(request.getJobIds()));
jobsInIndex.removeAll(jobsInClusterState);

finalizeIndexJobs(jobsInIndex, finalizeClusterStateJobsListener);
finalizeClusterStateJobs(jobsInClusterState, finalizeClusterStateJobsListener);
}
}
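
The finalize action is also reordered: cluster state jobs are finalized first, and the index-backed jobs are handled in the continuation, which short-circuits when every job lived in cluster state. A sketch of that continuation shape with plain callbacks in place of ActionListener:

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

// Illustrative only: run step one, then either short-circuit or run step two.
class FinalizeChain {
    static void finalizeAll(Set<String> allJobs, Set<String> clusterStateJobs, Consumer<Boolean> done) {
        finalizeClusterStateJobs(clusterStateJobs, ack -> {
            Set<String> indexJobs = new HashSet<>(allJobs);
            indexJobs.removeAll(clusterStateJobs);
            if (indexJobs.isEmpty()) {
                done.accept(ack);                   // nothing left to finalize
            } else {
                finalizeIndexJobs(indexJobs, done); // continue with the index-backed jobs
            }
        });
    }

    static void finalizeClusterStateJobs(Set<String> jobs, Consumer<Boolean> next) { next.accept(true); }
    static void finalizeIndexJobs(Set<String> jobs, Consumer<Boolean> next) { next.accept(true); }
}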

@@ -11,7 +11,6 @@
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
@@ -693,53 +692,47 @@ public void onTimeout(TimeValue timeout) {

private void clearJobFinishedTime(String jobId, ActionListener<AcknowledgedResponse> listener) {

JobUpdate update = new JobUpdate.Builder(jobId).setClearFinishTime(true).build();
boolean jobIsInClusterState = ClusterStateJobUpdate.jobIsInClusterState(clusterService.state(), jobId);
if (jobIsInClusterState) {
clusterService.submitStateUpdateTask("clearing-job-finish-time-for-" + jobId, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState);
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata);
Job.Builder jobBuilder = new Job.Builder(mlMetadata.getJobs().get(jobId));
jobBuilder.setFinishedTime(null);

mlMetadataBuilder.putJob(jobBuilder.build(), true);
ClusterState.Builder builder = ClusterState.builder(currentState);
return builder.metaData(new MetaData.Builder(currentState.metaData())
.putCustom(MlMetadata.TYPE, mlMetadataBuilder.build()))
.build();
}

jobConfigProvider.updateJob(jobId, update, null, ActionListener.wrap(
job -> listener.onResponse(new AcknowledgedResponse(true)),
e -> {
if (e.getClass() == ResourceNotFoundException.class) {
// Maybe the config is in the clusterstate
if (ClusterStateJobUpdate.jobIsInClusterState(clusterService.state(), jobId)) {
clearJobFinishedTimeClusterState(jobId, listener);
return;
}
}
logger.error("[" + jobId + "] Failed to clear finished_time", e);
// Not a critical error so continue
@Override
public void onFailure(String source, Exception e) {
logger.error("[" + jobId + "] Failed to clear finished_time; source [" + source + "]", e);
listener.onResponse(new AcknowledgedResponse(true));
}
));
}

private void clearJobFinishedTimeClusterState(String jobId, ActionListener<AcknowledgedResponse> listener) {
clusterService.submitStateUpdateTask("clearing-job-finish-time-for-" + jobId, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState);
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata);
Job.Builder jobBuilder = new Job.Builder(mlMetadata.getJobs().get(jobId));
jobBuilder.setFinishedTime(null);

mlMetadataBuilder.putJob(jobBuilder.build(), true);
ClusterState.Builder builder = ClusterState.builder(currentState);
return builder.metaData(new MetaData.Builder(currentState.metaData())
.putCustom(MlMetadata.TYPE, mlMetadataBuilder.build()))
.build();
}

@Override
public void onFailure(String source, Exception e) {
logger.error("[" + jobId + "] Failed to clear finished_time; source [" + source + "]", e);
listener.onResponse(new AcknowledgedResponse(true));
}

@Override
public void clusterStateProcessed(String source, ClusterState oldState,
ClusterState newState) {
listener.onResponse(new AcknowledgedResponse(true));
}
});
@Override
public void clusterStateProcessed(String source, ClusterState oldState,
ClusterState newState) {
listener.onResponse(new AcknowledgedResponse(true));
}
});
} else {
JobUpdate update = new JobUpdate.Builder(jobId).setClearFinishTime(true).build();

jobConfigProvider.updateJob(jobId, update, null, ActionListener.wrap(
job -> listener.onResponse(new AcknowledgedResponse(true)),
e -> {
logger.error("[" + jobId + "] Failed to clear finished_time", e);
// Not a critical error so continue
listener.onResponse(new AcknowledgedResponse(true));
}
));
}
}

private void cancelJobStart(PersistentTasksCustomMetaData.PersistentTask<OpenJobAction.JobParams> persistentTask, Exception exception,
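
clearJobFinishedTime now branches up front on where the job lives: a cluster state job gets a ClusterStateUpdateTask, an index job gets a JobUpdate via jobConfigProvider. Either way a failure is only logged and the caller still receives an acknowledgement, since clearing finished_time is best-effort and should not fail the open-job flow. A sketch of that log-and-acknowledge policy, with a hypothetical update hook standing in for both branches:

import java.util.function.Consumer;

// Illustrative only: treat the update as best-effort and acknowledge either way.
class BestEffortUpdate {
    static void clearFinishedTime(String jobId, Runnable update, Consumer<Boolean> listener) {
        try {
            update.run();
        } catch (Exception e) {
            System.err.println("[" + jobId + "] Failed to clear finished_time: " + e);
            // Not a critical error so continue
        }
        listener.accept(true); // acknowledged in both the success and failure paths
    }
}
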
@@ -5,7 +5,6 @@
*/
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -70,6 +69,19 @@ protected PutDatafeedAction.Response newResponse() {
protected void masterOperation(UpdateDatafeedAction.Request request, ClusterState state,
ActionListener<PutDatafeedAction.Response> listener) throws Exception {

MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
boolean datafeedConfigIsInClusterState = mlMetadata.getDatafeed(request.getUpdate().getId()) != null;
if (datafeedConfigIsInClusterState) {
updateDatafeedInClusterState(request, listener);
} else {
updateDatafeedInIndex(request, state, listener);
}
}

private void updateDatafeedInIndex(UpdateDatafeedAction.Request request, ClusterState state,
ActionListener<PutDatafeedAction.Response> listener) throws Exception {
final Map<String, String> headers = threadPool.getThreadContext().getHeaders();

// Check datafeed is stopped
PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
if (MlTasks.getDatafeedTask(request.getUpdate().getId(), tasks) != null) {
@@ -79,30 +91,14 @@ protected void masterOperation(UpdateDatafeedAction.Request request, ClusterStat
return;
}

updateDatafeedInIndex(request, state, listener);
}

private void updateDatafeedInIndex(UpdateDatafeedAction.Request request, ClusterState state,
ActionListener<PutDatafeedAction.Response> listener) throws Exception {
final Map<String, String> headers = threadPool.getThreadContext().getHeaders();
String datafeedId = request.getUpdate().getId();

CheckedConsumer<Boolean, Exception> updateConsumer = ok -> {
datafeedConfigProvider.updateDatefeedConfig(request.getUpdate().getId(), request.getUpdate(), headers,
jobConfigProvider::validateDatafeedJob,
ActionListener.wrap(
updatedConfig -> listener.onResponse(new PutDatafeedAction.Response(updatedConfig)),
e -> {
if (e.getClass() == ResourceNotFoundException.class) {
// try the clusterstate
MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
if (mlMetadata.getDatafeed(request.getUpdate().getId()) != null) {
updateDatafeedInClusterState(request, listener);
return;
}
}
listener.onFailure(e);
}
listener::onFailure
));
};
