[ML] Adapt to periodic persistent task refresh (#36494)
After #36069 the approach for reallocating ML persistent tasks after refreshing job memory requirements can be simplified.
droberts195 authored Dec 14, 2018
1 parent 2e2495c commit e0a58c9
Showing 7 changed files with 40 additions and 171 deletions.
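
In short: since #36069 the persistent tasks framework periodically re-runs assignment for any unassigned tasks (governed by the dynamic cluster.persistent_tasks.allocation.recheck_interval setting), so the memory tracker no longer needs to write a marker value into cluster state just to trigger a reassignment pass. A minimal sketch of the resulting flow, using the names from the diff below; the final node-selection call is hypothetical:

    // Sketch only: mirrors the simplified logic in selectLeastLoadedMlNode below.
    static PersistentTasksCustomMetaData.Assignment assign(String jobId, MlMemoryTracker memoryTracker) {
        if (memoryTracker.isRecentlyRefreshed() == false) {
            // Fire-and-forget refresh: no listener and no cluster state update.
            // The periodic recheck from #36069 re-runs this assignment logic later.
            if (memoryTracker.asyncRefresh()) {
                String reason = "Not opening job [" + jobId + "] because job memory requirements are stale - refresh requested";
                return new PersistentTasksCustomMetaData.Assignment(null, reason);
            }
        }
        return selectLeastLoaded(jobId); // hypothetical: the usual least-loaded-node path
    }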
@@ -57,28 +57,24 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom {
public static final String TYPE = "ml";
private static final ParseField JOBS_FIELD = new ParseField("jobs");
private static final ParseField DATAFEEDS_FIELD = new ParseField("datafeeds");
- private static final ParseField LAST_MEMORY_REFRESH_VERSION_FIELD = new ParseField("last_memory_refresh_version");

- public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap(), null);
+ public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap());
// This parser follows the pattern that metadata is parsed leniently (to allow for enhancements)
public static final ObjectParser<Builder, Void> LENIENT_PARSER = new ObjectParser<>("ml_metadata", true, Builder::new);

static {
LENIENT_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.LENIENT_PARSER.apply(p, c).build(), JOBS_FIELD);
LENIENT_PARSER.declareObjectArray(Builder::putDatafeeds,
(p, c) -> DatafeedConfig.LENIENT_PARSER.apply(p, c).build(), DATAFEEDS_FIELD);
- LENIENT_PARSER.declareLong(Builder::setLastMemoryRefreshVersion, LAST_MEMORY_REFRESH_VERSION_FIELD);
}

private final SortedMap<String, Job> jobs;
private final SortedMap<String, DatafeedConfig> datafeeds;
- private final Long lastMemoryRefreshVersion;
private final GroupOrJobLookup groupOrJobLookup;

- private MlMetadata(SortedMap<String, Job> jobs, SortedMap<String, DatafeedConfig> datafeeds, Long lastMemoryRefreshVersion) {
+ private MlMetadata(SortedMap<String, Job> jobs, SortedMap<String, DatafeedConfig> datafeeds) {
this.jobs = Collections.unmodifiableSortedMap(jobs);
this.datafeeds = Collections.unmodifiableSortedMap(datafeeds);
- this.lastMemoryRefreshVersion = lastMemoryRefreshVersion;
this.groupOrJobLookup = new GroupOrJobLookup(jobs.values());
}

@@ -121,10 +117,6 @@ public Set<String> expandDatafeedIds(String expression) {
.expand(expression);
}

- public Long getLastMemoryRefreshVersion() {
- return lastMemoryRefreshVersion;
- }

@Override
public Version getMinimalSupportedVersion() {
return Version.V_5_4_0;
@@ -158,21 +150,13 @@ public MlMetadata(StreamInput in) throws IOException {
datafeeds.put(in.readString(), new DatafeedConfig(in));
}
this.datafeeds = datafeeds;
- if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
- lastMemoryRefreshVersion = in.readOptionalLong();
- } else {
- lastMemoryRefreshVersion = null;
- }
this.groupOrJobLookup = new GroupOrJobLookup(jobs.values());
}

@Override
public void writeTo(StreamOutput out) throws IOException {
writeMap(jobs, out);
writeMap(datafeeds, out);
- if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
- out.writeOptionalLong(lastMemoryRefreshVersion);
- }
}

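With the optional field gone, the wire format is just the two maps again on every version, which is why the Version.V_6_6_0 branches disappear from both read and write paths. A quick round-trip sketch in test style (assuming the usual in-memory BytesStreamOutput):

    BytesStreamOutput out = new BytesStreamOutput();
    mlMetadata.writeTo(out);                                 // jobs map, then datafeeds map
    MlMetadata roundTripped = new MlMetadata(out.bytes().streamInput());
    assert mlMetadata.equals(roundTripped);                  // no version branching needed
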
private static <T extends Writeable> void writeMap(Map<String, T> map, StreamOutput out) throws IOException {
@@ -189,9 +173,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
new DelegatingMapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"), params);
mapValuesToXContent(JOBS_FIELD, jobs, builder, extendedParams);
mapValuesToXContent(DATAFEEDS_FIELD, datafeeds, builder, extendedParams);
- if (lastMemoryRefreshVersion != null) {
- builder.field(LAST_MEMORY_REFRESH_VERSION_FIELD.getPreferredName(), lastMemoryRefreshVersion);
- }
return builder;
}

@@ -208,24 +189,17 @@ public static class MlMetadataDiff implements NamedDiff<MetaData.Custom> {

final Diff<Map<String, Job>> jobs;
final Diff<Map<String, DatafeedConfig>> datafeeds;
- final Long lastMemoryRefreshVersion;

MlMetadataDiff(MlMetadata before, MlMetadata after) {
this.jobs = DiffableUtils.diff(before.jobs, after.jobs, DiffableUtils.getStringKeySerializer());
this.datafeeds = DiffableUtils.diff(before.datafeeds, after.datafeeds, DiffableUtils.getStringKeySerializer());
- this.lastMemoryRefreshVersion = after.lastMemoryRefreshVersion;
}

public MlMetadataDiff(StreamInput in) throws IOException {
this.jobs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), Job::new,
MlMetadataDiff::readJobDiffFrom);
this.datafeeds = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), DatafeedConfig::new,
MlMetadataDiff::readDatafeedDiffFrom);
- if (in.getVersion().onOrAfter(Version.V_6_6_0)) {
- lastMemoryRefreshVersion = in.readOptionalLong();
- } else {
- lastMemoryRefreshVersion = null;
- }
}

/**
@@ -237,17 +211,13 @@ public MlMetadataDiff(StreamInput in) throws IOException {
public MetaData.Custom apply(MetaData.Custom part) {
TreeMap<String, Job> newJobs = new TreeMap<>(jobs.apply(((MlMetadata) part).jobs));
TreeMap<String, DatafeedConfig> newDatafeeds = new TreeMap<>(datafeeds.apply(((MlMetadata) part).datafeeds));
- // lastMemoryRefreshVersion always comes from the diff - no need to merge with the old value
- return new MlMetadata(newJobs, newDatafeeds, lastMemoryRefreshVersion);
+ return new MlMetadata(newJobs, newDatafeeds);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
jobs.writeTo(out);
datafeeds.writeTo(out);
- if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
- out.writeOptionalLong(lastMemoryRefreshVersion);
- }
}

@Override
@@ -272,8 +242,7 @@ public boolean equals(Object o) {
return false;
MlMetadata that = (MlMetadata) o;
return Objects.equals(jobs, that.jobs) &&
- Objects.equals(datafeeds, that.datafeeds) &&
- Objects.equals(lastMemoryRefreshVersion, that.lastMemoryRefreshVersion);
+ Objects.equals(datafeeds, that.datafeeds);
}

@Override
@@ -283,14 +252,13 @@ public final String toString() {

@Override
public int hashCode() {
- return Objects.hash(jobs, datafeeds, lastMemoryRefreshVersion);
+ return Objects.hash(jobs, datafeeds);
}

public static class Builder {

private TreeMap<String, Job> jobs;
private TreeMap<String, DatafeedConfig> datafeeds;
- private Long lastMemoryRefreshVersion;

public Builder() {
jobs = new TreeMap<>();
@@ -304,7 +272,6 @@ public Builder(@Nullable MlMetadata previous) {
} else {
jobs = new TreeMap<>(previous.jobs);
datafeeds = new TreeMap<>(previous.datafeeds);
- lastMemoryRefreshVersion = previous.lastMemoryRefreshVersion;
}
}

@@ -424,13 +391,8 @@ public Builder putDatafeeds(Collection<DatafeedConfig> datafeeds) {
return this;
}

- public Builder setLastMemoryRefreshVersion(Long lastMemoryRefreshVersion) {
- this.lastMemoryRefreshVersion = lastMemoryRefreshVersion;
- return this;
- }

public MlMetadata build() {
- return new MlMetadata(jobs, datafeeds, lastMemoryRefreshVersion);
+ return new MlMetadata(jobs, datafeeds);
}

public void markJobAsDeleting(String jobId, PersistentTasksCustomMetaData tasks, boolean allowDeleteOpenJob) {
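Net effect on this class: MlMetadata is once again just jobs plus datafeeds. A minimal builder sketch, with hypothetical jobs and datafeedConfigs collections:

    MlMetadata mlMetadata = new MlMetadata.Builder()
            .putJobs(jobs)                   // Collection<Job>
            .putDatafeeds(datafeedConfigs)   // Collection<DatafeedConfig>
            .build();                        // no setLastMemoryRefreshVersion(...) step any more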
@@ -424,7 +424,7 @@ public Collection<Object> createComponents(Client client, ClusterService cluster
this.datafeedManager.set(datafeedManager);
MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager,
autodetectProcessManager);
- MlMemoryTracker memoryTracker = new MlMemoryTracker(clusterService, threadPool, jobManager, jobResultsProvider);
+ MlMemoryTracker memoryTracker = new MlMemoryTracker(settings, clusterService, threadPool, jobManager, jobResultsProvider);
this.memoryTracker.set(memoryTracker);

// This object's constructor attaches to the license state, so there's no need to retain another reference to it
@@ -257,8 +257,7 @@ static RemovalResult removeJobsAndDatafeeds(List<String> jobsToRemove, List<Stri
}

MlMetadata.Builder builder = new MlMetadata.Builder();
- builder.setLastMemoryRefreshVersion(mlMetadata.getLastMemoryRefreshVersion())
- .putJobs(currentJobs.values())
+ builder.putJobs(currentJobs.values())
.putDatafeeds(currentDatafeeds.values());

return new RemovalResult(builder.build(), removedJobIds, removedDatafeedIds);
@@ -179,16 +179,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j

if (memoryTracker.isRecentlyRefreshed() == false) {

- boolean scheduledRefresh = memoryTracker.asyncRefresh(ActionListener.wrap(
- acknowledged -> {
- if (acknowledged) {
- logger.trace("Job memory requirement refresh request completed successfully");
- } else {
- logger.warn("Job memory requirement refresh request completed but did not set time in cluster state");
- }
- },
- e -> logger.error("Failed to refresh job memory requirements", e)
- ));
+ boolean scheduledRefresh = memoryTracker.asyncRefresh();
if (scheduledRefresh) {
String reason = "Not opening job [" + jobId + "] because job memory requirements are stale - refresh requested";
logger.debug(reason);
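
The retry cadence for this "refresh requested" case now comes entirely from the persistent task recheck interval introduced in #36069. A hedged example of tuning it, assuming the setting's default of 30s and using the standard cluster settings API:

    ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
    request.persistentSettings(Settings.builder()
            .put("cluster.persistent_tasks.allocation.recheck_interval", "10s")); // retry stale assignments sooner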
@@ -9,16 +9,13 @@
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
- import org.elasticsearch.action.support.master.AcknowledgedRequest;
- import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
- import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.LocalNodeMasterListener;
- import org.elasticsearch.cluster.ack.AckedRequest;
- import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
+ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+ import org.elasticsearch.persistent.PersistentTasksClusterService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ml.MlMetadata;
@@ -44,22 +41,10 @@
* 1. For all open ML jobs (via {@link #asyncRefresh})
* 2. For all open ML jobs, plus one named ML job that is not open (via {@link #refreshJobMemoryAndAllOthers})
* 3. For one named ML job (via {@link #refreshJobMemory})
- * In all cases a listener informs the caller when the requested updates are complete.
+ * In cases 2 and 3 a listener informs the caller when the requested updates are complete.
*/
public class MlMemoryTracker implements LocalNodeMasterListener {

- private static final AckedRequest ACKED_REQUEST = new AckedRequest() {
- @Override
- public TimeValue ackTimeout() {
- return AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
- }
-
- @Override
- public TimeValue masterNodeTimeout() {
- return AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
- }
- };

private static final Duration RECENT_UPDATE_THRESHOLD = Duration.ofMinutes(1);

private final Logger logger = LogManager.getLogger(MlMemoryTracker.class);
@@ -72,14 +57,22 @@ public TimeValue masterNodeTimeout() {
private final JobResultsProvider jobResultsProvider;
private volatile boolean isMaster;
private volatile Instant lastUpdateTime;
+ private volatile Duration reassignmentRecheckInterval;

- public MlMemoryTracker(ClusterService clusterService, ThreadPool threadPool, JobManager jobManager,
+ public MlMemoryTracker(Settings settings, ClusterService clusterService, ThreadPool threadPool, JobManager jobManager,
JobResultsProvider jobResultsProvider) {
this.threadPool = threadPool;
this.clusterService = clusterService;
this.jobManager = jobManager;
this.jobResultsProvider = jobResultsProvider;
+ setReassignmentRecheckInterval(PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING.get(settings));
clusterService.addLocalNodeMasterListener(this);
+ clusterService.getClusterSettings().addSettingsUpdateConsumer(
+ PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, this::setReassignmentRecheckInterval);
}

+ private void setReassignmentRecheckInterval(TimeValue recheckInterval) {
+ reassignmentRecheckInterval = Duration.ofNanos(recheckInterval.getNanos());
+ }

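The constructor wiring above is the standard dynamic-setting pattern: read the initial value from Settings, then register a consumer on ClusterSettings for later updates. A generic sketch of the pattern with a hypothetical setting (the real one here is PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING):

    static final Setting<TimeValue> RECHECK_SETTING = Setting.timeSetting(
            "example.recheck_interval", TimeValue.timeValueSeconds(30),
            Setting.Property.Dynamic, Setting.Property.NodeScope);

    void wire(Settings settings, ClusterService clusterService) {
        applyInterval(RECHECK_SETTING.get(settings));        // pick up the initial value
        clusterService.getClusterSettings()
                .addSettingsUpdateConsumer(RECHECK_SETTING, this::applyInterval); // track dynamic updates
    }

    void applyInterval(TimeValue interval) {
        // mirror of setReassignmentRecheckInterval above
    }
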
@Override
@@ -103,11 +96,12 @@ public String executorName() {

/**
* Is the information in this object sufficiently up to date
- * for valid allocation decisions to be made using it?
+ * for valid task assignment decisions to be made using it?
*/
public boolean isRecentlyRefreshed() {
Instant localLastUpdateTime = lastUpdateTime;
- return localLastUpdateTime != null && localLastUpdateTime.plus(RECENT_UPDATE_THRESHOLD).isAfter(Instant.now());
+ return localLastUpdateTime != null &&
+ localLastUpdateTime.plus(RECENT_UPDATE_THRESHOLD).plus(reassignmentRecheckInterval).isAfter(Instant.now());
}

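Adding the recheck interval to the freshness window guarantees that at least one periodic reassignment pass runs while the last refresh still counts as recent. A worked example, assuming the 1-minute RECENT_UPDATE_THRESHOLD above and a 30s recheck interval:

    Instant lastUpdate = Instant.now().minus(Duration.ofSeconds(75));
    Duration window = Duration.ofMinutes(1).plus(Duration.ofSeconds(30)); // threshold + recheck interval = 90s
    boolean recent = lastUpdate.plus(window).isAfter(Instant.now());      // true: a 75s-old refresh still counts
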
/**
@@ -149,24 +143,19 @@ public void removeJob(String jobId) {
/**
* Uses a separate thread to refresh the memory requirement for every ML job that has
* a corresponding persistent task. This method only works on the master node.
- * @param listener Will be called when the async refresh completes or fails. The
- * boolean value indicates whether the cluster state was updated
- * with the refresh completion time. (If it was then this will in
- * cause the persistent tasks framework to check if any persistent
- * tasks are awaiting allocation.)
* @return <code>true</code> if the async refresh is scheduled, and <code>false</code>
* if this is not possible for some reason.
*/
- public boolean asyncRefresh(ActionListener<Boolean> listener) {
+ public boolean asyncRefresh() {

if (isMaster) {
try {
- ActionListener<Void> mlMetaUpdateListener = ActionListener.wrap(
- aVoid -> recordUpdateTimeInClusterState(listener),
- listener::onFailure
+ ActionListener<Void> listener = ActionListener.wrap(
+ aVoid -> logger.trace("Job memory requirement refresh request completed successfully"),
+ e -> logger.error("Failed to refresh job memory requirements", e)
);
threadPool.executor(executorName()).execute(
- () -> refresh(clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE), mlMetaUpdateListener));
+ () -> refresh(clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE), listener));
return true;
} catch (EsRejectedExecutionException e) {
logger.debug("Couldn't schedule ML memory update - node might be shutting down", e);
@@ -233,33 +222,6 @@ void refresh(PersistentTasksCustomMetaData persistentTasks, ActionListener<Void>
}
}

- private void recordUpdateTimeInClusterState(ActionListener<Boolean> listener) {
-
- clusterService.submitStateUpdateTask("ml-memory-last-update-time",
- new AckedClusterStateUpdateTask<Boolean>(ACKED_REQUEST, listener) {
- @Override
- protected Boolean newResponse(boolean acknowledged) {
- return acknowledged;
- }
-
- @Override
- public ClusterState execute(ClusterState currentState) {
- MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState);
- MlMetadata.Builder builder = new MlMetadata.Builder(currentMlMetadata);
- builder.setLastMemoryRefreshVersion(currentState.getVersion() + 1);
- MlMetadata newMlMetadata = builder.build();
- if (newMlMetadata.equals(currentMlMetadata)) {
- // Return same reference if nothing has changed
- return currentState;
- } else {
- ClusterState.Builder newState = ClusterState.builder(currentState);
- newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(MlMetadata.TYPE, newMlMetadata).build());
- return newState.build();
- }
- }
- });
- }

private void iterateMlJobTasks(Iterator<PersistentTasksCustomMetaData.PersistentTask<?>> iterator,
ActionListener<Void> refreshComplete) {
if (iterator.hasNext()) {
@@ -69,9 +69,6 @@ protected MlMetadata createTestInstance() {
builder.putJob(job, false);
}
}
- if (randomBoolean()) {
- builder.setLastMemoryRefreshVersion(randomNonNegativeLong());
- }
return builder.build();
}

@@ -441,9 +438,8 @@ protected MlMetadata mutateInstance(MlMetadata instance) {
for (Map.Entry<String, DatafeedConfig> entry : datafeeds.entrySet()) {
metadataBuilder.putDatafeed(entry.getValue(), Collections.emptyMap());
}
- metadataBuilder.setLastMemoryRefreshVersion(instance.getLastMemoryRefreshVersion());

- switch (between(0, 2)) {
+ switch (between(0, 1)) {
case 0:
metadataBuilder.putJob(JobTests.createRandomizedJob(), true);
break;
@@ -463,13 +459,6 @@
metadataBuilder.putJob(randomJob, false);
metadataBuilder.putDatafeed(datafeedConfig, Collections.emptyMap());
break;
- case 2:
- if (instance.getLastMemoryRefreshVersion() == null) {
- metadataBuilder.setLastMemoryRefreshVersion(randomNonNegativeLong());
- } else {
- metadataBuilder.setLastMemoryRefreshVersion(null);
- }
- break;
default:
throw new AssertionError("Illegal randomisation branch");
}
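
With the field gone there is nothing left for a third mutation branch to change, so the switch shrinks to two cases. A simplified sketch of the contract each remaining branch keeps (what the equality-based test harness effectively asserts):

    MlMetadata original = createTestInstance();
    MlMetadata mutated = mutateInstance(original);
    assert original.equals(mutated) == false; // every branch must yield a non-equal instance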