-
Notifications
You must be signed in to change notification settings - Fork 3.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
API for stopping streaming tasks early #16310
Changes from 9 commits
3373349
5b6fb15
31782e4
8d80d20
ee1bb44
74088e7
e5f36a8
b508816
ada524d
9a58645
603bae3
17e2676
da500a8
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -30,6 +30,7 @@ | |
import com.google.common.collect.Sets; | ||
import com.google.inject.Inject; | ||
import com.sun.jersey.spi.container.ResourceFilters; | ||
import org.apache.commons.lang3.NotImplementedException; | ||
import org.apache.druid.audit.AuditEntry; | ||
import org.apache.druid.audit.AuditManager; | ||
import org.apache.druid.indexing.overlord.DataSourceMetadata; | ||
|
@@ -395,6 +396,43 @@ public Response shutdown(@PathParam("id") final String id) | |
return terminate(id); | ||
} | ||
|
||
/** | ||
* This method will immediately try to handoff the list of task group ids for the given supervisor. | ||
* This is a best effort API and makes no guarantees of execution, e.g. if a non-existent task group id | ||
is passed to it, the API call will still succeed. | ||
*/ | ||
@POST | ||
@Path("/{id}/taskGroups/handoff") | ||
@Produces(MediaType.APPLICATION_JSON) | ||
@ResourceFilters(SupervisorResourceFilter.class) | ||
public Response handoffTaskGroups(@PathParam("id") final String id, final List<Integer> taskGroupIds) | ||
{ | ||
if (taskGroupIds == null || taskGroupIds.isEmpty()) { | ||
return Response.status(Response.Status.BAD_REQUEST) | ||
.entity(ImmutableMap.of("error", "List of task groups to handoff can't be empty")) | ||
.build(); | ||
|
||
} | ||
return asLeaderWithSupervisorManager( | ||
manager -> { | ||
try { | ||
if (manager.handoffTaskGroupsEarly(id, taskGroupIds)) { | ||
return Response.ok(ImmutableMap.of("id", id, "taskGroupIds", taskGroupIds)).build(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In the success case, why do we return the request parameters back in the response? We might as well just return an empty 200 OK response. Alternatively, we could return the |
||
} else { | ||
return Response.status(Response.Status.NOT_FOUND) | ||
.entity(ImmutableMap.of("error", StringUtils.format("Supervisor was not found [%s]", id))) | ||
.build(); | ||
} | ||
} | ||
catch (NotImplementedException e) { | ||
return Response.status(Response.Status.BAD_REQUEST) | ||
.entity(ImmutableMap.of("error", StringUtils.format("Supervisor [%s] does not support early handoff", id))) | ||
.build(); | ||
} | ||
} | ||
); | ||
} | ||
|
||
@POST | ||
@Path("/{id}/terminate") | ||
@Produces(MediaType.APPLICATION_JSON) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -203,6 +203,8 @@ public class TaskGroup | |
final String baseSequenceName; | ||
DateTime completionTimeout; // is set after signalTasksToFinish(); if not done by timeout, take corrective action | ||
|
||
Boolean shutdownEarly = false; // set by SupervisorManager.stopTaskGroupEarly | ||
georgew5656 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
||
TaskGroup( | ||
int groupId, | ||
ImmutableMap<PartitionIdType, SequenceOffsetType> startingSequences, | ||
|
@@ -266,6 +268,16 @@ Set<String> taskIds() | |
return tasks.keySet(); | ||
} | ||
|
||
void setShutdownEarly() | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Should these methods be renamed to |
||
{ | ||
shutdownEarly = true; | ||
} | ||
|
||
Boolean getShutdownEarly() | ||
{ | ||
return shutdownEarly; | ||
} | ||
|
||
@VisibleForTesting | ||
public String getBaseSequenceName() | ||
{ | ||
|
@@ -657,6 +669,39 @@ public String getType() | |
} | ||
} | ||
|
||
private class HandoffTaskGroupsNotice implements Notice | ||
{ | ||
final List<Integer> taskGroupIds; | ||
private static final String TYPE = "handoff_task_group_notice"; | ||
|
||
HandoffTaskGroupsNotice( | ||
@Nonnull final List<Integer> taskGroupIds | ||
) | ||
{ | ||
this.taskGroupIds = taskGroupIds; | ||
} | ||
|
||
@Override | ||
public void handle() | ||
{ | ||
for (Integer taskGroupId : taskGroupIds) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We should add an info log line here saying that we are now going to handoff these task groups early. Otherwise, there is no way to know that such a request was even received by the supervisor. We can probably also add a log line in |
||
TaskGroup taskGroup = activelyReadingTaskGroups.getOrDefault(taskGroupId, null); | ||
if (taskGroup == null) { | ||
log.info("Tried to stop task group that wasn't actively reading."); | ||
georgew5656 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
continue; | ||
} | ||
|
||
taskGroup.setShutdownEarly(); | ||
} | ||
} | ||
|
||
@Override | ||
public String getType() | ||
{ | ||
return TYPE; | ||
} | ||
} | ||
|
||
protected class CheckpointNotice implements Notice | ||
{ | ||
private final int taskGroupId; | ||
|
@@ -1932,6 +1977,12 @@ private boolean isTaskInPendingCompletionGroups(String taskId) | |
return false; | ||
} | ||
|
||
@Override | ||
public void handoffTaskGroupsEarly(List<Integer> taskGroupIds) | ||
{ | ||
addNotice(new HandoffTaskGroupsNotice(taskGroupIds)); | ||
} | ||
|
||
private void discoverTasks() throws ExecutionException, InterruptedException | ||
{ | ||
int taskCount = 0; | ||
|
@@ -3143,14 +3194,15 @@ private void checkTaskDuration() throws ExecutionException, InterruptedException | |
} else { | ||
DateTime earliestTaskStart = computeEarliestTaskStartTime(group); | ||
|
||
if (earliestTaskStart.plus(ioConfig.getTaskDuration()).isBeforeNow()) { | ||
if (earliestTaskStart.plus(ioConfig.getTaskDuration()).isBeforeNow() || group.getShutdownEarly()) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This looks like it is still subject to the stopTaskCount limit ... for my use case I would want to be able to force this rollover without any other restrictions. If you have a use case that requires that it be subject to stopTaskCount, then can you add an optional parameter (e.g. "force=true") that will allow us to choose between the modes? Thanks. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. can't you just set stopTaskCount=0 to not have that config apply? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Changing stopTaskCount requires a Supervisor change, which defeats the whole purpose ;) My clusters generally run with stopTaskCount=1 which works very well ... but for high taskCount jobs it is much more likely that one task will be cycling at any given time, which would render this command useless ... Furthermore I may have to cycle more than one task, depending on how many of the tasks are running on the node being cycled ... that makes it that much more likely that I will go above the stopTaskCount level. ... unless this command queues up the tasks for cycling, but I didn't think that logic is in the code. So what happens now if you are at the stopTaskCount limit? Is the stop-early command ignored? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. in this implementation it would respect the stopTaskCount and not stop the task until a "stop slot" is available. 
I guess an alternate implementation could be to just short-circuit stopTaskCount and always stop tasks that have been stopped early; let me think about that a bit. I feel like having a config is too complicated, imo — it should be one behavior or the other. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. thinking about this some more i think it should probably just ignore stopTaskCount since the operator has manually requested the stop There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please add a comment in the code explaining why we chose to ignore stopTaskCount when There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If the task group is marked for early shutdown, we should log it. |
||
// if this task has run longer than the configured duration | ||
// as long as the pending task groups are less than the configured stop task count. | ||
// If shutdownEarly has been set, ignore stopTaskCount since this is a manual operator action. | ||
if (pendingCompletionTaskGroups.values() | ||
.stream() | ||
.mapToInt(CopyOnWriteArrayList::size) | ||
.sum() + stoppedTasks.get() | ||
< ioConfig.getMaxAllowedStops()) { | ||
< ioConfig.getMaxAllowedStops() || group.getShutdownEarly()) { | ||
log.info( | ||
"Task group [%d] has run for [%s]. Stopping.", | ||
groupId, | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think this is the expected format for a REST API. Also it looks like taskGroupIds are integers, so I don't think quotes are needed around the numbers