Improve Cargo's scheduling of builds #5100

Merged (1 commit, Mar 1, 2018)
1 change: 1 addition & 0 deletions src/cargo/ops/cargo_rustc/job_queue.rs
@@ -111,6 +111,7 @@ impl<'a> JobQueue<'a> {
    /// possible along each dependency chain.
    pub fn execute(&mut self, cx: &mut Context) -> CargoResult<()> {
        let _p = profile::start("executing the job graph");
        self.queue.queue_finished();

        // We need to give a handle to the send half of our message queue to the
        // jobserver helper thread. Unfortunately though we need the handle to be
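For orientation, here is a minimal sketch of the calling pattern this one-line change establishes: all units are queued, `queue_finished` runs exactly once to compute the depth metadata, and only then does the drain loop start calling `dequeue`. This is not Cargo's actual executor; the `cargo::util` import path and the `run` callback are assumptions for illustration.

use std::hash::Hash;

use cargo::util::{DependencyQueue, Freshness}; // re-export path assumed

// Single-threaded stand-in for the real job loop: take the deepest ready
// unit, "build" it, then mark it finished so its dependents become ready.
fn drain<K, V, F>(mut queue: DependencyQueue<K, V>, mut run: F)
where
    K: Hash + Eq + Clone,
    F: FnMut(&K, V),
{
    queue.queue_finished(); // must happen before the first `dequeue`

    while let Some((fresh, key, value)) = queue.dequeue() {
        run(&key, value);
        queue.finish(&key, fresh);
    }
}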
87 changes: 84 additions & 3 deletions src/cargo/util/dependency_queue.rs
Expand Up @@ -34,6 +34,9 @@ pub struct DependencyQueue<K: Eq + Hash, V> {
    /// The packages which are currently being built, waiting for a call to
    /// `finish`.
    pending: HashSet<K>,

    /// Topological depth of each key
    depth: HashMap<K, usize>,
}

/// Indication of the freshness of a package.
@@ -66,6 +69,7 @@ impl<K: Hash + Eq + Clone, V> DependencyQueue<K, V> {
            reverse_dep_map: HashMap::new(),
            dirty: HashSet::new(),
            pending: HashSet::new(),
            depth: HashMap::new(),
        }
    }

@@ -97,14 +101,60 @@ impl<K: Hash + Eq + Clone, V> DependencyQueue<K, V> {
        &mut slot.insert((my_dependencies, value)).1
    }

    /// All nodes have been added, calculate some internal metadata and prepare
    /// for `dequeue`.
    pub fn queue_finished(&mut self) {
        for key in self.dep_map.keys() {
            depth(key, &self.reverse_dep_map, &mut self.depth);
        }

        fn depth<K: Hash + Eq + Clone>(
            key: &K,
            map: &HashMap<K, HashSet<K>>,
            results: &mut HashMap<K, usize>,
        ) -> usize {
            const IN_PROGRESS: usize = !0;

            if let Some(&depth) = results.get(key) {
                assert_ne!(depth, IN_PROGRESS, "cycle in DependencyQueue");
                return depth;
            }

            results.insert(key.clone(), IN_PROGRESS);

            let depth = 1 + map.get(&key)
                .into_iter()
                .flat_map(|it| it)
                .map(|dep| depth(dep, map, results))
                .max()
                .unwrap_or(0);

            *results.get_mut(key).unwrap() = depth;

            depth
        }
    }

    /// Dequeues a package that is ready to be built.
    ///
    /// A package is ready to be built when it has 0 un-built dependencies. If
    /// `None` is returned then no packages are ready to be built.
    pub fn dequeue(&mut self) -> Option<(Freshness, K, V)> {
-       let key = match self.dep_map.iter()
-           .find(|&(_, &(ref deps, _))| deps.is_empty())
-           .map(|(key, _)| key.clone()) {
        // Look at all our crates and find everything that's ready to build (no
        // deps). After we've got that candidate set select the one which has
        // the maximum depth in the dependency graph. This way we should
        // hopefully keep CPUs hottest the longest by ensuring that long
        // dependency chains are scheduled early on in the build process and the
        // leaves higher in the tree can fill in the cracks later.
        //
        // TODO: it'd be best here to throw in a heuristic of crate size as
        // well. For example how long did this crate historically take to
        // compile? How large is its source code? etc.
        let next = self.dep_map.iter()
            .filter(|&(_, &(ref deps, _))| deps.is_empty())
            .map(|(key, _)| key.clone())
            .max_by_key(|k| self.depth[k]);
        let key = match next {
            Some(key) => key,
            None => return None
        };
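To make the heuristic concrete, here is a standalone sketch (not code from this PR; the names and the `main` harness are illustrative) that recomputes the depth metric for the same five-crate graph used by the test further down: 2 depends on 1, 4 depends on 2 and 3, and 5 depends on 4 and 3.

use std::collections::{HashMap, HashSet};

fn main() {
    // `reverse` plays the role of `reverse_dep_map`: it maps each crate to
    // the set of crates that depend on it.
    let mut reverse: HashMap<u32, HashSet<u32>> = HashMap::new();
    for &(dep, dependent) in &[(1, 2), (2, 4), (3, 4), (4, 5), (3, 5)] {
        reverse.entry(dep).or_insert_with(HashSet::new).insert(dependent);
        reverse.entry(dependent).or_insert_with(HashSet::new);
    }

    // Same recursion as the new `depth` helper, minus the cycle check: a
    // crate's depth is one more than the deepest crate that depends on it.
    fn depth(key: u32, map: &HashMap<u32, HashSet<u32>>, memo: &mut HashMap<u32, usize>) -> usize {
        if let Some(&d) = memo.get(&key) {
            return d;
        }
        let d = 1 + map[&key].iter().map(|&k| depth(k, map, memo)).max().unwrap_or(0);
        memo.insert(key, d);
        d
    }

    let mut memo = HashMap::new();
    for key in 1u32..6 {
        println!("depth({}) = {}", key, depth(key, &reverse, &mut memo));
    }
    // Prints 4, 3, 3, 2, 1 for crates 1 through 5. With nothing built yet,
    // crates 1 and 3 form the ready set, and `max_by_key` prefers 1 (depth 4)
    // over 3 (depth 3), so the long chain 1 -> 2 -> 4 -> 5 starts compiling
    // first and the shallower crates fill in spare parallelism later.
}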
@@ -142,3 +192,34 @@ impl<K: Hash + Eq + Clone, V> DependencyQueue<K, V> {
        }
    }
}

#[cfg(test)]
mod test {
    use super::{DependencyQueue, Freshness};

    #[test]
    fn deep_first() {
        let mut q = DependencyQueue::new();

        q.queue(Freshness::Fresh, 1, (), &[]);
        q.queue(Freshness::Fresh, 2, (), &[1]);
        q.queue(Freshness::Fresh, 3, (), &[]);
        q.queue(Freshness::Fresh, 4, (), &[2, 3]);
        q.queue(Freshness::Fresh, 5, (), &[4, 3]);
        q.queue_finished();

        assert_eq!(q.dequeue(), Some((Freshness::Fresh, 1, ())));
        assert_eq!(q.dequeue(), Some((Freshness::Fresh, 3, ())));
        assert_eq!(q.dequeue(), None);
        q.finish(&3, Freshness::Fresh);
        assert_eq!(q.dequeue(), None);
        q.finish(&1, Freshness::Fresh);
        assert_eq!(q.dequeue(), Some((Freshness::Fresh, 2, ())));
        assert_eq!(q.dequeue(), None);
        q.finish(&2, Freshness::Fresh);
        assert_eq!(q.dequeue(), Some((Freshness::Fresh, 4, ())));
        assert_eq!(q.dequeue(), None);
        q.finish(&4, Freshness::Fresh);
        assert_eq!(q.dequeue(), Some((Freshness::Fresh, 5, ())));
    }
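
    // A hypothetical companion test, not part of this PR, spelling out the
    // depth values behind `deep_first` above. The private `depth` map is
    // reachable here because this test module lives in the same file as
    // `DependencyQueue`.
    #[test]
    fn depth_values() {
        let mut q = DependencyQueue::new();
        q.queue(Freshness::Fresh, 1, (), &[]);
        q.queue(Freshness::Fresh, 2, (), &[1]);
        q.queue(Freshness::Fresh, 3, (), &[]);
        q.queue(Freshness::Fresh, 4, (), &[2, 3]);
        q.queue(Freshness::Fresh, 5, (), &[4, 3]);
        q.queue_finished();

        // Crate 1 heads the longest chain (1 <- 2 <- 4 <- 5), which is why it
        // is dequeued before crate 3 even though both start out ready.
        assert_eq!(q.depth[&1], 4);
        assert_eq!(q.depth[&2], 3);
        assert_eq!(q.depth[&3], 3);
        assert_eq!(q.depth[&4], 2);
        assert_eq!(q.depth[&5], 1);
    }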
}