
Remove Runtime trait
This commit removes most of the remaining runtime infrastructure related
to the green/native split. In particular, it removes the `Runtime` trait
and instead inlines the native implementation.

Closes rust-lang#17325

[breaking-change]
aturon committed Nov 21, 2014
1 parent 770378a commit ad022b1
Showing 5 changed files with 172 additions and 503 deletions.
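In practical terms, the breaking change is that there is no longer a `Runtime` trait to implement or to install on a task. A minimal sketch of the downstream pattern this removes, using only items that appear in the diff below (the snippet itself is illustrative commentary, not part of the commit):

    // Before this commit: libnative built a task and installed its own
    // `Runtime` implementation (`Ops`, shown below) into it.
    let mut task = box Task::new();
    task.put_runtime(ops());   // hands the boxed runtime object to the task
    // After this commit neither the trait nor this installation step exists;
    // per the commit message, the native (1:1) behavior is inlined instead.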
246 changes: 0 additions & 246 deletions src/libnative/task.rs
@@ -24,252 +24,6 @@
use std::rt::task::{Task, BlockedTask, TaskOpts};
use std::rt::thread::Thread;
use std::rt;

use std::task::{TaskBuilder, Spawner};

/// Creates a new Task which is ready to execute as a 1:1 task.
pub fn new(stack_bounds: (uint, uint), stack_guard: uint) -> Box<Task> {
    let mut task = box Task::new();
    let mut ops = ops();
    ops.stack_bounds = stack_bounds;
    ops.stack_guard = stack_guard;
    task.put_runtime(ops);
    return task;
}

fn ops() -> Box<Ops> {
    box Ops {
        lock: unsafe { NativeMutex::new() },
        awoken: false,
        // these *should* get overwritten
        stack_bounds: (0, 0),
        stack_guard: 0
    }
}

/// A spawner for native tasks
pub struct NativeSpawner;

impl Spawner for NativeSpawner {
    fn spawn(self, opts: TaskOpts, f: proc():Send) {
        let TaskOpts { name, stack_size, on_exit } = opts;

        let mut task = box Task::new();
        task.name = name;
        task.death.on_exit = on_exit;

        let stack = stack_size.unwrap_or(rt::min_stack());
        let task = task;
        let ops = ops();

        // Note that this increment must happen *before* the spawn in order to
        // guarantee that if this task exits it will always end up waiting for
        // the spawned task to exit.
        let token = bookkeeping::increment();

        // Spawning a new OS thread guarantees that __morestack will never get
        // triggered, but we must manually set up the actual stack bounds once
        // this function starts executing. This raises the lower limit by a bit
        // because by the time that this function is executing we've already
        // consumed at least a little bit of stack (we don't know the exact byte
        // address at which our stack started).
        Thread::spawn_stack(stack, proc() {
            let something_around_the_top_of_the_stack = 1;
            let addr = &something_around_the_top_of_the_stack as *const int;
            let my_stack = addr as uint;
            unsafe {
                stack::record_os_managed_stack_bounds(my_stack - stack + 1024,
                                                      my_stack);
            }
            let mut ops = ops;
            ops.stack_guard = rt::thread::current_guard_page();
            ops.stack_bounds = (my_stack - stack + 1024, my_stack);

            let mut f = Some(f);
            let mut task = task;
            task.put_runtime(ops);
            drop(task.run(|| { f.take().unwrap()() }).destroy());
            drop(token);
        })
    }
}

/// An extension trait adding a `native` configuration method to `TaskBuilder`.
pub trait NativeTaskBuilder {
    fn native(self) -> TaskBuilder<NativeSpawner>;
}

impl<S: Spawner> NativeTaskBuilder for TaskBuilder<S> {
    fn native(self) -> TaskBuilder<NativeSpawner> {
        self.spawner(NativeSpawner)
    }
}
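For context, this is roughly how the extension trait was used with the pre-1.0 `std::task::TaskBuilder` API; the builder calls are recalled from that era and the task body is illustrative, and the whole pattern disappears along with `NativeSpawner`:

    use std::task::TaskBuilder;
    use native::task::NativeTaskBuilder;

    // Request a 1:1 task backed by its own OS thread instead of a green task.
    TaskBuilder::new().native().spawn(proc() {
        println!("running on a dedicated OS thread");
    });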

// This structure is the glue between channels and the 1:1 scheduling mode. This
// structure is allocated once per task.
struct Ops {
    lock: NativeMutex,       // native synchronization
    awoken: bool,            // used to prevent spurious wakeups

    // This field holds the known bounds of the stack in (lo, hi) form. Not all
    // native tasks necessarily know their precise bounds, hence this is
    // optional.
    stack_bounds: (uint, uint),

    stack_guard: uint
}

impl rt::Runtime for Ops {
    fn yield_now(self: Box<Ops>, mut cur_task: Box<Task>) {
        // put the task back in TLS and then invoke the OS thread yield
        cur_task.put_runtime(self);
        Local::put(cur_task);
        Thread::yield_now();
    }

    fn maybe_yield(self: Box<Ops>, mut cur_task: Box<Task>) {
        // just put the task back in TLS, on OS threads we never need to
        // opportunistically yield b/c the OS will do that for us (preemption)
        cur_task.put_runtime(self);
        Local::put(cur_task);
    }

    fn wrap(self: Box<Ops>) -> Box<Any+'static> {
        self as Box<Any+'static>
    }

    fn stack_bounds(&self) -> (uint, uint) { self.stack_bounds }

    fn stack_guard(&self) -> Option<uint> {
        if self.stack_guard != 0 {
            Some(self.stack_guard)
        } else {
            None
        }
    }

    fn can_block(&self) -> bool { true }

    // This function gets a little interesting. There are a few safety and
    // ownership violations going on here, but this is all done in the name of
    // shared state. Additionally, all of the violations are protected with a
    // mutex, so in theory there are no races.
    //
    // The first thing we need to do is to get a pointer to the task's internal
    // mutex. This address will not be changing (because the task is allocated
    // on the heap). We must have this handle separately because the task will
    // have its ownership transferred to the given closure. We're guaranteed,
    // however, that this memory will remain valid because *this* is the current
    // task's execution thread.
    //
    // The next weird part is where ownership of the task actually goes. We
    // relinquish it to the `f` blocking function, but upon returning this
    // function needs to replace the task back in TLS. There is no communication
    // from the wakeup thread back to this thread about the task pointer, and
    // there's really no need to. In order to get around this, we cast the task
    // to a `uint` which is then used at the end of this function to cast back
    // to a `Box<Task>` object. Naturally, this looks like it violates
    // ownership semantics in that there may be two `Box<Task>` objects.
    //
    // The fun part is that the wakeup half of this implementation knows to
    // "forget" the task on the other end. This means that the awakening half of
    // things silently relinquishes ownership back to this thread, but not in a
    // way that the compiler can understand. The task's memory is always valid
    // for both tasks because these operations are all done inside of a mutex.
    //
    // You'll also find that if blocking fails (the `f` function hands the
    // BlockedTask back to us), we will `mem::forget` the handles. The
    // reasoning for this is the same logic as above in that the task silently
    // transfers ownership via the `uint`, not through normal compiler
    // semantics.
    //
    // On a mildly unrelated note, it should also be pointed out that OS
    // condition variables are susceptible to spurious wakeups, which we need to
    // be ready for. In order to accommodate for this fact, we have an extra
    // `awoken` field which indicates whether we were actually woken up via some
    // invocation of `reawaken`. This flag is only ever accessed inside the
    // lock, so there's no need to make it atomic.
    fn deschedule(mut self: Box<Ops>,
                  times: uint,
                  mut cur_task: Box<Task>,
                  f: |BlockedTask| -> Result<(), BlockedTask>) {
        let me = &mut *self as *mut Ops;
        cur_task.put_runtime(self);

        unsafe {
            let cur_task_dupe = &mut *cur_task as *mut Task;
            let task = BlockedTask::block(cur_task);

            if times == 1 {
                let guard = (*me).lock.lock();
                (*me).awoken = false;
                match f(task) {
                    Ok(()) => {
                        while !(*me).awoken {
                            guard.wait();
                        }
                    }
                    Err(task) => { mem::forget(task.wake()); }
                }
            } else {
                let iter = task.make_selectable(times);
                let guard = (*me).lock.lock();
                (*me).awoken = false;

                // Apply the given closure to all of the "selectable tasks",
                // bailing on the first one that produces an error. Note that
                // care must be taken such that when an error occurs, we
                // may not own the task, so we may still have to wait for the
                // task to become available. In other words, if task.wake()
                // returns `None`, then someone else has ownership and we must
                // wait for their signal.
                match iter.map(f).filter_map(|a| a.err()).next() {
                    None => {}
                    Some(task) => {
                        match task.wake() {
                            Some(task) => {
                                mem::forget(task);
                                (*me).awoken = true;
                            }
                            None => {}
                        }
                    }
                }
                while !(*me).awoken {
                    guard.wait();
                }
            }
            // re-acquire ownership of the task
            cur_task = mem::transmute(cur_task_dupe);
        }

        // put the task back in TLS, and everything is as it once was.
        Local::put(cur_task);
    }

    // See the comments on `deschedule` for why the task is forgotten here, and
    // why it's valid to do so.
    fn reawaken(mut self: Box<Ops>, mut to_wake: Box<Task>) {
        unsafe {
            let me = &mut *self as *mut Ops;
            to_wake.put_runtime(self);
            mem::forget(to_wake);
            let guard = (*me).lock.lock();
            (*me).awoken = true;
            guard.signal();
        }
    }

    fn spawn_sibling(self: Box<Ops>,
                     mut cur_task: Box<Task>,
                     opts: TaskOpts,
                     f: proc():Send) {
        cur_task.put_runtime(self);
        Local::put(cur_task);

        NativeSpawner.spawn(opts, f);
    }
}

#[cfg(test)]
mod tests {
    use std::rt::local::Local;
40 changes: 0 additions & 40 deletions src/librustrt/lib.rs
@@ -39,11 +39,6 @@
pub use self::unwind::{begin_unwind, begin_unwind_fmt};

use core::prelude::*;

use alloc::boxed::Box;
use core::any::Any;

use task::{Task, BlockedTask, TaskOpts};

mod macros;

mod at_exit_imp;
@@ -60,46 +55,11 @@
pub mod exclusive;
pub mod local;
pub mod local_data;
pub mod mutex;
pub mod rtio;
pub mod stack;
pub mod task;
pub mod thread;
pub mod unwind;

/// The interface to the current runtime.
///
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
    // Necessary scheduling functions, used for channels and blocking I/O
    // (sometimes).
    fn yield_now(self: Box<Self>, cur_task: Box<Task>);
    fn maybe_yield(self: Box<Self>, cur_task: Box<Task>);
    fn deschedule(self: Box<Self>,
                  times: uint,
                  cur_task: Box<Task>,
                  f: |BlockedTask| -> Result<(), BlockedTask>);
    fn reawaken(self: Box<Self>, to_wake: Box<Task>);

    // Miscellaneous calls which are very different depending on what context
    // you're in.
    fn spawn_sibling(self: Box<Self>,
                     cur_task: Box<Task>,
                     opts: TaskOpts,
                     f: proc():Send);
    /// The (low, high) edges of the current stack.
    fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
    /// The last writable byte of the stack next to the guard page
    fn stack_guard(&self) -> Option<uint>;
    fn can_block(&self) -> bool;

    // FIXME: This is a serious code smell and this should not exist at all.
    fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
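To make the abstraction concrete, here is a minimal, illustrative sketch of the dispatch pattern the trait enabled; the free function and its name are invented for illustration and do not appear in the source:

    // A task carried some boxed `Runtime` and called through it without
    // knowing whether the 1:1 (libnative) or M:N (libgreen) implementation
    // was installed; removing the trait removes this indirection.
    fn yield_current(runtime: Box<Runtime + Send>, cur_task: Box<Task>) {
        runtime.yield_now(cur_task);
    }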

/// The default error code of the rust runtime if the main task panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: int = 101;
45 changes: 0 additions & 45 deletions src/librustrt/rtio.rs

This file was deleted.

