core: Remove unwrap_shared_mutable_state. #4436
brson committed Feb 28, 2013
1 parent b01d2ba commit 78d5091
Showing 2 changed files with 5 additions and 251 deletions.
190 changes: 4 additions & 186 deletions src/libcore/private.rs
@@ -107,20 +107,9 @@ fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
* Shared state & exclusive ARC
****************************************************************************/

struct UnwrapProtoInner {
contents: Option<(comm::ChanOne<()>, comm::PortOne<bool>)>,
}

// An unwrapper uses this protocol to communicate with the "other" task that
// drops the last refcount on an arc. Unfortunately this can't be a proper
// pipe protocol because the unwrapper has to access both stages at once.
type UnwrapProto = ~UnwrapProtoInner;

struct ArcData<T> {
mut count: libc::intptr_t,
mut unwrapper: int, // either a UnwrapProto or 0
// FIXME(#3224) should be able to make this non-option to save memory, and
// in unwrap() use "let ~ArcData { data: result, _ } = thing" to unwrap it
// FIXME(#3224) should be able to make this non-option to save memory
mut data: Option<T>,
}

@@ -131,37 +120,13 @@ struct ArcDestruct<T> {
impl<T> Drop for ArcDestruct<T>{
fn finalize(&self) {
unsafe {
if self.data.is_null() {
return; // Happens when destructing an unwrapper's handle.
}
do task::unkillable {
let data: ~ArcData<T> = cast::reinterpret_cast(&self.data);
let new_count =
intrinsics::atomic_xsub(&mut data.count, 1) - 1;
assert new_count >= 0;
if new_count == 0 {
// Were we really last, or should we hand off to an
// unwrapper? It's safe to not xchg because the unwrapper
// will set the unwrap lock *before* dropping his/her
// reference. In effect, being here means we're the only
// *awake* task with the data.
if data.unwrapper != 0 {
let mut p: UnwrapProto =
cast::reinterpret_cast(&data.unwrapper);
let (message, response) =
option::swap_unwrap(&mut p.contents);
// Send 'ready' and wait for a response.
comm::send_one(message, ());
// Unkillable wait. Message guaranteed to come.
if comm::recv_one(response) {
// Other task got the data.
cast::forget(data);
} else {
// Other task was killed. drop glue takes over.
}
} else {
// drop glue takes over.
}
// drop glue takes over.
} else {
cast::forget(data);
}
@@ -176,79 +141,6 @@ fn ArcDestruct<T>(data: *libc::c_void) -> ArcDestruct<T> {
}
}
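
The Drop impl above performs the classic last-reference check: atomically decrement the count, and only the task that observes it reaching zero runs the destructor (or, before this commit, hands the data off to a waiting unwrapper). Below is a minimal modern-Rust sketch of that decrement-and-check pattern; `Inner` and `SharedBox` are illustrative names, not the types in this file, and the Release/Acquire ordering is an assumption of the sketch rather than something stated in the diff.

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Illustrative stand-ins for ArcData/ArcDestruct; not the types in private.rs.
struct Inner<T> {
    count: AtomicUsize,
    #[allow(dead_code)]
    data: Option<T>, // mirrors the Option<T> payload slot
}

struct SharedBox<T> {
    ptr: *mut Inner<T>,
}

impl<T> Drop for SharedBox<T> {
    fn drop(&mut self) {
        unsafe {
            // Decrement and check whether we were the last reference, like
            // the `atomic_xsub(&mut data.count, 1) - 1 == 0` check above.
            if (*self.ptr).count.fetch_sub(1, Ordering::Release) == 1 {
                // Make other owners' writes visible before freeing.
                fence(Ordering::Acquire);
                // Reclaim the allocation; T's drop glue runs here.
                drop(Box::from_raw(self.ptr));
            }
            // Otherwise other references remain and we leave the allocation
            // alone -- the moral equivalent of `cast::forget(data)`.
        }
    }
}

fn main() {
    let raw = Box::into_raw(Box::new(Inner {
        count: AtomicUsize::new(2),
        data: Some(String::from("hello")),
    }));
    let a = SharedBox { ptr: raw };
    let b = SharedBox { ptr: raw };
    drop(a); // count 2 -> 1: allocation kept alive
    drop(b); // count 1 -> 0: allocation freed
}
```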

pub unsafe fn unwrap_shared_mutable_state<T:Owned>(rc: SharedMutableState<T>)
-> T {
struct DeathThroes<T> {
mut ptr: Option<~ArcData<T>>,
mut response: Option<comm::ChanOne<bool>>,
}

impl<T> Drop for DeathThroes<T>{
fn finalize(&self) {
unsafe {
let response = option::swap_unwrap(&mut self.response);
// In case we get killed early, we need to tell the person who
// tried to wake us whether they should hand-off the data to
// us.
if task::failing() {
comm::send_one(response, false);
// Either this swap_unwrap or the one below (at "Got
// here") ought to run.
cast::forget(option::swap_unwrap(&mut self.ptr));
} else {
assert self.ptr.is_none();
comm::send_one(response, true);
}
}
}
}

do task::unkillable {
let ptr: ~ArcData<T> = cast::reinterpret_cast(&rc.data);
let (p1,c1) = comm::oneshot(); // ()
let (p2,c2) = comm::oneshot(); // bool
let mut server: UnwrapProto = ~UnwrapProtoInner {
contents: Some((c1,p2))
};
let serverp: int = cast::transmute(server);
// Try to put our server end in the unwrapper slot.
if compare_and_swap(&mut ptr.unwrapper, 0, serverp) {
// Got in. Step 0: Tell destructor not to run. We are now it.
rc.data = ptr::null();
// Step 1 - drop our own reference.
let new_count = intrinsics::atomic_xsub(&mut ptr.count, 1) - 1;
//assert new_count >= 0;
if new_count == 0 {
// We were the last owner. Can unwrap immediately.
// Also we have to free the server endpoints.
let _server: UnwrapProto = cast::transmute(serverp);
option::swap_unwrap(&mut ptr.data)
// drop glue takes over.
} else {
// The *next* person who sees the refcount hit 0 will wake us.
let end_result =
DeathThroes { ptr: Some(ptr),
response: Some(c2) };
let mut p1 = Some(p1); // argh
do task::rekillable {
comm::recv_one(option::swap_unwrap(&mut p1));
}
// Got here. Back in the 'unkillable' without getting killed.
// Recover ownership of ptr, then take the data out.
let ptr = option::swap_unwrap(&mut end_result.ptr);
option::swap_unwrap(&mut ptr.data)
// drop glue takes over.
}
} else {
// Somebody else was trying to unwrap. Avoid guaranteed deadlock.
cast::forget(ptr);
// Also we have to free the (rejected) server endpoints.
let _server: UnwrapProto = cast::transmute(serverp);
fail!(~"Another task is already unwrapping this ARC!");
}
}
}
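
The removed unwrap_shared_mutable_state and the Drop impl above communicated through the UnwrapProto handshake described earlier: the unwrapper parks itself on a "ready" channel, the task dropping the last reference signals it and then waits for a bool saying whether the unwrapper actually took the data or was killed first. The following is a toy reconstruction of that handshake with std::sync::mpsc channels standing in for the old comm::oneshot pipes; the names and channel types are illustrative, not the removed API.

```rust
use std::sync::mpsc;
use std::thread;

fn main() {
    // dropper -> unwrapper: "the refcount just hit zero"
    let (ready_tx, ready_rx) = mpsc::channel::<()>();
    // unwrapper -> dropper: "did I take ownership of the data?"
    let (resp_tx, resp_rx) = mpsc::channel::<bool>();

    // The task that drops the last reference (the Drop impl's role).
    let dropper = thread::spawn(move || {
        // ...pretend we just decremented the count to zero...
        ready_tx.send(()).unwrap();                      // comm::send_one(message, ())
        let unwrapper_took_it = resp_rx.recv().unwrap(); // comm::recv_one(response)
        if unwrapper_took_it {
            // Forget the data: the unwrapper owns it now.
        } else {
            // The unwrapper died; run the normal drop glue here.
        }
    });

    // The unwrapping task blocks until the last reference is gone...
    ready_rx.recv().unwrap();
    // ...takes the data out, and reports success.
    resp_tx.send(true).unwrap();

    dropper.join().unwrap();
}
```

The second channel is what lets the dropper decide between running drop glue itself (false) and forgetting the data because the unwrapper now owns it (true).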

/**
* COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
*
@@ -259,7 +151,7 @@ pub type SharedMutableState<T> = ArcDestruct<T>;

pub unsafe fn shared_mutable_state<T:Owned>(data: T) ->
SharedMutableState<T> {
let data = ~ArcData { count: 1, unwrapper: 0, data: Some(data) };
let data = ~ArcData { count: 1, data: Some(data) };
unsafe {
let ptr = cast::transmute(data);
ArcDestruct(ptr)
@@ -413,22 +305,14 @@ impl<T:Owned> Exclusive<T> {
}
}

// FIXME(#3724) make this a by-move method on the exclusive
pub fn unwrap_exclusive<T:Owned>(arc: Exclusive<T>) -> T {
let Exclusive { x: x } = arc;
let inner = unsafe { unwrap_shared_mutable_state(x) };
let ExData { data: data, _ } = inner;
data
}

#[cfg(test)]
pub mod tests {
use core::option::{None, Some};

use cell::Cell;
use comm;
use option;
use private::{exclusive, unwrap_exclusive};
use private::exclusive;
use result;
use task;
use uint;
@@ -479,70 +363,4 @@ pub mod tests {
assert *one == 1;
}
}

#[test]
pub fn exclusive_unwrap_basic() {
let x = exclusive(~~"hello");
assert unwrap_exclusive(x) == ~~"hello";
}

#[test]
pub fn exclusive_unwrap_contended() {
let x = exclusive(~~"hello");
let x2 = Cell(x.clone());
do task::spawn {
let x2 = x2.take();
do x2.with |_hello| { }
task::yield();
}
assert unwrap_exclusive(x) == ~~"hello";

// Now try the same thing, but with the child task blocking.
let x = exclusive(~~"hello");
let x2 = Cell(x.clone());
let mut res = None;
do task::task().future_result(|+r| res = Some(r)).spawn {
let x2 = x2.take();
assert unwrap_exclusive(x2) == ~~"hello";
}
// Have to get rid of our reference before blocking.
{ let _x = x; } // FIXME(#3161) util::ignore doesn't work here
let res = option::swap_unwrap(&mut res);
res.recv();
}

#[test] #[should_fail] #[ignore(cfg(windows))]
pub fn exclusive_unwrap_conflict() {
let x = exclusive(~~"hello");
let x2 = Cell(x.clone());
let mut res = None;
do task::task().future_result(|+r| res = Some(r)).spawn {
let x2 = x2.take();
assert unwrap_exclusive(x2) == ~~"hello";
}
assert unwrap_exclusive(x) == ~~"hello";
let res = option::swap_unwrap(&mut res);
// See #4689 for why this can't be just "res.recv()".
assert res.recv() == task::Success;
}

#[test] #[ignore(cfg(windows))]
pub fn exclusive_unwrap_deadlock() {
// This is not guaranteed to get to the deadlock before being killed,
// but it will show up sometimes, and if the deadlock were not there,
// the test would nondeterministically fail.
let result = do task::try {
// a task that has two references to the same exclusive will
// deadlock when it unwraps. nothing to be done about that.
let x = exclusive(~~"hello");
let x2 = x.clone();
do task::spawn {
for 10.times { task::yield(); } // try to let the unwrapper go
fail!(); // punt it awake from its deadlock
}
let _z = unwrap_exclusive(x);
do x2.with |_hello| { }
};
assert result.is_err();
}
}
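
The deleted exclusive_unwrap_* tests exercised blocking unwrap under contention, conflict, and self-deadlock. A rough modern analogue of the contended case, assuming today's std::sync::Arc and a spin on Arc::try_unwrap in place of the old blocking wait (a sketch of the idea, not a port of the test):

```rust
use std::sync::Arc;
use std::thread;

fn main() {
    let x = Arc::new(String::from("hello"));
    let x2 = Arc::clone(&x);

    // The child briefly uses a clone, then drops it on exit.
    let child = thread::spawn(move || {
        assert_eq!(x2.as_str(), "hello");
    });

    // Spin until we hold the last reference, then move the value out.
    let mut x = x;
    let hello = loop {
        match Arc::try_unwrap(x) {
            Ok(s) => break s,           // we were the last owner
            Err(still_shared) => {      // the child still holds a clone
                x = still_shared;
                thread::yield_now();
            }
        }
    };
    assert_eq!(hello, "hello");

    child.join().unwrap();
}
```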
66 changes: 1 addition & 65 deletions src/libstd/arc.rs
@@ -21,7 +21,7 @@ use core::cell::Cell
use core::pipes;
use core::prelude::*;
use core::private::{SharedMutableState, shared_mutable_state};
use core::private::{clone_shared_mutable_state, unwrap_shared_mutable_state};
use core::private::{clone_shared_mutable_state};
use core::private::{get_shared_mutable_state, get_shared_immutable_state};
use core::ptr;
use core::task;
@@ -104,20 +104,6 @@ pub fn clone<T:Const + Owned>(rc: &ARC<T>) -> ARC<T> {
ARC { x: unsafe { clone_shared_mutable_state(&rc.x) } }
}

/**
* Retrieve the data back out of the ARC. This function blocks until the
* reference given to it is the last existing one, and then unwrap the data
* instead of destroying it.
*
* If multiple tasks call unwrap, all but the first will fail. Do not call
* unwrap from a task that holds another reference to the same ARC; it is
* guaranteed to deadlock.
*/
pub fn unwrap<T:Const + Owned>(rc: ARC<T>) -> T {
let ARC { x: x } = rc;
unsafe { unwrap_shared_mutable_state(x) }
}
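
The doc comment above describes blocking semantics: wait until the caller holds the last reference, and deadlock if the caller itself holds another one. Today's std::sync::Arc::try_unwrap makes the opposite trade-off, which is worth contrasting; this is a small example of the modern std API, not the removed function.

```rust
use std::sync::Arc;

// Arc::try_unwrap never blocks and never deadlocks; it simply fails while
// other references are still alive.
fn main() {
    let a = Arc::new(vec![1, 2, 3]);
    let b = Arc::clone(&a);

    // Two references exist, so unwrapping fails and hands the Arc back.
    let a = Arc::try_unwrap(a).unwrap_err();

    drop(b);

    // Now `a` is the last reference and the data can be moved out.
    let v = Arc::try_unwrap(a).expect("sole owner");
    assert_eq!(v, vec![1, 2, 3]);
}
```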

impl<T:Const + Owned> Clone for ARC<T> {
fn clone(&self) -> ARC<T> {
clone(self)
Expand Down Expand Up @@ -213,23 +199,6 @@ impl<T:Owned> &MutexARC<T> {
}
}

/**
* Retrieves the data, blocking until all other references are dropped,
* exactly as arc::unwrap.
*
* Will additionally fail if another task has failed while accessing the arc.
*/
// FIXME(#3724) make this a by-move method on the arc
pub fn unwrap_mutex_arc<T:Owned>(arc: MutexARC<T>) -> T {
let MutexARC { x: x } = arc;
let inner = unsafe { unwrap_shared_mutable_state(x) };
let MutexARCInner { failed: failed, data: data, _ } = inner;
if failed {
fail!(~"Can't unwrap poisoned MutexARC - another task failed inside!")
}
data
}
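
unwrap_mutex_arc layered a poison check on top of the unwrap: if another task failed while holding the lock, the unwrap fails too. A hedged modern analogue uses Arc::try_unwrap plus Mutex::into_inner, whose LockResult reports exactly that kind of poisoning; this is today's std API, not this crate's.

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let m = Arc::new(Mutex::new(1));
    let m2 = Arc::clone(&m);

    // Poison the mutex by panicking while the lock is held.
    let _ = thread::spawn(move || {
        let _guard = m2.lock().unwrap();
        panic!("task fails inside the lock");
    })
    .join();

    // Unwrap the Arc once we are the sole owner, then consume the Mutex.
    let mutex = Arc::try_unwrap(m).expect("child exited, so we are the sole owner");
    match mutex.into_inner() {
        Ok(value) => println!("unwrapped {}", value),
        Err(poisoned) => println!("poisoned; data was {}", poisoned.into_inner()),
    }
}
```

Because the Arc has already been unwrapped, into_inner consumes the mutex without taking the lock at all.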

// Common code for {mutex.access,rwlock.write}{,_cond}.
#[inline(always)]
#[doc(hidden)]
Expand Down Expand Up @@ -411,24 +380,6 @@ impl<T:Const + Owned> &RWARC<T> {
}
}

/**
* Retrieves the data, blocking until all other references are dropped,
* exactly as arc::unwrap.
*
* Will additionally fail if another task has failed while accessing the arc
* in write mode.
*/
// FIXME(#3724) make this a by-move method on the arc
pub fn unwrap_rw_arc<T:Const + Owned>(arc: RWARC<T>) -> T {
let RWARC { x: x, _ } = arc;
let inner = unsafe { unwrap_shared_mutable_state(x) };
let RWARCInner { failed: failed, data: data, _ } = inner;
if failed {
fail!(~"Can't unwrap poisoned RWARC - another task failed inside!")
}
data
}
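
unwrap_rw_arc is the same pattern for RWARC, except that only a failure in write mode counts. That matches std's RwLock, which is only poisoned by a panic while the write lock is held; a brief sketch with illustrative data, again using the modern std API rather than this crate's.

```rust
use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let lock = Arc::new(RwLock::new(vec![1, 2, 3]));
    let reader = Arc::clone(&lock);

    // A panic under a *read* guard does not poison the lock.
    let _ = thread::spawn(move || {
        let _guard = reader.read().unwrap();
        panic!("reader fails");
    })
    .join();

    let lock = Arc::try_unwrap(lock).expect("sole owner after the reader exits");
    let data = lock.into_inner().expect("read-mode panic does not poison");
    assert_eq!(data, vec![1, 2, 3]);
}
```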

// Borrowck rightly complains about immutably aliasing the rwlock in order to
// lock it. This wraps the unsafety, with the justification that the 'lock'
// field is never overwritten; only 'failed' and 'data'.
Expand Down Expand Up @@ -585,21 +536,6 @@ mod tests {
}
}
#[test] #[should_fail] #[ignore(cfg(windows))]
pub fn test_mutex_arc_unwrap_poison() {
let arc = MutexARC(1);
let arc2 = ~(&arc).clone();
let (p, c) = comm::stream();
do task::spawn || {
do arc2.access |one| {
c.send(());
assert *one == 2;
}
}
let _ = p.recv();
let one = unwrap_mutex_arc(arc);
assert one == 1;
}
#[test] #[should_fail] #[ignore(cfg(windows))]
pub fn test_rw_arc_poison_wr() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();

5 comments on commit 78d5091

@bors (Contributor) commented on 78d5091, Mar 1, 2013:

saw approval from nikomatsakis
at brson@78d5091

@bors (Contributor) commented on 78d5091, Mar 1, 2013:

merging brson/rust/unwrap_shared_mutable_state = 78d5091 into auto

@bors (Contributor) commented on 78d5091, Mar 1, 2013:

brson/rust/unwrap_shared_mutable_state = 78d5091 merged ok, testing candidate = 916d1a9

@bors (Contributor) commented on 78d5091, Mar 1, 2013:

@bors (Contributor) commented on 78d5091, Mar 1, 2013:

fast-forwarding incoming to auto = 916d1a9
