diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000000..307e22b0df1fd --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,6 @@ +# format the world +a06baa56b95674fc626b3c3fd680d6a65357fe60 +# format libcore +95e00bfed801e264e9c4ac817004153ca0f19eb6 +# reformat with new rustfmt +971c549ca334b7b7406e61e958efcca9c4152822 diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs index f454141dc5229..0bb6559e65452 100644 --- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs +++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs @@ -55,75 +55,93 @@ impl<'tcx> RegionInferenceContext<'tcx> { infcx: &InferCtxt<'_, 'tcx>, opaque_ty_decls: VecMap<OpaqueTypeKey<'tcx>, (OpaqueHiddenType<'tcx>, OpaqueTyOrigin)>, ) -> VecMap<OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>> { - opaque_ty_decls - .into_iter() - .map(|(opaque_type_key, (concrete_type, origin))| { - let substs = opaque_type_key.substs; - debug!(?concrete_type, ?substs); + let mut result: VecMap<OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>> = VecMap::new(); + for (opaque_type_key, (concrete_type, origin)) in opaque_ty_decls { + let substs = opaque_type_key.substs; + debug!(?concrete_type, ?substs); - let mut subst_regions = vec![self.universal_regions.fr_static]; - let universal_substs = infcx.tcx.fold_regions(substs, &mut false, |region, _| { - if let ty::RePlaceholder(..) = region.kind() { - // Higher kinded regions don't need remapping; they don't refer to anything outside of the substs. - return region; + let mut subst_regions = vec![self.universal_regions.fr_static]; + let universal_substs = infcx.tcx.fold_regions(substs, &mut false, |region, _| { + if let ty::RePlaceholder(..) = region.kind() { + // Higher kinded regions don't need remapping; they don't refer to anything outside of the substs. + return region; + } + let vid = self.to_region_vid(region); + trace!(?vid); + let scc = self.constraint_sccs.scc(vid); + trace!(?scc); + match self.scc_values.universal_regions_outlived_by(scc).find_map(|lb| { + self.eval_equal(vid, lb).then_some(self.definitions[lb].external_name?) + }) { + Some(region) => { + let vid = self.universal_regions.to_region_vid(region); + subst_regions.push(vid); + region } - let vid = self.to_region_vid(region); - trace!(?vid); - let scc = self.constraint_sccs.scc(vid); - trace!(?scc); - match self.scc_values.universal_regions_outlived_by(scc).find_map(|lb| { - self.eval_equal(vid, lb).then_some(self.definitions[lb].external_name?)
- }) { - Some(region) => { - let vid = self.universal_regions.to_region_vid(region); - subst_regions.push(vid); - region - } - None => { - subst_regions.push(vid); - infcx.tcx.sess.delay_span_bug( - concrete_type.span, - "opaque type with non-universal region substs", - ); - infcx.tcx.lifetimes.re_static - } + None => { + subst_regions.push(vid); + infcx.tcx.sess.delay_span_bug( + concrete_type.span, + "opaque type with non-universal region substs", + ); + infcx.tcx.lifetimes.re_static } - }); + } + }); - subst_regions.sort(); - subst_regions.dedup(); + subst_regions.sort(); + subst_regions.dedup(); - let universal_concrete_type = - infcx.tcx.fold_regions(concrete_type, &mut false, |region, _| match *region { - ty::ReVar(vid) => subst_regions - .iter() - .find(|ur_vid| self.eval_equal(vid, **ur_vid)) - .and_then(|ur_vid| self.definitions[*ur_vid].external_name) - .unwrap_or(infcx.tcx.lifetimes.re_root_empty), - _ => region, - }); + let universal_concrete_type = + infcx.tcx.fold_regions(concrete_type, &mut false, |region, _| match *region { + ty::ReVar(vid) => subst_regions + .iter() + .find(|ur_vid| self.eval_equal(vid, **ur_vid)) + .and_then(|ur_vid| self.definitions[*ur_vid].external_name) + .unwrap_or(infcx.tcx.lifetimes.re_root_empty), + _ => region, + }); - debug!(?universal_concrete_type, ?universal_substs); + debug!(?universal_concrete_type, ?universal_substs); - let opaque_type_key = - OpaqueTypeKey { def_id: opaque_type_key.def_id, substs: universal_substs }; - let remapped_type = infcx.infer_opaque_definition_from_instantiation( - opaque_type_key, - universal_concrete_type, - ); - let ty = if check_opaque_type_parameter_valid( - infcx.tcx, - opaque_type_key, - origin, - concrete_type.span, - ) { - remapped_type - } else { - infcx.tcx.ty_error() - }; - (opaque_type_key, OpaqueHiddenType { ty, span: concrete_type.span }) - }) - .collect() + let opaque_type_key = + OpaqueTypeKey { def_id: opaque_type_key.def_id, substs: universal_substs }; + let remapped_type = infcx.infer_opaque_definition_from_instantiation( + opaque_type_key, + universal_concrete_type, + ); + let ty = if check_opaque_type_parameter_valid( + infcx.tcx, + opaque_type_key, + origin, + concrete_type.span, + ) { + remapped_type + } else { + infcx.tcx.ty_error() + }; + // Sometimes two opaque types are the same only after we remap the generic parameters + // back to the opaque type definition. E.g. we may have `OpaqueType<X, Y>` mapped to `(X, Y)` + // and `OpaqueType<Y, X>` mapped to `(Y, X)`, and those are the same, but we only know that + // once we convert the generic parameters to those of the opaque type. + if let Some(prev) = result.get_mut(&opaque_type_key) { + if prev.ty != ty { + let mut err = infcx.tcx.sess.struct_span_err( + concrete_type.span, + &format!("hidden type `{}` differed from previous `{}`", ty, prev.ty), + ); + err.span_note(prev.span, "previous hidden type bound here"); + err.emit(); + prev.ty = infcx.tcx.ty_error(); + } + // Pick a better span if there is one. + // FIXME(oli-obk): collect multiple spans for better diagnostics down the road. + prev.span = prev.span.substitute_dummy(concrete_type.span); + } else { + result.insert(opaque_type_key, OpaqueHiddenType { ty, span: concrete_type.span }); + } + } + result } /// Map the regions in the type to named regions.
This is similar to what diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs new file mode 100644 index 0000000000000..79aac16355061 --- /dev/null +++ b/compiler/rustc_mir_transform/src/deref_separator.rs @@ -0,0 +1,72 @@ +use crate::MirPass; +use rustc_middle::mir::patch::MirPatch; +use rustc_middle::mir::*; +use rustc_middle::ty::TyCtxt; +pub struct Derefer; + +pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let mut patch = MirPatch::new(body); + let (basic_blocks, local_decl) = body.basic_blocks_and_local_decls_mut(); + for (block, data) in basic_blocks.iter_enumerated_mut() { + for (i, stmt) in data.statements.iter_mut().enumerate() { + match stmt.kind { + StatementKind::Assign(box (og_place, Rvalue::Ref(region, borrow_knd, place))) => { + for (idx, (p_ref, p_elem)) in place.iter_projections().enumerate() { + if p_elem == ProjectionElem::Deref && !p_ref.projection.is_empty() { + // The type that we are dereferencing. + let ty = p_ref.ty(local_decl, tcx).ty; + let temp = patch.new_temp(ty, stmt.source_info.span); + + // Because we are assigning this right before the original statement, + // we use the statement's index `i` here. + let loc = Location { block: block, statement_index: i }; + patch.add_statement(loc, StatementKind::StorageLive(temp)); + + // We are adding the current `p_ref`'s projections to our + // temp value. + let deref_place = + Place::from(p_ref.local).project_deeper(p_ref.projection, tcx); + patch.add_assign( + loc, + Place::from(temp), + Rvalue::Use(Operand::Move(deref_place)), + ); + + // We are creating a place based on our temp value and the remaining + // dereferenced projections, which we need to build the new statement. + let temp_place = + Place::from(temp).project_deeper(&place.projection[idx..], tcx); + let new_stmt = Statement { + source_info: stmt.source_info, + kind: StatementKind::Assign(Box::new(( + og_place, + Rvalue::Ref(region, borrow_knd, temp_place), + ))), + }; + + // Replace the current statement with the newly created one. + *stmt = new_stmt; + + // Since our job with the temp is done, it should be gone. + let loc = Location { block: block, statement_index: i + 1 }; + patch.add_statement(loc, StatementKind::StorageDead(temp)); + + // As all projections are off the base projection, if there are + // multiple derefs in the middle of the projection it might cause + // unsoundness; to prevent that, we break the loop. + break; + } + } + } + _ => (), + } + } + } + patch.apply(body); +} + +impl<'tcx> MirPass<'tcx> for Derefer { + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + deref_finder(tcx, body); + } +} diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 2fca498a12502..059ee09dfd794 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -53,6 +53,7 @@ mod const_prop_lint; mod coverage; mod deaggregator; mod deduplicate_blocks; +mod deref_separator; mod dest_prop; pub mod dump_mir; mod early_otherwise_branch; @@ -431,6 +432,7 @@ fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tc // `Deaggregator` is conceptually part of MIR building, some backends rely on it happening // and it can help optimizations.
&deaggregator::Deaggregator, + &deref_separator::Derefer, &Lint(const_prop_lint::ConstProp), ]; diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index 03de7eed6d4c8..f1baf077580e6 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -20,7 +20,7 @@ use crate::process; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sys::stdio::panic_output; use crate::sys_common::backtrace; -use crate::sys_common::rwlock::StaticRWLock; +use crate::sys_common::rwlock::StaticRwLock; use crate::sys_common::thread_info; use crate::thread; @@ -83,7 +83,7 @@ impl Hook { } } -static HOOK_LOCK: StaticRWLock = StaticRWLock::new(); +static HOOK_LOCK: StaticRwLock = StaticRwLock::new(); static mut HOOK: Hook = Hook::Default; /// Registers a custom panic hook, replacing any that was previously registered. diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs index 2e72a9ef54e5f..ed62fa977beca 100644 --- a/library/std/src/sync/rwlock.rs +++ b/library/std/src/sync/rwlock.rs @@ -76,7 +76,7 @@ use crate::sys_common::rwlock as sys; /// [`Mutex`]: super::Mutex #[stable(feature = "rust1", since = "1.0.0")] pub struct RwLock { - inner: sys::MovableRWLock, + inner: sys::MovableRwLock, poison: poison::Flag, data: UnsafeCell, } @@ -146,7 +146,7 @@ impl RwLock { #[stable(feature = "rust1", since = "1.0.0")] pub fn new(t: T) -> RwLock { RwLock { - inner: sys::MovableRWLock::new(), + inner: sys::MovableRwLock::new(), poison: poison::Flag::new(), data: UnsafeCell::new(t), } diff --git a/library/std/src/sys/hermit/rwlock.rs b/library/std/src/sys/hermit/rwlock.rs index 1cca809764c8c..690bb155e1a27 100644 --- a/library/std/src/sys/hermit/rwlock.rs +++ b/library/std/src/sys/hermit/rwlock.rs @@ -1,13 +1,13 @@ use crate::cell::UnsafeCell; use crate::sys::locks::{Condvar, Mutex}; -pub struct RWLock { +pub struct RwLock { lock: Mutex, cond: Condvar, state: UnsafeCell, } -pub type MovableRWLock = RWLock; +pub type MovableRwLock = RwLock; enum State { Unlocked, @@ -15,8 +15,8 @@ enum State { Writing, } -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} // This rwlock implementation is a relatively simple implementation which has a // condition variable for readers/writers as well as a mutex protecting the @@ -26,9 +26,9 @@ unsafe impl Sync for RWLock {} // hopefully correct this implementation is very likely to want to be changed in // the future. -impl RWLock { - pub const fn new() -> RWLock { - RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) } +impl RwLock { + pub const fn new() -> RwLock { + RwLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) } } #[inline] diff --git a/library/std/src/sys/sgx/rwlock.rs b/library/std/src/sys/sgx/rwlock.rs index 2d038b518965b..47be4c006ec7e 100644 --- a/library/std/src/sys/sgx/rwlock.rs +++ b/library/std/src/sys/sgx/rwlock.rs @@ -8,25 +8,25 @@ use super::waitqueue::{ }; use crate::mem; -pub struct RWLock { +pub struct RwLock { readers: SpinMutex>>, writer: SpinMutex>, } -pub type MovableRWLock = Box; +pub type MovableRwLock = Box; -// Check at compile time that RWLock size matches C definition (see test_c_rwlock_initializer below) +// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below) // // # Safety // Never called, as it is a compile time check. 
#[allow(dead_code)] -unsafe fn rw_lock_size_assert(r: RWLock) { - unsafe { mem::transmute::(r) }; +unsafe fn rw_lock_size_assert(r: RwLock) { + unsafe { mem::transmute::(r) }; } -impl RWLock { - pub const fn new() -> RWLock { - RWLock { +impl RwLock { + pub const fn new() -> RwLock { + RwLock { readers: SpinMutex::new(WaitVariable::new(None)), writer: SpinMutex::new(WaitVariable::new(false)), } @@ -180,7 +180,7 @@ const EINVAL: i32 = 22; #[cfg(not(test))] #[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RWLock) -> i32 { +pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 { if p.is_null() { return EINVAL; } @@ -190,7 +190,7 @@ pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RWLock) -> i32 { #[cfg(not(test))] #[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RWLock) -> i32 { +pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 { if p.is_null() { return EINVAL; } @@ -199,7 +199,7 @@ pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RWLock) -> i32 { } #[cfg(not(test))] #[no_mangle] -pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RWLock) -> i32 { +pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 { if p.is_null() { return EINVAL; } diff --git a/library/std/src/sys/sgx/rwlock/tests.rs b/library/std/src/sys/sgx/rwlock/tests.rs index 17c9e72ee39fa..4799961154a47 100644 --- a/library/std/src/sys/sgx/rwlock/tests.rs +++ b/library/std/src/sys/sgx/rwlock/tests.rs @@ -1,7 +1,7 @@ use super::*; -// Verify that the byte pattern libunwind uses to initialize an RWLock is -// equivalent to the value of RWLock::new(). If the value changes, +// Verify that the byte pattern libunwind uses to initialize an RwLock is +// equivalent to the value of RwLock::new(). If the value changes, // `src/UnwindRustSgx.h` in libunwind needs to be changed too. #[test] fn test_c_rwlock_initializer() { @@ -18,9 +18,9 @@ fn test_c_rwlock_initializer() { /* 0x80 */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ]; - // For the test to work, we need the padding/unused bytes in RWLock to be + // For the test to work, we need the padding/unused bytes in RwLock to be // initialized as 0. In practice, this is the case with statics. 
- static RUST_RWLOCK_INIT: RWLock = RWLock::new(); + static RUST_RWLOCK_INIT: RwLock = RwLock::new(); unsafe { // If the assertion fails, that not necessarily an issue with the value diff --git a/library/std/src/sys/solid/os.rs b/library/std/src/sys/solid/os.rs index 127cca3accad9..719d95bbe50a8 100644 --- a/library/std/src/sys/solid/os.rs +++ b/library/std/src/sys/solid/os.rs @@ -8,7 +8,7 @@ use crate::os::{ solid::ffi::{OsStrExt, OsStringExt}, }; use crate::path::{self, PathBuf}; -use crate::sys_common::rwlock::StaticRWLock; +use crate::sys_common::rwlock::StaticRwLock; use crate::vec; use super::{abi, error, itron, memchr}; @@ -78,7 +78,7 @@ pub fn current_exe() -> io::Result { unsupported() } -static ENV_LOCK: StaticRWLock = StaticRWLock::new(); +static ENV_LOCK: StaticRwLock = StaticRwLock::new(); pub struct Env { iter: vec::IntoIter<(OsString, OsString)>, diff --git a/library/std/src/sys/solid/rwlock.rs b/library/std/src/sys/solid/rwlock.rs index 4e39ac2a93071..df16cc680ad84 100644 --- a/library/std/src/sys/solid/rwlock.rs +++ b/library/std/src/sys/solid/rwlock.rs @@ -7,24 +7,24 @@ use super::{ }, }; -pub struct RWLock { +pub struct RwLock { /// The ID of the underlying mutex object rwl: SpinIdOnceCell<()>, } -pub type MovableRWLock = RWLock; +pub type MovableRwLock = RwLock; // Safety: `num_readers` is protected by `mtx_num_readers` -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} fn new_rwl() -> Result { ItronError::err_if_negative(unsafe { abi::rwl_acre_rwl() }) } -impl RWLock { - pub const fn new() -> RWLock { - RWLock { rwl: SpinIdOnceCell::new() } +impl RwLock { + pub const fn new() -> RwLock { + RwLock { rwl: SpinIdOnceCell::new() } } /// Get the inner mutex's ID, which is lazily created. diff --git a/library/std/src/sys/unix/locks/mod.rs b/library/std/src/sys/unix/locks/mod.rs index 30e9f407eec4c..2b8dd168068b5 100644 --- a/library/std/src/sys/unix/locks/mod.rs +++ b/library/std/src/sys/unix/locks/mod.rs @@ -10,7 +10,7 @@ cfg_if::cfg_if! { mod pthread_rwlock; // FIXME: Implement this using a futex pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar}; pub use pthread_remutex::ReentrantMutex; - pub use pthread_rwlock::{RWLock, MovableRWLock}; + pub use pthread_rwlock::{RwLock, MovableRwLock}; } else { mod pthread_mutex; mod pthread_remutex; @@ -18,7 +18,7 @@ cfg_if::cfg_if! 
{ mod pthread_condvar; pub use pthread_mutex::{Mutex, MovableMutex}; pub use pthread_remutex::ReentrantMutex; - pub use pthread_rwlock::{RWLock, MovableRWLock}; + pub use pthread_rwlock::{RwLock, MovableRwLock}; pub use pthread_condvar::{Condvar, MovableCondvar}; } } diff --git a/library/std/src/sys/unix/locks/pthread_rwlock.rs b/library/std/src/sys/unix/locks/pthread_rwlock.rs index 1318c5b8e3a61..11a0c0457cd1a 100644 --- a/library/std/src/sys/unix/locks/pthread_rwlock.rs +++ b/library/std/src/sys/unix/locks/pthread_rwlock.rs @@ -1,20 +1,20 @@ use crate::cell::UnsafeCell; use crate::sync::atomic::{AtomicUsize, Ordering}; -pub struct RWLock { +pub struct RwLock { inner: UnsafeCell, write_locked: UnsafeCell, // guarded by the `inner` RwLock num_readers: AtomicUsize, } -pub type MovableRWLock = Box; +pub type MovableRwLock = Box; -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} -impl RWLock { - pub const fn new() -> RWLock { - RWLock { +impl RwLock { + pub const fn new() -> RwLock { + RwLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), write_locked: UnsafeCell::new(false), num_readers: AtomicUsize::new(0), diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs index 0b6cdb923bd6a..1be733ba106e8 100644 --- a/library/std/src/sys/unix/os.rs +++ b/library/std/src/sys/unix/os.rs @@ -20,7 +20,7 @@ use crate::str; use crate::sys::cvt; use crate::sys::fd; use crate::sys::memchr; -use crate::sys_common::rwlock::{StaticRWLock, StaticRWLockReadGuard}; +use crate::sys_common::rwlock::{StaticRwLock, StaticRwLockReadGuard}; use crate::vec; #[cfg(all(target_env = "gnu", not(target_os = "vxworks")))] @@ -481,9 +481,9 @@ pub unsafe fn environ() -> *mut *const *const c_char { ptr::addr_of_mut!(environ) } -static ENV_LOCK: StaticRWLock = StaticRWLock::new(); +static ENV_LOCK: StaticRwLock = StaticRwLock::new(); -pub fn env_read_lock() -> StaticRWLockReadGuard { +pub fn env_read_lock() -> StaticRwLockReadGuard { ENV_LOCK.read() } diff --git a/library/std/src/sys/unsupported/locks/mod.rs b/library/std/src/sys/unsupported/locks/mod.rs index 5634f10633963..35bd59130346f 100644 --- a/library/std/src/sys/unsupported/locks/mod.rs +++ b/library/std/src/sys/unsupported/locks/mod.rs @@ -3,4 +3,4 @@ mod mutex; mod rwlock; pub use condvar::{Condvar, MovableCondvar}; pub use mutex::{MovableMutex, Mutex, ReentrantMutex}; -pub use rwlock::{MovableRWLock, RWLock}; +pub use rwlock::{MovableRwLock, RwLock}; diff --git a/library/std/src/sys/unsupported/locks/rwlock.rs b/library/std/src/sys/unsupported/locks/rwlock.rs index 8438adeb5b533..14fd351314c17 100644 --- a/library/std/src/sys/unsupported/locks/rwlock.rs +++ b/library/std/src/sys/unsupported/locks/rwlock.rs @@ -1,18 +1,18 @@ use crate::cell::Cell; -pub struct RWLock { +pub struct RwLock { // This platform has no threads, so we can use a Cell here. 
mode: Cell, } -pub type MovableRWLock = RWLock; +pub type MovableRwLock = RwLock; -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} // no threads on this platform +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} // no threads on this platform -impl RWLock { - pub const fn new() -> RWLock { - RWLock { mode: Cell::new(0) } +impl RwLock { + pub const fn new() -> RwLock { + RwLock { mode: Cell::new(0) } } #[inline] diff --git a/library/std/src/sys/wasm/atomics/rwlock.rs b/library/std/src/sys/wasm/atomics/rwlock.rs index 1cca809764c8c..690bb155e1a27 100644 --- a/library/std/src/sys/wasm/atomics/rwlock.rs +++ b/library/std/src/sys/wasm/atomics/rwlock.rs @@ -1,13 +1,13 @@ use crate::cell::UnsafeCell; use crate::sys::locks::{Condvar, Mutex}; -pub struct RWLock { +pub struct RwLock { lock: Mutex, cond: Condvar, state: UnsafeCell, } -pub type MovableRWLock = RWLock; +pub type MovableRwLock = RwLock; enum State { Unlocked, @@ -15,8 +15,8 @@ enum State { Writing, } -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} // This rwlock implementation is a relatively simple implementation which has a // condition variable for readers/writers as well as a mutex protecting the @@ -26,9 +26,9 @@ unsafe impl Sync for RWLock {} // hopefully correct this implementation is very likely to want to be changed in // the future. -impl RWLock { - pub const fn new() -> RWLock { - RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) } +impl RwLock { + pub const fn new() -> RwLock { + RwLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) } } #[inline] diff --git a/library/std/src/sys/windows/locks/mod.rs b/library/std/src/sys/windows/locks/mod.rs index 5634f10633963..35bd59130346f 100644 --- a/library/std/src/sys/windows/locks/mod.rs +++ b/library/std/src/sys/windows/locks/mod.rs @@ -3,4 +3,4 @@ mod mutex; mod rwlock; pub use condvar::{Condvar, MovableCondvar}; pub use mutex::{MovableMutex, Mutex, ReentrantMutex}; -pub use rwlock::{MovableRWLock, RWLock}; +pub use rwlock::{MovableRwLock, RwLock}; diff --git a/library/std/src/sys/windows/locks/rwlock.rs b/library/std/src/sys/windows/locks/rwlock.rs index b7a5b1e7accd0..12906652e0b71 100644 --- a/library/std/src/sys/windows/locks/rwlock.rs +++ b/library/std/src/sys/windows/locks/rwlock.rs @@ -1,18 +1,18 @@ use crate::cell::UnsafeCell; use crate::sys::c; -pub struct RWLock { +pub struct RwLock { inner: UnsafeCell, } -pub type MovableRWLock = RWLock; +pub type MovableRwLock = RwLock; -unsafe impl Send for RWLock {} -unsafe impl Sync for RWLock {} +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} -impl RWLock { - pub const fn new() -> RWLock { - RWLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } +impl RwLock { + pub const fn new() -> RwLock { + RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) } } #[inline] pub unsafe fn read(&self) { diff --git a/library/std/src/sys_common/rwlock.rs b/library/std/src/sys_common/rwlock.rs index eaee631270157..12e7a72a344dc 100644 --- a/library/std/src/sys_common/rwlock.rs +++ b/library/std/src/sys_common/rwlock.rs @@ -4,14 +4,14 @@ use crate::sys::locks as imp; /// /// This rwlock does not implement poisoning. /// -/// This rwlock has a const constructor ([`StaticRWLock::new`]), does not +/// This rwlock has a const constructor ([`StaticRwLock::new`]), does not /// implement `Drop` to cleanup resources. 
-pub struct StaticRWLock(imp::RWLock); +pub struct StaticRwLock(imp::RwLock); -impl StaticRWLock { +impl StaticRwLock { /// Creates a new rwlock for use. pub const fn new() -> Self { - Self(imp::RWLock::new()) + Self(imp::RwLock::new()) } /// Acquires shared access to the underlying lock, blocking the current @@ -19,9 +19,9 @@ impl StaticRWLock { /// /// The lock is automatically unlocked when the returned guard is dropped. #[inline] - pub fn read(&'static self) -> StaticRWLockReadGuard { + pub fn read(&'static self) -> StaticRwLockReadGuard { unsafe { self.0.read() }; - StaticRWLockReadGuard(&self.0) + StaticRwLockReadGuard(&self.0) } /// Acquires write access to the underlying lock, blocking the current thread @@ -29,16 +29,16 @@ impl StaticRWLock { /// /// The lock is automatically unlocked when the returned guard is dropped. #[inline] - pub fn write(&'static self) -> StaticRWLockWriteGuard { + pub fn write(&'static self) -> StaticRwLockWriteGuard { unsafe { self.0.write() }; - StaticRWLockWriteGuard(&self.0) + StaticRwLockWriteGuard(&self.0) } } #[must_use] -pub struct StaticRWLockReadGuard(&'static imp::RWLock); +pub struct StaticRwLockReadGuard(&'static imp::RwLock); -impl Drop for StaticRWLockReadGuard { +impl Drop for StaticRwLockReadGuard { #[inline] fn drop(&mut self) { unsafe { @@ -48,9 +48,9 @@ impl Drop for StaticRWLockReadGuard { } #[must_use] -pub struct StaticRWLockWriteGuard(&'static imp::RWLock); +pub struct StaticRwLockWriteGuard(&'static imp::RwLock); -impl Drop for StaticRWLockWriteGuard { +impl Drop for StaticRwLockWriteGuard { #[inline] fn drop(&mut self) { unsafe { @@ -66,15 +66,15 @@ impl Drop for StaticRWLockWriteGuard { /// /// This rwlock does not implement poisoning. /// -/// This is either a wrapper around `Box` or `imp::RWLock`, -/// depending on the platform. It is boxed on platforms where `imp::RWLock` may +/// This is either a wrapper around `Box` or `imp::RwLock`, +/// depending on the platform. It is boxed on platforms where `imp::RwLock` may /// not be moved. -pub struct MovableRWLock(imp::MovableRWLock); +pub struct MovableRwLock(imp::MovableRwLock); -impl MovableRWLock { +impl MovableRwLock { /// Creates a new reader-writer lock for use. pub fn new() -> Self { - Self(imp::MovableRWLock::from(imp::RWLock::new())) + Self(imp::MovableRwLock::from(imp::RwLock::new())) } /// Acquires shared access to the underlying lock, blocking the current @@ -127,7 +127,7 @@ impl MovableRWLock { } } -impl Drop for MovableRWLock { +impl Drop for MovableRwLock { fn drop(&mut self) { unsafe { self.0.destroy() }; } diff --git a/src/bootstrap/setup.rs b/src/bootstrap/setup.rs index aff2b6c3cbfdb..b730730854f14 100644 --- a/src/bootstrap/setup.rs +++ b/src/bootstrap/setup.rs @@ -342,7 +342,7 @@ undesirable, simply delete the `pre-push` file from .git/hooks." 
dst.display(), e ), - Ok(_) => println!("Linked `src/etc/pre-commit.sh` to `.git/hooks/pre-push`"), + Ok(_) => println!("Linked `src/etc/pre-push.sh` to `.git/hooks/pre-push`"), }; } else { println!("Ok, skipping installation!"); diff --git a/src/bootstrap/util.rs b/src/bootstrap/util.rs index 30d9665dd0f4a..defb1e4d83bae 100644 --- a/src/bootstrap/util.rs +++ b/src/bootstrap/util.rs @@ -308,10 +308,10 @@ pub fn is_valid_test_suite_arg<'a, P: AsRef>( let abs_path = builder.src.join(path); let exists = abs_path.is_dir() || abs_path.is_file(); if !exists { - if let Some(p) = abs_path.to_str() { - builder.info(&format!("Warning: Skipping \"{}\": not a regular file or directory", p)); - } - return None; + panic!( + "Invalid test suite filter \"{}\": file or directory does not exist", + abs_path.display() + ); } // Since test suite paths are themselves directories, if we don't // specify a directory or file, we'll get an empty string here diff --git a/src/test/mir-opt/derefer_test.main.Derefer.diff b/src/test/mir-opt/derefer_test.main.Derefer.diff new file mode 100644 index 0000000000000..e9a45656ebf8d --- /dev/null +++ b/src/test/mir-opt/derefer_test.main.Derefer.diff @@ -0,0 +1,60 @@ +- // MIR for `main` before Derefer ++ // MIR for `main` after Derefer + + fn main() -> () { + let mut _0: (); // return place in scope 0 at $DIR/derefer_test.rs:2:11: 2:11 + let mut _1: (i32, i32); // in scope 0 at $DIR/derefer_test.rs:3:9: 3:14 + let mut _3: &mut (i32, i32); // in scope 0 at $DIR/derefer_test.rs:4:22: 4:28 ++ let mut _6: &mut (i32, i32); // in scope 0 at $DIR/derefer_test.rs:5:13: 5:26 ++ let mut _7: &mut (i32, i32); // in scope 0 at $DIR/derefer_test.rs:6:13: 6:26 + scope 1 { + debug a => _1; // in scope 1 at $DIR/derefer_test.rs:3:9: 3:14 + let mut _2: (i32, &mut (i32, i32)); // in scope 1 at $DIR/derefer_test.rs:4:9: 4:14 + scope 2 { + debug b => _2; // in scope 2 at $DIR/derefer_test.rs:4:9: 4:14 + let _4: &mut i32; // in scope 2 at $DIR/derefer_test.rs:5:9: 5:10 + scope 3 { + debug x => _4; // in scope 3 at $DIR/derefer_test.rs:5:9: 5:10 + let _5: &mut i32; // in scope 3 at $DIR/derefer_test.rs:6:9: 6:10 + scope 4 { + debug y => _5; // in scope 4 at $DIR/derefer_test.rs:6:9: 6:10 + } + } + } + } + + bb0: { + StorageLive(_1); // scope 0 at $DIR/derefer_test.rs:3:9: 3:14 + (_1.0: i32) = const 42_i32; // scope 0 at $DIR/derefer_test.rs:3:17: 3:24 + (_1.1: i32) = const 43_i32; // scope 0 at $DIR/derefer_test.rs:3:17: 3:24 + StorageLive(_2); // scope 1 at $DIR/derefer_test.rs:4:9: 4:14 + StorageLive(_3); // scope 1 at $DIR/derefer_test.rs:4:22: 4:28 + _3 = &mut _1; // scope 1 at $DIR/derefer_test.rs:4:22: 4:28 + (_2.0: i32) = const 99_i32; // scope 1 at $DIR/derefer_test.rs:4:17: 4:29 + (_2.1: &mut (i32, i32)) = move _3; // scope 1 at $DIR/derefer_test.rs:4:17: 4:29 + StorageDead(_3); // scope 1 at $DIR/derefer_test.rs:4:28: 4:29 + StorageLive(_4); // scope 2 at $DIR/derefer_test.rs:5:9: 5:10 +- _4 = &mut ((*(_2.1: &mut (i32, i32))).0: i32); // scope 2 at $DIR/derefer_test.rs:5:13: 5:26 ++ StorageLive(_6); // scope 2 at $DIR/derefer_test.rs:5:13: 5:26 ++ _6 = move (_2.1: &mut (i32, i32)); // scope 2 at $DIR/derefer_test.rs:5:13: 5:26 ++ _4 = &mut ((*_6).0: i32); // scope 2 at $DIR/derefer_test.rs:5:13: 5:26 ++ StorageDead(_6); // scope 3 at $DIR/derefer_test.rs:6:9: 6:10 + StorageLive(_5); // scope 3 at $DIR/derefer_test.rs:6:9: 6:10 +- _5 = &mut ((*(_2.1: &mut (i32, i32))).1: i32); // scope 3 at $DIR/derefer_test.rs:6:13: 6:26 ++ StorageLive(_7); // scope 3 at $DIR/derefer_test.rs:6:13: 6:26 
++ _7 = move (_2.1: &mut (i32, i32)); // scope 3 at $DIR/derefer_test.rs:6:13: 6:26 ++ _5 = &mut ((*_7).1: i32); // scope 3 at $DIR/derefer_test.rs:6:13: 6:26 ++ StorageDead(_7); // scope 0 at $DIR/derefer_test.rs:2:11: 7:2 + _0 = const (); // scope 0 at $DIR/derefer_test.rs:2:11: 7:2 + StorageDead(_5); // scope 3 at $DIR/derefer_test.rs:7:1: 7:2 + StorageDead(_4); // scope 2 at $DIR/derefer_test.rs:7:1: 7:2 + StorageDead(_2); // scope 1 at $DIR/derefer_test.rs:7:1: 7:2 + StorageDead(_1); // scope 0 at $DIR/derefer_test.rs:7:1: 7:2 + return; // scope 0 at $DIR/derefer_test.rs:7:2: 7:2 ++ } ++ ++ bb1 (cleanup): { ++ resume; // scope 0 at $DIR/derefer_test.rs:2:1: 7:2 + } + } + diff --git a/src/test/mir-opt/derefer_test.rs b/src/test/mir-opt/derefer_test.rs new file mode 100644 index 0000000000000..2ebc0d343bd74 --- /dev/null +++ b/src/test/mir-opt/derefer_test.rs @@ -0,0 +1,7 @@ +// EMIT_MIR derefer_test.main.Derefer.diff +fn main() { + let mut a = (42,43); + let mut b = (99, &mut a); + let x = &mut (*b.1).0; + let y = &mut (*b.1).1; +} diff --git a/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.b.Inline.after.mir b/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.b.Inline.after.mir index 9264d41554ae3..a18ff0e35fe9a 100644 --- a/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.b.Inline.after.mir +++ b/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.b.Inline.after.mir @@ -10,6 +10,7 @@ fn b(_1: &mut Box) -> &mut T { debug self => _4; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL let mut _5: &mut T; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL let mut _6: &mut T; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + let mut _7: std::boxed::Box; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL } bb0: { @@ -19,7 +20,10 @@ fn b(_1: &mut Box) -> &mut T { _4 = &mut (*_1); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:8:5: 8:15 StorageLive(_5); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL StorageLive(_6); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL - _6 = &mut (*(*_4)); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + StorageLive(_7); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + _7 = move (*_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + _6 = &mut (*_7); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + StorageDead(_7); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL _5 = &mut (*_6); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL _3 = &mut (*_5); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL StorageDead(_6); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL diff --git a/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.d.Inline.after.mir b/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.d.Inline.after.mir index 422bf748d9ffe..d079ba59ffc3d 100644 --- a/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.d.Inline.after.mir +++ b/src/test/mir-opt/inline/issue_58867_inline_as_ref_as_mut.d.Inline.after.mir @@ -7,13 +7,17 @@ fn d(_1: &Box) -> &T { let mut _3: &std::boxed::Box; // in scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15 scope 1 (inlined as AsRef>::as_ref) { // at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15 debug self => _3; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + let mut _4: std::boxed::Box; // in scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL } bb0: { StorageLive(_2); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15 StorageLive(_3); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15 _3 = &(*_1); // scope 0 at 
$DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15 - _2 = &(*(*_3)); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + StorageLive(_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + _4 = move (*_3); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + _2 = &(*_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL + StorageDead(_4); // scope 1 at $SRC_DIR/alloc/src/boxed.rs:LL:COL _0 = &(*_2); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:5: 18:15 StorageDead(_3); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:18:14: 18:15 StorageDead(_2); // scope 0 at $DIR/issue-58867-inline-as-ref-as-mut.rs:19:1: 19:2 diff --git a/src/test/mir-opt/lower_array_len.array_bound.NormalizeArrayLen.diff b/src/test/mir-opt/lower_array_len.array_bound.NormalizeArrayLen.diff index d6c1c92cd9177..2e03467018693 100644 --- a/src/test/mir-opt/lower_array_len.array_bound.NormalizeArrayLen.diff +++ b/src/test/mir-opt/lower_array_len.array_bound.NormalizeArrayLen.diff @@ -64,5 +64,9 @@ StorageDead(_3); // scope 0 at $DIR/lower_array_len.rs:11:5: 11:6 return; // scope 0 at $DIR/lower_array_len.rs:12:2: 12:2 } + + bb6 (cleanup): { + resume; // scope 0 at $DIR/lower_array_len.rs:6:1: 12:2 + } } diff --git a/src/test/mir-opt/lower_array_len.array_bound_mut.NormalizeArrayLen.diff b/src/test/mir-opt/lower_array_len.array_bound_mut.NormalizeArrayLen.diff index 11fc20aa693c7..6aa77a9ed6013 100644 --- a/src/test/mir-opt/lower_array_len.array_bound_mut.NormalizeArrayLen.diff +++ b/src/test/mir-opt/lower_array_len.array_bound_mut.NormalizeArrayLen.diff @@ -77,5 +77,9 @@ StorageDead(_3); // scope 0 at $DIR/lower_array_len.rs:24:5: 24:6 return; // scope 0 at $DIR/lower_array_len.rs:25:2: 25:2 } + + bb7 (cleanup): { + resume; // scope 0 at $DIR/lower_array_len.rs:17:1: 25:2 + } } diff --git a/src/test/mir-opt/lower_array_len.array_len.NormalizeArrayLen.diff b/src/test/mir-opt/lower_array_len.array_len.NormalizeArrayLen.diff index 892fdda818ebd..b41582477c692 100644 --- a/src/test/mir-opt/lower_array_len.array_len.NormalizeArrayLen.diff +++ b/src/test/mir-opt/lower_array_len.array_len.NormalizeArrayLen.diff @@ -26,5 +26,9 @@ StorageDead(_2); // scope 0 at $DIR/lower_array_len.rs:31:13: 31:14 return; // scope 0 at $DIR/lower_array_len.rs:32:2: 32:2 } + + bb2 (cleanup): { + resume; // scope 0 at $DIR/lower_array_len.rs:30:1: 32:2 + } } diff --git a/src/test/mir-opt/lower_array_len.array_len_by_value.NormalizeArrayLen.diff b/src/test/mir-opt/lower_array_len.array_len_by_value.NormalizeArrayLen.diff index 201fffbf0d45a..92ec7a3633e94 100644 --- a/src/test/mir-opt/lower_array_len.array_len_by_value.NormalizeArrayLen.diff +++ b/src/test/mir-opt/lower_array_len.array_len_by_value.NormalizeArrayLen.diff @@ -26,5 +26,9 @@ StorageDead(_2); // scope 0 at $DIR/lower_array_len.rs:38:13: 38:14 return; // scope 0 at $DIR/lower_array_len.rs:39:2: 39:2 } + + bb2 (cleanup): { + resume; // scope 0 at $DIR/lower_array_len.rs:37:1: 39:2 + } } diff --git a/src/test/mir-opt/lower_slice_len.bound.LowerSliceLenCalls.diff b/src/test/mir-opt/lower_slice_len.bound.LowerSliceLenCalls.diff index 13241d882f210..2210ad54e8d3d 100644 --- a/src/test/mir-opt/lower_slice_len.bound.LowerSliceLenCalls.diff +++ b/src/test/mir-opt/lower_slice_len.bound.LowerSliceLenCalls.diff @@ -59,5 +59,9 @@ StorageDead(_3); // scope 0 at $DIR/lower_slice_len.rs:9:5: 9:6 return; // scope 0 at $DIR/lower_slice_len.rs:10:2: 10:2 } + + bb6 (cleanup): { + resume; // scope 0 at $DIR/lower_slice_len.rs:4:1: 10:2 + } } diff --git 
a/src/test/mir-opt/uninhabited_fallthrough_elimination.eliminate_fallthrough.UninhabitedEnumBranching.diff b/src/test/mir-opt/uninhabited_fallthrough_elimination.eliminate_fallthrough.UninhabitedEnumBranching.diff index 7e843b65e88fc..868eeb6367e33 100644 --- a/src/test/mir-opt/uninhabited_fallthrough_elimination.eliminate_fallthrough.UninhabitedEnumBranching.diff +++ b/src/test/mir-opt/uninhabited_fallthrough_elimination.eliminate_fallthrough.UninhabitedEnumBranching.diff @@ -9,7 +9,7 @@ bb0: { _2 = discriminant(_1); // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:22:11: 22:12 - switchInt(move _2) -> [1_isize: bb3, 2_isize: bb2, otherwise: bb1]; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:22:5: 22:12 -+ switchInt(move _2) -> [1_isize: bb3, 2_isize: bb2, otherwise: bb5]; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:22:5: 22:12 ++ switchInt(move _2) -> [1_isize: bb3, 2_isize: bb2, otherwise: bb6]; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:22:5: 22:12 } bb1: { @@ -29,9 +29,13 @@ bb4: { return; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:27:2: 27:2 + } + + bb5 (cleanup): { + resume; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:21:1: 27:2 + } + -+ bb5: { ++ bb6: { + unreachable; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:25:14: 25:15 } } diff --git a/src/test/mir-opt/uninhabited_fallthrough_elimination.keep_fallthrough.UninhabitedEnumBranching.diff b/src/test/mir-opt/uninhabited_fallthrough_elimination.keep_fallthrough.UninhabitedEnumBranching.diff index 5da011d427a2c..33c1458dc0c17 100644 --- a/src/test/mir-opt/uninhabited_fallthrough_elimination.keep_fallthrough.UninhabitedEnumBranching.diff +++ b/src/test/mir-opt/uninhabited_fallthrough_elimination.keep_fallthrough.UninhabitedEnumBranching.diff @@ -30,5 +30,9 @@ bb4: { return; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:18:2: 18:2 } + + bb5 (cleanup): { + resume; // scope 0 at $DIR/uninhabited_fallthrough_elimination.rs:12:1: 18:2 + } } diff --git a/src/test/mir-opt/unreachable.main.UnreachablePropagation.diff b/src/test/mir-opt/unreachable.main.UnreachablePropagation.diff index 08312bde20f51..380844f8861f6 100644 --- a/src/test/mir-opt/unreachable.main.UnreachablePropagation.diff +++ b/src/test/mir-opt/unreachable.main.UnreachablePropagation.diff @@ -64,6 +64,10 @@ _0 = const (); // scope 0 at $DIR/unreachable.rs:19:6: 19:6 StorageDead(_1); // scope 0 at $DIR/unreachable.rs:20:1: 20:2 return; // scope 0 at $DIR/unreachable.rs:20:2: 20:2 +- } +- +- bb7 (cleanup): { +- resume; // scope 0 at $DIR/unreachable.rs:8:1: 20:2 } } diff --git a/src/test/mir-opt/unreachable_diverging.main.UnreachablePropagation.diff b/src/test/mir-opt/unreachable_diverging.main.UnreachablePropagation.diff index e5867ccfc5cb6..e26990b1def89 100644 --- a/src/test/mir-opt/unreachable_diverging.main.UnreachablePropagation.diff +++ b/src/test/mir-opt/unreachable_diverging.main.UnreachablePropagation.diff @@ -69,6 +69,10 @@ StorageDead(_1); // scope 0 at $DIR/unreachable_diverging.rs:20:1: 20:2 StorageDead(_2); // scope 0 at $DIR/unreachable_diverging.rs:20:1: 20:2 return; // scope 0 at $DIR/unreachable_diverging.rs:20:2: 20:2 +- } +- +- bb7 (cleanup): { +- resume; // scope 0 at $DIR/unreachable_diverging.rs:12:1: 20:2 } } diff --git a/src/test/run-make/const_fn_mir/dump.mir b/src/test/run-make/const_fn_mir/dump.mir index f02bccc4b2da5..4e8936905c440 100644 --- a/src/test/run-make/const_fn_mir/dump.mir +++ b/src/test/run-make/const_fn_mir/dump.mir @@ -23,6 +23,10 @@ 
fn foo() -> i32 { _0 = move (_1.0: i32); // scope 0 at main.rs:5:5: 5:10 return; // scope 0 at main.rs:6:2: 6:2 } + + bb2 (cleanup): { + resume; // scope 0 at main.rs:4:1: 6:2 + } } fn main() -> () { diff --git a/src/test/ui/type-alias-impl-trait/multiple_definitions.rs b/src/test/ui/type-alias-impl-trait/multiple_definitions.rs new file mode 100644 index 0000000000000..9e6268e63cde0 --- /dev/null +++ b/src/test/ui/type-alias-impl-trait/multiple_definitions.rs @@ -0,0 +1,30 @@ +// check-pass + +use std::marker::PhantomData; + +pub struct ConcreteError {} +pub trait IoBase {} +struct X {} +impl IoBase for X {} + +pub struct ClusterIterator { + pub fat: B, + phantom_s: PhantomData, + phantom_e: PhantomData, +} + +pub struct FileSystem { + pub disk: IO, +} + +impl FileSystem { + pub fn cluster_iter(&self) -> ClusterIterator { + ClusterIterator { + fat: X {}, + phantom_s: PhantomData::default(), + phantom_e: PhantomData::default(), + } + } +} + +fn main() {}
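The `multiple_definitions.rs` test above covers the accepted case, where the two defining uses of the opaque type agree once their generic parameters are remapped back to the opaque type's own parameters. As a complement, here is a minimal sketch (not part of this patch; the `Pair`/`conflict` names are made up for illustration, and it assumes the `type_alias_impl_trait` feature) of the conflicting case that the new duplicate-hidden-type check in `opaque_types.rs` is meant to report:

#![feature(type_alias_impl_trait)]

// Hypothetical opaque type alias, for illustration only.
type Pair<A, B> = impl Sized;

// The first tuple element defines the hidden type of `Pair<A, B>` as `(A, B)`.
// The second element is declared as `Pair<Y, X>`, so its hidden type `(X, Y)`
// becomes `(B, A)` once its parameters are remapped to the opaque type's own
// generics, conflicting with the first defining use.
fn conflict<X: Copy, Y: Copy>(x: X, y: Y) -> (Pair<X, Y>, Pair<Y, X>) {
    ((x, y), (x, y))
}

fn main() {}

With the borrowck change above, such a program would be expected to produce a "hidden type ... differed from previous ..." error, with a note pointing at the earlier defining use.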