diff --git a/compiler/rustc_mir_transform/src/instcombine.rs b/compiler/rustc_mir_transform/src/instcombine.rs new file mode 100644 index 0000000000000..caac097a15a77 --- /dev/null +++ b/compiler/rustc_mir_transform/src/instcombine.rs @@ -0,0 +1,440 @@ +use crate::MirPass; +use rustc_data_structures::fx::FxHashSet; +use rustc_index::IndexVec; +use rustc_middle::middle::resolve_bound_vars::Set1; +use rustc_middle::mir::visit::{PlaceContext, Visitor}; +use rustc_middle::mir::*; +use rustc_middle::ty::{ParamEnv, TyCtxt}; +use rustc_session::Session; +use smallvec::SmallVec; + +pub struct InstCombine; + +impl<'tcx> MirPass<'tcx> for InstCombine { + fn is_enabled(&self, sess: &Session) -> bool { + sess.mir_opt_level() >= 1 + } + + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); + while did_optimization(body, tcx, param_env) {} + } +} + +struct Context<'tcx, 'a> { + tcx: TyCtxt<'tcx>, + local_decls: &'a LocalDecls<'tcx>, + param_env: ParamEnv<'tcx>, +} + +// In the nomenclature of our function signatures, our optimizations are all +// Combine: +// temp_place = temp_rvalue; +// final_place = final_rvalue; +// +// Combine: +// _2 = &_1; +// _3 = *_2; +// Into: +// _3 = _1; +// +// This transformation is correct because our analysis guarantees that these are the only uses of +// _2 (the temporary). 
+fn combine_ref_deref<'tcx, 'a>( + cx: &Context<'tcx, 'a>, + temp_place: &Place<'tcx>, + temp_rvalue: &Rvalue<'tcx>, + final_place: &Place<'tcx>, + final_rvalue: &Rvalue<'tcx>, +) -> Option<StatementKind<'tcx>> { + let Rvalue::Ref(_, _, first_place) = temp_rvalue else { + return None; + }; + let Rvalue::Use(final_operand) = final_rvalue else { + return None; + }; + + let second_place = final_operand.place()?; + if second_place.projection.get(0) != Some(&ProjectionElem::Deref) { + return None; + } + + assert_eq!(Some(second_place.local), temp_place.as_local()); + + let new_place = first_place.project_deeper(&second_place.projection[1..], cx.tcx); + + if new_place == *final_place { + Some(StatementKind::Nop) + } else { + Some(StatementKind::Assign(Box::new((*final_place, Rvalue::Use(Operand::Copy(new_place)))))) + } +} + +// FIXME: Justify the optimization +// Combine: +// _2 = &mut? _1; +// _3 = &raw mut? *_2; +// Into: +// _3 = &raw mut? _1; +fn combine_ref_addressof<'tcx, 'a>( + cx: &Context<'tcx, 'a>, + temp_place: &Place<'tcx>, + temp_rvalue: &Rvalue<'tcx>, + final_place: &Place<'tcx>, + final_rvalue: &Rvalue<'tcx>, +) -> Option<StatementKind<'tcx>> { + let Rvalue::Ref(_, _, first_place) = temp_rvalue else { + return None; + }; + let Rvalue::AddressOf(mutability, second_place) = final_rvalue else { + return None; + }; + + if second_place.projection.get(0) != Some(&ProjectionElem::Deref) { + return None; + } + + assert_eq!(second_place.local, temp_place.local); + + let new_place = first_place.project_deeper(&second_place.projection[1..], cx.tcx); + + Some(StatementKind::Assign(Box::new((*final_place, Rvalue::AddressOf(*mutability, new_place))))) +} + +// FIXME: Justify the optimization +// Combine: +// _2 = _1.a; +// _3 = _2.b; +// Into: +// _3 = _1.a.b; +fn combine_place_projections<'tcx, 'a>( + cx: &Context<'tcx, 'a>, + temp_place: &Place<'tcx>, + temp_rvalue: &Rvalue<'tcx>, + final_place: &Place<'tcx>, + final_rvalue: &Rvalue<'tcx>, +) -> Option<StatementKind<'tcx>> { + let Rvalue::Use(temp_operand) = temp_rvalue else 
{ + return None; + }; + let Rvalue::Use(final_operand) = final_rvalue else { + return None; + }; + + // Both the operands needs to be places (is it even possible for one to be a Constant?) + let first_place = temp_operand.place()?; + let second_place = final_operand.place()?; + + // If we are assigning into a place expression, that would be something like + // _3.b = _2.b; + // Which is complicated. Just don't optimize that at all for now. + if !temp_place.projection.is_empty() { + return None; + } + + // Derefs must come first if at all + // If merging these assignments would break that rule, bummer. Bail. + if first_place.projection.len() > 0 + && second_place.projection.get(0) == Some(&ProjectionElem::Deref) + { + return None; + } + + // See: rust-lang/rust#11518 + // If the temporary has a niche but the final does not, doing this optimization will destroy + // the niche information. + // This check is _extremely_ cautious, we only do this optimization if we are absolutely + // certain that the temporary does not have a niche. + // Note that if the second assignment does not add any projections, layout can't change, so we + // don't need this check. + if !second_place.projection.is_empty() { + let temporary_ty = temp_place.ty(cx.local_decls, cx.tcx).ty; + let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(temporary_ty)) else { + return None; + }; + if layout.layout.largest_niche().is_some() { + return None; + } + } + + assert_eq!(second_place.local, temp_place.local); + + let new_place = first_place.project_deeper(&second_place.projection[..], cx.tcx); + + if new_place == *final_place { + Some(StatementKind::Nop) + } else if temp_operand.is_move() && final_operand.is_move() { + // If the Operand in the second statement is Move, the Place it refers to may be unsized, + // which would be wrong to copy, so we need to emit Operand::Move. 
+ Some(StatementKind::Assign(Box::new((*final_place, Rvalue::Use(Operand::Move(new_place)))))) + } else { + Some(StatementKind::Assign(Box::new((*final_place, Rvalue::Use(Operand::Copy(new_place)))))) + } +} + +// FIXME: Justify the optimization +// Combine: +// _2 = _1 as *const T; +// _3 = _2 as *const U; +// Into: +// _3 = _1 as *const U; +fn combine_ptr_ptr_cast<'tcx, 'a>( + _cx: &Context<'tcx, 'a>, + temp_place: &Place<'tcx>, + temp_rvalue: &Rvalue<'tcx>, + final_place: &Place<'tcx>, + final_rvalue: &Rvalue<'tcx>, +) -> Option<StatementKind<'tcx>> { + if !temp_place.projection.is_empty() { + return None; + } + let Rvalue::Cast(CastKind::PtrToPtr, temp_operand, _temp_ty) = temp_rvalue else { + return None; + }; + let Rvalue::Cast(CastKind::PtrToPtr, final_operand, final_ty) = final_rvalue else { + return None; + }; + + assert_eq!(Some(final_operand.place()?.local), temp_place.as_local()); + + Some(StatementKind::Assign(Box::new(( + *final_place, + Rvalue::Cast(CastKind::PtrToPtr, temp_operand.clone(), *final_ty), + )))) +} + +fn did_optimization<'tcx>( + body: &mut Body<'tcx>, + tcx: TyCtxt<'tcx>, + param_env: ParamEnv<'tcx>, +) -> bool { + let mut visitor = AnalysisVisitor { + analysis: IndexVec::from_elem_n(Analysis::default(), body.local_decls.len()), + }; + visitor.visit_body(body); + let analysis = visitor.analysis; + + let mut invalidated_statements = FxHashSet::default(); + + 'outer: for (_local, result) in analysis.iter_enumerated() { + let Some((temp_loc, (temp_place, temp_rvalue))) = as_assign(result.write, body) else { + continue; + }; + let Some((final_loc, (final_place, final_rvalue))) = as_assign(result.read, body) else { + continue; + }; + // We only apply this optimization within a block, this means that we get to skip all + // reasoning about flow control in our analysis. + if temp_loc.block != final_loc.block { + continue; + } + + // If this is a single statement, it's an assignment that modifies a variable based on + // itself, such as an AddAssign impl. 
+ if temp_loc == final_loc { + continue; + } + + // If the creation of the temporary comes after the use of it, then by whatever means this + // is not the pattern we are looking for. + if temp_loc.statement_index > final_loc.statement_index { + continue; + } + + if invalidated_statements.contains(&temp_loc) || invalidated_statements.contains(&final_loc) + { + continue; + } + + // We only do optimizations where there are no statements other than storage markers + // between the two statements. This is our proxy for alias analysis: if there are no + // statements between the creation of the temporary and its use, nothing can + // observe us eliding the temporary. + let statements_are_adjacent = &body.basic_blocks[temp_loc.block].statements + [temp_loc.statement_index + 1..final_loc.statement_index] + .iter() + .all(|s| { + matches!( + &s.kind, + StatementKind::Nop + | StatementKind::StorageLive(_) + | StatementKind::StorageDead(_) + ) + }); + if !statements_are_adjacent { + continue; + } + + let cx = Context { tcx, local_decls: &body.local_decls, param_env }; + for opt in &[ + combine_place_projections, + combine_ref_deref, + combine_ptr_ptr_cast, + combine_ref_addressof, + ] { + let Some(new_statement) = opt(&cx, temp_place, temp_rvalue, final_place, final_rvalue) + else { + continue; + }; + + invalidated_statements.insert(temp_loc); + invalidated_statements.insert(final_loc); + + let statements = + &mut body.basic_blocks.as_mut_preserves_cfg()[temp_loc.block].statements; + + debug!( + "Combine:\n{:?}\n{:?}\n{:?}\n", + &statements[temp_loc.statement_index], + &statements[final_loc.statement_index], + &new_statement, + ); + + // We need to handle situations like this: + // + // _2 = _1 as *const u8; + // StorageDead(_1); + // StorageLive(_3); + // _3 = _2 as *const (); + // + // What we want to do is replace one of these assignments with + // + // _3 = _1 as *const (); + // + // But if we only replace one of the two assignments we analyze with our new + // 
statement, we will use a local outside of its liveness range. + // To deal with this, we remove the two original statements and all storage markers + // for locals in the new statement, and consider the original locations of these + // statements to be free slots in the block. + // Then we insert all our removed StorageLive statements into the first free slots, + // our StorageDead statements into the last slots, and our new statement somewhere in + // the middle. + // This ensures that we do not change the location of any statements that we have not + // optimized, which minimizes the amount of our analysis that we have invalidated. + + statements[temp_loc.statement_index].make_nop(); + let mut statement = statements[final_loc.statement_index].replace_nop(); + statement.kind = new_statement; + + let locals = find_locals(&statement); + + let mut storage_live = Vec::new(); + let mut storage_dead = Vec::new(); + let mut slots = vec![temp_loc.statement_index, final_loc.statement_index]; + for (s, statement) in statements.iter_mut().enumerate() { + match statement.kind { + StatementKind::StorageLive(l) => { + if locals.contains(&l) { + storage_live.push(statement.replace_nop()); + slots.push(s); + } + } + StatementKind::StorageDead(l) => { + if locals.contains(&l) { + storage_dead.push(statement.replace_nop()); + slots.push(s); + } + } + _ => {} + } + } + slots.sort(); + + assert!(slots.len() >= storage_live.len() + 1 + storage_dead.len()); + for (slot, statement) in slots.iter().zip(storage_live.into_iter().chain([statement])) { + assert!(matches!(statements[*slot].kind, StatementKind::Nop)); + statements[*slot] = statement; + } + for (slot, statement) in slots.iter().rev().zip(storage_dead.into_iter().rev()) { + assert!(matches!(statements[*slot].kind, StatementKind::Nop)); + statements[*slot] = statement; + } + + continue 'outer; + } + } + + !invalidated_statements.is_empty() +} + +fn find_locals(statement: &Statement<'_>) -> SmallVec<[Local; 4]> { + struct 
LocalCollector { + locals: SmallVec<[Local; 4]>, + } + impl Visitor<'_> for LocalCollector { + fn visit_local(&mut self, local: Local, _context: PlaceContext, _location: Location) { + self.locals.push(local); + } + } + + let mut visitor = LocalCollector { locals: SmallVec::new() }; + visitor.visit_statement(statement, Location::START); + visitor.locals +} + +fn as_assign<'a, 'tcx>( + result: Set1<Location>, + body: &'a Body<'tcx>, +) -> Option<(Location, &'a (Place<'tcx>, Rvalue<'tcx>))> { + let location = match result { + Set1::One(location) => location, + Set1::Empty | Set1::Many => return None, + }; + + body.stmt_at(location).left()?.kind.as_assign().map(|res| (location, res)) +} + +#[derive(Debug, Clone)] +struct Analysis { + read: Set1<Location>, + write: Set1<Location>, +} + +impl Default for Analysis { + fn default() -> Self { + Self { read: Set1::Empty, write: Set1::Empty } + } +} + +struct AnalysisVisitor { + analysis: IndexVec<Local, Analysis>, +} + +impl<'tcx> Visitor<'tcx> for AnalysisVisitor { + fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) { + use rustc_middle::mir::visit::MutatingUseContext; + match context { + PlaceContext::NonUse(_) => {} + PlaceContext::MutatingUse(MutatingUseContext::Store) => { + self.analysis[place.local].write.insert(location); + } + PlaceContext::MutatingUse( + MutatingUseContext::Borrow | MutatingUseContext::AddressOf, + ) => { + // FIXME: Explain why we consider this a read + self.analysis[place.local].read.insert(location); + } + PlaceContext::MutatingUse(_) => { + self.analysis[place.local].write.insert(location); + } + PlaceContext::NonMutatingUse(_) => { + self.analysis[place.local].read.insert(location); + } + } + + for elem in place.projection { + if let ProjectionElem::Index(local) = elem { + self.analysis[local].read.insert(location); + } + } + + self.super_place(place, context, location); + } + + fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) { + if let TerminatorKind::Return = 
terminator.kind { + self.analysis[RETURN_PLACE].read.insert(location); + } + self.super_terminator(terminator, location); + } +} diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 68b8911824c2e..94d792c3c80a4 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -82,6 +82,7 @@ mod ffi_unwind_calls; mod function_item_references; mod gvn; pub mod inline; +mod instcombine; mod instsimplify; mod jump_threading; mod large_enums; @@ -598,6 +599,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &simplify_comparison_integral::SimplifyComparisonIntegral, &dead_store_elimination::DeadStoreElimination, &dest_prop::DestinationPropagation, + &instcombine::InstCombine, &o1(simplify_branches::SimplifyConstCondition::Final), &o1(remove_noop_landing_pads::RemoveNoopLandingPads), &o1(simplify::SimplifyCfg::Final), diff --git a/tests/mir-opt/casts.roundtrip.PreCodegen.after.mir b/tests/mir-opt/casts.roundtrip.PreCodegen.after.mir new file mode 100644 index 0000000000000..dad198ce0a7be --- /dev/null +++ b/tests/mir-opt/casts.roundtrip.PreCodegen.after.mir @@ -0,0 +1,11 @@ +// MIR for `roundtrip` after PreCodegen + +fn roundtrip(_1: *const u8) -> *const u8 { + debug x => _1; + let mut _0: *const u8; + + bb0: { + _0 = _1 as *const u8 (PtrToPtr); + return; + } +} diff --git a/tests/mir-opt/instcombine/place_projection.place_projection.InstCombine.diff b/tests/mir-opt/instcombine/place_projection.place_projection.InstCombine.diff new file mode 100644 index 0000000000000..e7f2fb346c015 --- /dev/null +++ b/tests/mir-opt/instcombine/place_projection.place_projection.InstCombine.diff @@ -0,0 +1,22 @@ +- // MIR for `place_projection` before InstCombine ++ // MIR for `place_projection` after InstCombine + + fn place_projection(_1: Outer) -> u8 { + debug o => _1; + let mut _0: u8; + let _2: Inner; + scope 1 { + debug temp => _2; + } + + bb0: { + StorageLive(_2); +- _2 
= move (_1.0: Inner); +- _0 = (_2.0: u8); ++ _0 = ((_1.0: Inner).0: u8); ++ nop; + StorageDead(_2); + return; + } + } + diff --git a/tests/mir-opt/instcombine/place_projection.rs b/tests/mir-opt/instcombine/place_projection.rs new file mode 100644 index 0000000000000..176ee8c4c14c2 --- /dev/null +++ b/tests/mir-opt/instcombine/place_projection.rs @@ -0,0 +1,17 @@ +// skip-filecheck +// unit-test: InstCombine +#![crate_type = "lib"] + +pub struct Outer { + inner: Inner, +} + +struct Inner { + field: u8, +} + +// EMIT_MIR place_projection.place_projection.InstCombine.diff +pub fn place_projection(o: Outer) -> u8 { + let temp = o.inner; + temp.field +} diff --git a/tests/mir-opt/instcombine/ptr_cast.ptr_cast.InstCombine.diff b/tests/mir-opt/instcombine/ptr_cast.ptr_cast.InstCombine.diff new file mode 100644 index 0000000000000..80a8a3219d2e3 --- /dev/null +++ b/tests/mir-opt/instcombine/ptr_cast.ptr_cast.InstCombine.diff @@ -0,0 +1,23 @@ +- // MIR for `ptr_cast` before InstCombine ++ // MIR for `ptr_cast` after InstCombine + + fn ptr_cast(_1: *const u8) -> *mut () { + debug p => _1; + let mut _0: *mut (); + let mut _2: *mut u8; + let mut _3: *const u8; + + bb0: { + StorageLive(_2); + StorageLive(_3); + _3 = _1; +- _2 = move _3 as *mut u8 (PtrToPtr); ++ _0 = move _3 as *mut () (PtrToPtr); ++ nop; + StorageDead(_3); +- _0 = move _2 as *mut () (PtrToPtr); + StorageDead(_2); + return; + } + } + diff --git a/tests/mir-opt/instcombine/ptr_cast.rs b/tests/mir-opt/instcombine/ptr_cast.rs new file mode 100644 index 0000000000000..cfd83d5723614 --- /dev/null +++ b/tests/mir-opt/instcombine/ptr_cast.rs @@ -0,0 +1,8 @@ +// skip-filecheck +// unit-test: InstCombine +#![crate_type = "lib"] + +// EMIT_MIR ptr_cast.ptr_cast.InstCombine.diff +pub fn ptr_cast(p: *const u8) -> *mut () { + p as *mut u8 as *mut () +} diff --git a/tests/mir-opt/instcombine/ref_addressof.ref_addressof.InstCombine.diff b/tests/mir-opt/instcombine/ref_addressof.ref_addressof.InstCombine.diff new file mode 
100644 index 0000000000000..e000a12df4031 --- /dev/null +++ b/tests/mir-opt/instcombine/ref_addressof.ref_addressof.InstCombine.diff @@ -0,0 +1,44 @@ +- // MIR for `ref_addressof` before InstCombine ++ // MIR for `ref_addressof` after InstCombine + + fn ref_addressof(_1: T) -> () { + debug t => _1; + let mut _0: (); + let _2: &T; + let _4: (); + let mut _5: *const T; + scope 1 { + debug r => _2; + let _3: *const T; + scope 2 { + debug ptr => _3; + } + } + + bb0: { + StorageLive(_2); +- _2 = &_1; + StorageLive(_3); +- _3 = &raw const (*_2); ++ _3 = &raw const _1; ++ nop; + StorageLive(_4); + StorageLive(_5); + _5 = _3; + _4 = std::mem::drop::<*const T>(move _5) -> [return: bb1, unwind unreachable]; + } + + bb1: { + StorageDead(_5); + StorageDead(_4); + _0 = const (); + StorageDead(_3); + StorageDead(_2); + drop(_1) -> [return: bb2, unwind unreachable]; + } + + bb2: { + return; + } + } + diff --git a/tests/mir-opt/instcombine/ref_addressof.rs b/tests/mir-opt/instcombine/ref_addressof.rs new file mode 100644 index 0000000000000..09a65b7f14cb2 --- /dev/null +++ b/tests/mir-opt/instcombine/ref_addressof.rs @@ -0,0 +1,10 @@ +// skip-filecheck +// unit-test: InstCombine +#![crate_type = "lib"] + +// EMIT_MIR ref_addressof.ref_addressof.InstCombine.diff +pub fn ref_addressof(t: T) { + let r = &t; + let ptr = std::ptr::addr_of!(*r); + drop(ptr); +} diff --git a/tests/mir-opt/instcombine/ref_deref.ref_deref.InstCombine.diff b/tests/mir-opt/instcombine/ref_deref.ref_deref.InstCombine.diff new file mode 100644 index 0000000000000..5af842a36eaeb --- /dev/null +++ b/tests/mir-opt/instcombine/ref_deref.ref_deref.InstCombine.diff @@ -0,0 +1,22 @@ +- // MIR for `ref_deref` before InstCombine ++ // MIR for `ref_deref` after InstCombine + + fn ref_deref(_1: T) -> T { + debug t => _1; + let mut _0: T; + let _2: &T; + scope 1 { + debug r => _2; + } + + bb0: { + StorageLive(_2); +- _2 = &_1; +- _0 = (*_2); ++ _0 = _1; ++ nop; + StorageDead(_2); + return; + } + } + diff --git 
a/tests/mir-opt/instcombine/ref_deref.rs b/tests/mir-opt/instcombine/ref_deref.rs new file mode 100644 index 0000000000000..c6254eccdbf19 --- /dev/null +++ b/tests/mir-opt/instcombine/ref_deref.rs @@ -0,0 +1,9 @@ +// skip-filecheck +// unit-test: InstCombine +#![crate_type = "lib"] + +// EMIT_MIR ref_deref.ref_deref.InstCombine.diff +pub fn ref_deref(t: T) -> T { + let r = &t; + *r +} diff --git a/tests/mir-opt/nested_getter.outer_get.InstCombine.diff b/tests/mir-opt/nested_getter.outer_get.InstCombine.diff new file mode 100644 index 0000000000000..fa7fc3fb1661f --- /dev/null +++ b/tests/mir-opt/nested_getter.outer_get.InstCombine.diff @@ -0,0 +1,18 @@ +- // MIR for `outer_get` before InstCombine ++ // MIR for `outer_get` after InstCombine + + fn outer_get(_1: &Outer) -> u8 { + debug this => _1; + let mut _0: u8; + let mut _2: &Inner; + let _3: &Inner; + scope 1 (inlined inner_get) { + debug this => &((*_1).0: Inner); + } + + bb0: { + _0 = (((*_1).0: Inner).0: u8); + return; + } + } + diff --git a/tests/mir-opt/pre-codegen/nested_getter.outer_get.PreCodegen.after.mir b/tests/mir-opt/pre-codegen/nested_getter.outer_get.PreCodegen.after.mir new file mode 100644 index 0000000000000..df1b418ddfdff --- /dev/null +++ b/tests/mir-opt/pre-codegen/nested_getter.outer_get.PreCodegen.after.mir @@ -0,0 +1,15 @@ +// MIR for `outer_get` after PreCodegen + +fn outer_get(_1: &Outer) -> u8 { + debug this => _1; + let mut _0: u8; + let _2: &Inner; + scope 1 (inlined inner_get) { + debug this => _2; + } + + bb0: { + _0 = (((*_1).0: Inner).0: u8); + return; + } +} diff --git a/tests/mir-opt/pre-codegen/nested_getter.rs b/tests/mir-opt/pre-codegen/nested_getter.rs new file mode 100644 index 0000000000000..e4c1af20381c6 --- /dev/null +++ b/tests/mir-opt/pre-codegen/nested_getter.rs @@ -0,0 +1,23 @@ +// compile-flags: -O -Cdebuginfo=0 -Zmir-opt-level=2 +// only-64bit +// ignore-debug + +#![crate_type = "lib"] + +pub struct Outer { + inner: Inner, +} + +struct Inner { + inner: u8 +} + 
+#[inline] +fn inner_get(this: &Inner) -> u8 { + this.inner +} + +// EMIT_MIR nested_getter.outer_get.PreCodegen.after.mir +pub fn outer_get(this: &Outer) -> u8 { + inner_get(&this.inner) +} diff --git a/tests/mir-opt/pre-codegen/simple_swap.rs b/tests/mir-opt/pre-codegen/simple_swap.rs new file mode 100644 index 0000000000000..100893a585e39 --- /dev/null +++ b/tests/mir-opt/pre-codegen/simple_swap.rs @@ -0,0 +1,15 @@ +// compile-flags: -O -C debuginfo=0 -Zmir-opt-level=2 +// only-64bit +// ignore-debug + +#![crate_type = "lib"] + +// EMIT_MIR simple_swap.simple_swap.PreCodegen.after.mir +pub fn simple_swap(x: &mut T, y: &mut T) { + use std::ptr::{read, write}; + unsafe { + let temp = read(x); + write(x, read(y)); + write(y, temp); + } +} diff --git a/tests/mir-opt/pre-codegen/simple_swap.simple_swap.PreCodegen.after.mir b/tests/mir-opt/pre-codegen/simple_swap.simple_swap.PreCodegen.after.mir new file mode 100644 index 0000000000000..55489942446ff --- /dev/null +++ b/tests/mir-opt/pre-codegen/simple_swap.simple_swap.PreCodegen.after.mir @@ -0,0 +1,73 @@ +// MIR for `simple_swap` after PreCodegen + +fn simple_swap(_1: &mut T, _2: &mut T) -> () { + debug x => _1; + debug y => _2; + let mut _0: (); + let mut _3: *const T; + let mut _5: *mut T; + let mut _6: *const T; + let mut _7: *mut T; + let mut _8: T; + scope 1 { + let _4: T; + scope 2 { + debug temp => _4; + scope 6 (inlined std::ptr::read::) { + debug src => _6; + scope 7 { + scope 8 (inlined std::ptr::read::runtime::) { + debug src => _6; + } + } + } + scope 9 (inlined std::ptr::write::) { + debug dst => _5; + debug src => _8; + scope 10 { + scope 11 (inlined std::ptr::write::runtime::) { + debug dst => _5; + } + } + } + scope 12 (inlined std::ptr::write::) { + debug dst => _7; + debug src => _4; + scope 13 { + scope 14 (inlined std::ptr::write::runtime::) { + debug dst => _7; + } + } + } + } + scope 3 (inlined std::ptr::read::) { + debug src => _3; + scope 4 { + scope 5 (inlined std::ptr::read::runtime::) { + 
debug src => _3; + } + } + } + } + + bb0: { + StorageLive(_3); + _3 = &raw const (*_1); + _4 = (*_3); + StorageDead(_3); + StorageLive(_5); + _5 = &raw mut (*_1); + StorageLive(_8); + StorageLive(_6); + _6 = &raw const (*_2); + (*_5) = (*_6); + StorageDead(_6); + StorageDead(_8); + StorageDead(_5); + StorageLive(_7); + _7 = &raw mut (*_2); + (*_7) = move _4; + StorageDead(_7); + return; + } +} diff --git a/tests/mir-opt/vec_as_ptr.as_ptr.InstCombine.diff b/tests/mir-opt/vec_as_ptr.as_ptr.InstCombine.diff new file mode 100644 index 0000000000000..d7dd448f35f4c --- /dev/null +++ b/tests/mir-opt/vec_as_ptr.as_ptr.InstCombine.diff @@ -0,0 +1,42 @@ +- // MIR for `as_ptr` before InstCombine ++ // MIR for `as_ptr` after InstCombine + + fn as_ptr(_1: &Vec) -> *const i32 { + debug v => _1; + let mut _0: *const i32; + scope 1 (inlined Vec::::as_ptr) { + debug self => _1; + let mut _2: *mut i32; + let mut _3: &alloc::raw_vec::RawVec; + scope 2 (inlined alloc::raw_vec::RawVec::::ptr) { + debug self => _3; + let mut _5: std::ptr::NonNull; + scope 3 (inlined Unique::::as_ptr) { + debug ((self: Unique).0: std::ptr::NonNull) => _5; + debug ((self: Unique).1: std::marker::PhantomData) => const PhantomData::; + scope 4 (inlined NonNull::::as_ptr) { + debug self => _5; + let mut _4: *const i32; + } + } + } + } + + bb0: { + StorageLive(_2); + StorageLive(_3); + _3 = &((*_1).0: alloc::raw_vec::RawVec); + StorageLive(_5); + _5 = ((((*_1).0: alloc::raw_vec::RawVec).0: std::ptr::Unique).0: std::ptr::NonNull); + StorageLive(_4); + _4 = (_5.0: *const i32); + _2 = move _4 as *mut i32 (PtrToPtr); + StorageDead(_4); + StorageDead(_5); + _0 = move _2 as *const i32 (PointerCoercion(MutToConstPointer)); + StorageDead(_3); + StorageDead(_2); + return; + } + } + diff --git a/tests/mir-opt/vec_as_ptr.as_ptr.PreCodegen.after.mir b/tests/mir-opt/vec_as_ptr.as_ptr.PreCodegen.after.mir new file mode 100644 index 0000000000000..1cb5aa753f772 --- /dev/null +++ 
b/tests/mir-opt/vec_as_ptr.as_ptr.PreCodegen.after.mir @@ -0,0 +1,40 @@ +// MIR for `as_ptr` after PreCodegen + +fn as_ptr(_1: &Vec) -> *const i32 { + debug v => _1; + let mut _0: *const i32; + scope 1 (inlined Vec::::as_ptr) { + debug self => _1; + let mut _2: &alloc::raw_vec::RawVec; + let mut _5: *mut i32; + scope 2 (inlined alloc::raw_vec::RawVec::::ptr) { + debug self => _2; + let mut _3: std::ptr::NonNull; + scope 3 (inlined Unique::::as_ptr) { + debug ((self: Unique).0: std::ptr::NonNull) => _3; + debug ((self: Unique).1: std::marker::PhantomData) => const PhantomData::; + scope 4 (inlined NonNull::::as_ptr) { + debug self => _3; + let mut _4: *const i32; + } + } + } + } + + bb0: { + StorageLive(_5); + StorageLive(_2); + _2 = &((*_1).0: alloc::raw_vec::RawVec); + StorageLive(_3); + _3 = ((((*_1).0: alloc::raw_vec::RawVec).0: std::ptr::Unique).0: std::ptr::NonNull); + StorageLive(_4); + _4 = (_3.0: *const i32); + _5 = move _4 as *mut i32 (PtrToPtr); + StorageDead(_4); + StorageDead(_3); + _0 = move _5 as *const i32 (PointerCoercion(MutToConstPointer)); + StorageDead(_2); + StorageDead(_5); + return; + } +} diff --git a/tests/mir-opt/vec_as_ptr.rs b/tests/mir-opt/vec_as_ptr.rs new file mode 100644 index 0000000000000..a78aa12a97f06 --- /dev/null +++ b/tests/mir-opt/vec_as_ptr.rs @@ -0,0 +1,15 @@ +// skip-filecheck +// compile-flags: -Zmir-opt-level=2 -Zinline-mir +// no-debug + +#![crate_type = "lib"] + +// Theoretically, Vec::as_ptr could be implemented with a single assignment, +// and a long projection. This tests tracks how close we are to that, without +// breaking the companion Vec::as_ptr codegen test. + +// EMIT_MIR vec_as_ptr.as_ptr.InstCombine.diff +// EMIT_MIR vec_as_ptr.as_ptr.PreCodegen.after.mir +pub fn as_ptr(v: &Vec) -> *const i32 { + v.as_ptr() +}