From 7a2b66319ee219e757b6661549f460d8548dcbed Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 24 Jan 2024 11:04:55 +0000 Subject: [PATCH 01/20] interning doesn't check alignment anymroe, because it doesn't do any more projections. --- compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 6a92ed9717de5..8499de20498ff 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -1,5 +1,3 @@ -use std::mem; - use either::{Left, Right}; use rustc_hir::def::DefKind; @@ -75,9 +73,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( None => InternKind::Constant, } }; - let check_alignment = mem::replace(&mut ecx.machine.check_alignment, CheckAlignment::No); // interning doesn't need to respect alignment intern_const_alloc_recursive(ecx, intern_kind, &ret)?; - ecx.machine.check_alignment = check_alignment; debug!("eval_body_using_ecx done: {:?}", ret); Ok(ret) From b6d0225cafcf7d2421ad943647ed6ef4b8eb7bb4 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 24 Jan 2024 11:05:14 +0000 Subject: [PATCH 02/20] prefer instrumentation over entry/exit tracing statements --- compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 8499de20498ff..a2d0f1c5583f3 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -22,12 +22,13 @@ use crate::interpret::{ }; // Returns a pointer to where the result lives +#[instrument(level = "trace", skip(ecx, body), ret)] fn eval_body_using_ecx<'mir, 'tcx>( ecx: &mut CompileTimeEvalContext<'mir, 'tcx>, cid: GlobalId<'tcx>, body: &'mir mir::Body<'tcx>, ) -> InterpResult<'tcx, MPlaceTy<'tcx>> { - debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env); + trace!(?ecx.param_env); let tcx = *ecx.tcx; assert!( cid.promoted.is_some() @@ -75,7 +76,6 @@ fn eval_body_using_ecx<'mir, 'tcx>( }; intern_const_alloc_recursive(ecx, intern_kind, &ret)?; - debug!("eval_body_using_ecx done: {:?}", ret); Ok(ret) } From a73c44889a6402f13d25fd5b973765ff62fb9885 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 24 Jan 2024 11:32:38 +0000 Subject: [PATCH 03/20] Prefer external iteration now that we don't actually recurse anymore --- .../rustc_const_eval/src/interpret/intern.rs | 38 ++++++++----------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 751fbfacaad00..48920ba384ada 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -41,13 +41,12 @@ pub trait CompileTimeMachine<'mir, 'tcx: 'mir, T> = Machine< /// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be* /// already mutable (as a sanity check). /// -/// `recursive_alloc` is called for all recursively encountered allocations. +/// Returns an iterator over all relocations referred to by this allocation. 
fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>( ecx: &'rt mut InterpCx<'mir, 'tcx, M>, alloc_id: AllocId, mutability: Mutability, - mut recursive_alloc: impl FnMut(&InterpCx<'mir, 'tcx, M>, CtfeProvenance), -) -> Result<(), ()> { +) -> Result + 'tcx, ()> { trace!("intern_shallow {:?}", alloc_id); // remove allocation let Some((_kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else { @@ -65,14 +64,10 @@ fn intern_shallow<'rt, 'mir, 'tcx, T, M: CompileTimeMachine<'mir, 'tcx, T>>( assert_eq!(alloc.mutability, Mutability::Mut); } } - // record child allocations - for &(_, prov) in alloc.provenance().ptrs().iter() { - recursive_alloc(ecx, prov); - } // link the alloc id to the actual allocation let alloc = ecx.tcx.mk_const_alloc(alloc); ecx.tcx.set_alloc_id_memory(alloc_id, alloc); - Ok(()) + Ok(alloc.0.0.provenance().ptrs().iter().map(|&(_, prov)| prov)) } /// How a constant value should be interned. @@ -128,7 +123,7 @@ pub fn intern_const_alloc_recursive< } }; - // Initialize recursive interning. + // Intern the base allocation, and initialize todo list for recursive interning. let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id(); let mut todo = vec![(base_alloc_id, base_mutability)]; // We need to distinguish "has just been interned" from "was already in `tcx`", @@ -154,7 +149,10 @@ pub fn intern_const_alloc_recursive< continue; } just_interned.insert(alloc_id); - intern_shallow(ecx, alloc_id, mutability, |ecx, prov| { + let provs = intern_shallow(ecx, alloc_id, mutability).map_err(|()| { + ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }) + })?; + for prov in provs { let alloc_id = prov.alloc_id(); if intern_kind != InternKind::Promoted && inner_mutability == Mutability::Not @@ -169,7 +167,7 @@ pub fn intern_const_alloc_recursive< // during interning is to justify why we intern the *new* allocations immutably, // so we can completely ignore existing allocations. We also don't need to add // this to the todo list, since after all it is already interned. - return; + continue; } // Found a mutable pointer inside a const where inner allocations should be // immutable. We exclude promoteds from this, since things like `&mut []` and @@ -189,10 +187,7 @@ pub fn intern_const_alloc_recursive< // okay with losing some potential for immutability here. This can anyway only affect // `static mut`. todo.push((alloc_id, inner_mutability)); - }) - .map_err(|()| { - ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }) - })?; + } } if found_bad_mutable_pointer { return Err(ecx @@ -220,13 +215,13 @@ pub fn intern_const_alloc_for_constprop< return Ok(()); } // Move allocation to `tcx`. - intern_shallow(ecx, alloc_id, Mutability::Not, |_ecx, _| { + for _ in intern_shallow(ecx, alloc_id, Mutability::Not).map_err(|()| err_ub!(DeadLocal))? { // We are not doing recursive interning, so we don't currently support provenance. // (If this assertion ever triggers, we should just implement a // proper recursive interning loop -- or just call `intern_const_alloc_recursive`. 
panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance") - }) - .map_err(|()| err_ub!(DeadLocal).into()) + } + Ok(()) } impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>> @@ -247,15 +242,14 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>> let dest = self.allocate(layout, MemoryKind::Stack)?; f(self, &dest.clone().into())?; let alloc_id = dest.ptr().provenance.unwrap().alloc_id(); // this was just allocated, it must have provenance - intern_shallow(self, alloc_id, Mutability::Not, |ecx, prov| { + for prov in intern_shallow(self, alloc_id, Mutability::Not).unwrap() { // We are not doing recursive interning, so we don't currently support provenance. // (If this assertion ever triggers, we should just implement a // proper recursive interning loop -- or just call `intern_const_alloc_recursive`. - if !ecx.tcx.try_get_global_alloc(prov.alloc_id()).is_some() { + if !self.tcx.try_get_global_alloc(prov.alloc_id()).is_some() { panic!("`intern_with_temp_alloc` with nested allocations"); } - }) - .unwrap(); + } Ok(alloc_id) } } From a57a00ebf69722de2944d37de10946cf3aa6fe15 Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Wed, 24 Jan 2024 11:46:57 +0000 Subject: [PATCH 04/20] separately intern the outermost alloc from the rest --- .../rustc_const_eval/src/interpret/intern.rs | 80 +++++++++---------- 1 file changed, 39 insertions(+), 41 deletions(-) diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 48920ba384ada..7621d038855f5 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -125,10 +125,11 @@ pub fn intern_const_alloc_recursive< // Intern the base allocation, and initialize todo list for recursive interning. let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id(); - let mut todo = vec![(base_alloc_id, base_mutability)]; + let mut todo: Vec<_> = + intern_shallow(ecx, base_alloc_id, base_mutability).unwrap().map(|prov| prov).collect(); // We need to distinguish "has just been interned" from "was already in `tcx`", // so we track this in a separate set. - let mut just_interned = FxHashSet::default(); + let mut just_interned: FxHashSet<_> = std::iter::once(base_alloc_id).collect(); // Whether we encountered a bad mutable pointer. // We want to first report "dangling" and then "mutable", so we need to delay reporting these // errors. @@ -142,52 +143,49 @@ pub fn intern_const_alloc_recursive< // raw pointers, so we cannot rely on validation to catch them -- and since interning runs // before validation, and interning doesn't know the type of anything, this means we can't show // better errors. Maybe we should consider doing validation before interning in the future. - while let Some((alloc_id, mutability)) = todo.pop() { + while let Some(prov) = todo.pop() { + let alloc_id = prov.alloc_id(); + if intern_kind != InternKind::Promoted + && inner_mutability == Mutability::Not + && !prov.immutable() + { + if ecx.tcx.try_get_global_alloc(alloc_id).is_some() + && !just_interned.contains(&alloc_id) + { + // This is a pointer to some memory from another constant. We encounter mutable + // pointers to such memory since we do not always track immutability through + // these "global" pointers. Allowing them is harmless; the point of these checks + // during interning is to justify why we intern the *new* allocations immutably, + // so we can completely ignore existing allocations. 
We also don't need to add + // this to the todo list, since after all it is already interned. + continue; + } + // Found a mutable pointer inside a const where inner allocations should be + // immutable. We exclude promoteds from this, since things like `&mut []` and + // `&None::>` lead to promotion that can produce mutable pointers. We rely + // on the promotion analysis not screwing up to ensure that it is sound to intern + // promoteds as immutable. + found_bad_mutable_pointer = true; + } if ecx.tcx.try_get_global_alloc(alloc_id).is_some() { // Already interned. debug_assert!(!ecx.memory.alloc_map.contains_key(&alloc_id)); continue; } just_interned.insert(alloc_id); - let provs = intern_shallow(ecx, alloc_id, mutability).map_err(|()| { + // We always intern with `inner_mutability`, and furthermore we ensured above that if + // that is "immutable", then there are *no* mutable pointers anywhere in the newly + // interned memory -- justifying that we can indeed intern immutably. However this also + // means we can *not* easily intern immutably here if `prov.immutable()` is true and + // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and + // we'd have to somehow check that they are *all* immutable before deciding that this + // allocation can be made immutable. In the future we could consider analyzing all + // pointers before deciding which allocations can be made immutable; but for now we are + // okay with losing some potential for immutability here. This can anyway only affect + // `static mut`. + todo.extend(intern_shallow(ecx, alloc_id, inner_mutability).map_err(|()| { ecx.tcx.dcx().emit_err(DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }) - })?; - for prov in provs { - let alloc_id = prov.alloc_id(); - if intern_kind != InternKind::Promoted - && inner_mutability == Mutability::Not - && !prov.immutable() - { - if ecx.tcx.try_get_global_alloc(alloc_id).is_some() - && !just_interned.contains(&alloc_id) - { - // This is a pointer to some memory from another constant. We encounter mutable - // pointers to such memory since we do not always track immutability through - // these "global" pointers. Allowing them is harmless; the point of these checks - // during interning is to justify why we intern the *new* allocations immutably, - // so we can completely ignore existing allocations. We also don't need to add - // this to the todo list, since after all it is already interned. - continue; - } - // Found a mutable pointer inside a const where inner allocations should be - // immutable. We exclude promoteds from this, since things like `&mut []` and - // `&None::>` lead to promotion that can produce mutable pointers. We rely - // on the promotion analysis not screwing up to ensure that it is sound to intern - // promoteds as immutable. - found_bad_mutable_pointer = true; - } - // We always intern with `inner_mutability`, and furthermore we ensured above that if - // that is "immutable", then there are *no* mutable pointers anywhere in the newly - // interned memory -- justifying that we can indeed intern immutably. However this also - // means we can *not* easily intern immutably here if `prov.immutable()` is true and - // `inner_mutability` is `Mut`: there might be other pointers to that allocation, and - // we'd have to somehow check that they are *all* immutable before deciding that this - // allocation can be made immutable. 
In the future we could consider analyzing all - // pointers before deciding which allocations can be made immutable; but for now we are - // okay with losing some potential for immutability here. This can anyway only affect - // `static mut`. - todo.push((alloc_id, inner_mutability)); - } + })?); } if found_bad_mutable_pointer { return Err(ecx From 5d46b982c539ef3a227bd5557ec8a1648dfc5a5c Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Thu, 25 Jan 2024 10:38:17 +0000 Subject: [PATCH 05/20] Document base vs nested alloc interning --- compiler/rustc_const_eval/src/interpret/intern.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 7621d038855f5..c3a53f90e60a0 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -125,6 +125,9 @@ pub fn intern_const_alloc_recursive< // Intern the base allocation, and initialize todo list for recursive interning. let base_alloc_id = ret.ptr().provenance.unwrap().alloc_id(); + // First we intern the base allocation, as it requires a different mutability. + // This gives us the initial set of nested allocations, which will then all be processed + // recursively in the loop below. let mut todo: Vec<_> = intern_shallow(ecx, base_alloc_id, base_mutability).unwrap().map(|prov| prov).collect(); // We need to distinguish "has just been interned" from "was already in `tcx`", From 0f55e1b117cab62b9216a0636dadffd14ae77f38 Mon Sep 17 00:00:00 2001 From: Markus Reiter Date: Wed, 31 Jan 2024 06:06:50 +0100 Subject: [PATCH 06/20] Simplify `impl_zeroable_primitive` macro. --- library/core/src/num/nonzero.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index 1124719fc8df1..ea922970b92d8 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -35,7 +35,7 @@ mod private { pub trait ZeroablePrimitive: Sized + Copy + private::Sealed {} macro_rules! impl_zeroable_primitive { - ($NonZero:ident ( $primitive:ty )) => { + ($primitive:ty) => { #[unstable( feature = "nonzero_internals", reason = "implementation detail which may disappear or be replaced at any time", @@ -52,18 +52,18 @@ macro_rules! impl_zeroable_primitive { }; } -impl_zeroable_primitive!(NonZeroU8(u8)); -impl_zeroable_primitive!(NonZeroU16(u16)); -impl_zeroable_primitive!(NonZeroU32(u32)); -impl_zeroable_primitive!(NonZeroU64(u64)); -impl_zeroable_primitive!(NonZeroU128(u128)); -impl_zeroable_primitive!(NonZeroUsize(usize)); -impl_zeroable_primitive!(NonZeroI8(i8)); -impl_zeroable_primitive!(NonZeroI16(i16)); -impl_zeroable_primitive!(NonZeroI32(i32)); -impl_zeroable_primitive!(NonZeroI64(i64)); -impl_zeroable_primitive!(NonZeroI128(i128)); -impl_zeroable_primitive!(NonZeroIsize(isize)); +impl_zeroable_primitive!(u8); +impl_zeroable_primitive!(u16); +impl_zeroable_primitive!(u32); +impl_zeroable_primitive!(u64); +impl_zeroable_primitive!(u128); +impl_zeroable_primitive!(usize); +impl_zeroable_primitive!(i8); +impl_zeroable_primitive!(i16); +impl_zeroable_primitive!(i32); +impl_zeroable_primitive!(i64); +impl_zeroable_primitive!(i128); +impl_zeroable_primitive!(isize); /// A value that is known not to equal zero. 
/// From a5042de74199af0ff162b5581ad40e61de0ccd15 Mon Sep 17 00:00:00 2001 From: Markus Reiter Date: Mon, 22 Jan 2024 18:04:05 +0100 Subject: [PATCH 07/20] Make `NonZero` constructors generic. --- library/core/src/num/nonzero.rs | 155 +++++++++++++++++--------------- 1 file changed, 84 insertions(+), 71 deletions(-) diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index ea922970b92d8..8cbbc2aa1a24c 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -83,6 +83,90 @@ impl_zeroable_primitive!(isize); #[rustc_diagnostic_item = "NonZero"] pub struct NonZero(T); +impl NonZero +where + T: ZeroablePrimitive, +{ + /// Creates a non-zero if the given value is not zero. + #[stable(feature = "nonzero", since = "1.28.0")] + #[rustc_const_stable(feature = "const_nonzero_int_methods", since = "1.47.0")] + #[rustc_allow_const_fn_unstable(const_refs_to_cell)] + #[must_use] + #[inline] + pub const fn new(n: T) -> Option { + // SAFETY: Memory layout optimization guarantees that `Option>` has + // the same layout and size as `T`, with `0` representing `None`. + unsafe { crate::mem::transmute_copy(&n) } + } + + /// Creates a non-zero without checking whether the value is non-zero. + /// This results in undefined behaviour if the value is zero. + /// + /// # Safety + /// + /// The value must not be zero. + #[stable(feature = "nonzero", since = "1.28.0")] + #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] + #[must_use] + #[inline] + pub const unsafe fn new_unchecked(n: T) -> Self { + match Self::new(n) { + Some(n) => n, + None => { + // SAFETY: The caller guarantees that `n` is non-zero, so this is unreachable. + unsafe { + crate::intrinsics::assert_unsafe_precondition!( + "NonZero::new_unchecked requires the argument to be non-zero", + () => false + ); + + crate::hint::unreachable_unchecked() + } + } + } + } + + /// Converts a reference to a non-zero mutable reference + /// if the referenced value is not zero. + #[unstable(feature = "nonzero_from_mut", issue = "106290")] + #[must_use] + #[inline] + pub fn from_mut(n: &mut T) -> Option<&mut Self> { + // SAFETY: Memory layout optimization guarantees that `Option>` has + // the same layout and size as `T`, with `0` representing `None`. + let opt_n = unsafe { &mut *(n as *mut T as *mut Option) }; + + opt_n.as_mut() + } + + /// Converts a mutable reference to a non-zero mutable reference + /// without checking whether the referenced value is non-zero. + /// This results in undefined behavior if the referenced value is zero. + /// + /// # Safety + /// + /// The referenced value must not be zero. + #[unstable(feature = "nonzero_from_mut", issue = "106290")] + #[must_use] + #[inline] + pub unsafe fn from_mut_unchecked(n: &mut T) -> &mut Self { + match Self::from_mut(n) { + Some(n) => n, + None => { + // SAFETY: The caller guarantees that `n` references a value that is non-zero, so this is unreachable. + unsafe { + crate::intrinsics::assert_unsafe_precondition!( + "NonZero::from_mut_unchecked requires the argument to dereference as non-zero", + () => false + ); + + crate::hint::unreachable_unchecked() + } + } + } + } +} + macro_rules! impl_nonzero_fmt { ( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => { $( @@ -100,7 +184,6 @@ macro_rules! impl_nonzero_fmt { macro_rules! nonzero_integer { ( #[$stability:meta] - #[$const_new_unchecked_stability:meta] Self = $Ty:ident, Primitive = $signedness:ident $Int:ident, $(UnsignedNonZero = $UnsignedNonZero:ident,)? 
@@ -143,74 +226,6 @@ macro_rules! nonzero_integer { pub type $Ty = NonZero<$Int>; impl $Ty { - /// Creates a non-zero without checking whether the value is non-zero. - /// This results in undefined behaviour if the value is zero. - /// - /// # Safety - /// - /// The value must not be zero. - #[$stability] - #[$const_new_unchecked_stability] - #[must_use] - #[inline] - pub const unsafe fn new_unchecked(n: $Int) -> Self { - crate::panic::debug_assert_nounwind!( - n != 0, - concat!(stringify!($Ty), "::new_unchecked requires a non-zero argument") - ); - // SAFETY: this is guaranteed to be safe by the caller. - unsafe { - Self(n) - } - } - - /// Creates a non-zero if the given value is not zero. - #[$stability] - #[rustc_const_stable(feature = "const_nonzero_int_methods", since = "1.47.0")] - #[must_use] - #[inline] - pub const fn new(n: $Int) -> Option { - if n != 0 { - // SAFETY: we just checked that there's no `0` - Some(unsafe { Self(n) }) - } else { - None - } - } - - /// Converts a primitive mutable reference to a non-zero mutable reference - /// without checking whether the referenced value is non-zero. - /// This results in undefined behavior if `*n` is zero. - /// - /// # Safety - /// The referenced value must not be currently zero. - #[unstable(feature = "nonzero_from_mut", issue = "106290")] - #[must_use] - #[inline] - pub unsafe fn from_mut_unchecked(n: &mut $Int) -> &mut Self { - // SAFETY: Self is repr(transparent), and the value is assumed to be non-zero. - unsafe { - let n_alias = &mut *n; - core::intrinsics::assert_unsafe_precondition!( - concat!(stringify!($Ty), "::from_mut_unchecked requires the argument to dereference as non-zero"), - (n_alias: &mut $Int) => *n_alias != 0 - ); - &mut *(n as *mut $Int as *mut Self) - } - } - - /// Converts a primitive mutable reference to a non-zero mutable reference - /// if the referenced integer is not zero. - #[unstable(feature = "nonzero_from_mut", issue = "106290")] - #[must_use] - #[inline] - pub fn from_mut(n: &mut $Int) -> Option<&mut Self> { - // SAFETY: Self is repr(transparent), and the value is non-zero. - // As long as the returned reference is alive, - // the user cannot `*n = 0` directly. - (*n != 0).then(|| unsafe { &mut *(n as *mut $Int as *mut Self) }) - } - /// Returns the value as a primitive type. #[$stability] #[inline] @@ -724,7 +739,6 @@ macro_rules! nonzero_integer { (Self = $Ty:ident, Primitive = unsigned $Int:ident $(,)?) => { nonzero_integer! { #[stable(feature = "nonzero", since = "1.28.0")] - #[rustc_const_stable(feature = "nonzero", since = "1.28.0")] Self = $Ty, Primitive = unsigned $Int, UnsignedPrimitive = $Int, @@ -735,7 +749,6 @@ macro_rules! nonzero_integer { (Self = $Ty:ident, Primitive = signed $Int:ident, $($rest:tt)*) => { nonzero_integer! 
{ #[stable(feature = "signed_nonzero", since = "1.34.0")] - #[rustc_const_stable(feature = "signed_nonzero", since = "1.34.0")] Self = $Ty, Primitive = signed $Int, $($rest)* From 3cc601ac7e43752e0514c6e1cb2415e781c8e666 Mon Sep 17 00:00:00 2001 From: GnomedDev Date: Wed, 31 Jan 2024 13:36:37 +0000 Subject: [PATCH 08/20] Switch OwnedStore handle count to AtomicU32 --- library/proc_macro/src/bridge/client.rs | 9 +++++---- library/proc_macro/src/bridge/handle.rs | 10 +++++----- library/proc_macro/src/bridge/mod.rs | 1 - 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/library/proc_macro/src/bridge/client.rs b/library/proc_macro/src/bridge/client.rs index 52a08cad9110f..9255c3abc8a02 100644 --- a/library/proc_macro/src/bridge/client.rs +++ b/library/proc_macro/src/bridge/client.rs @@ -3,6 +3,7 @@ use super::*; use std::marker::PhantomData; +use std::sync::atomic::AtomicU32; macro_rules! define_handles { ( @@ -12,8 +13,8 @@ macro_rules! define_handles { #[repr(C)] #[allow(non_snake_case)] pub struct HandleCounters { - $($oty: AtomicUsize,)* - $($ity: AtomicUsize,)* + $($oty: AtomicU32,)* + $($ity: AtomicU32,)* } impl HandleCounters { @@ -21,8 +22,8 @@ macro_rules! define_handles { // a wrapper `fn` pointer, once `const fn` can reference `static`s. extern "C" fn get() -> &'static Self { static COUNTERS: HandleCounters = HandleCounters { - $($oty: AtomicUsize::new(1),)* - $($ity: AtomicUsize::new(1),)* + $($oty: AtomicU32::new(1),)* + $($ity: AtomicU32::new(1),)* }; &COUNTERS } diff --git a/library/proc_macro/src/bridge/handle.rs b/library/proc_macro/src/bridge/handle.rs index 00954107b7769..b3a763069974f 100644 --- a/library/proc_macro/src/bridge/handle.rs +++ b/library/proc_macro/src/bridge/handle.rs @@ -4,7 +4,7 @@ use std::collections::BTreeMap; use std::hash::Hash; use std::num::NonZeroU32; use std::ops::{Index, IndexMut}; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicU32, Ordering}; use super::fxhash::FxHashMap; @@ -13,12 +13,12 @@ pub(super) type Handle = NonZeroU32; /// A store that associates values of type `T` with numeric handles. A value can /// be looked up using its handle. pub(super) struct OwnedStore { - counter: &'static AtomicUsize, + counter: &'static AtomicU32, data: BTreeMap, } impl OwnedStore { - pub(super) fn new(counter: &'static AtomicUsize) -> Self { + pub(super) fn new(counter: &'static AtomicU32) -> Self { // Ensure the handle counter isn't 0, which would panic later, // when `NonZeroU32::new` (aka `Handle::new`) is called in `alloc`. 
assert_ne!(counter.load(Ordering::SeqCst), 0); @@ -30,7 +30,7 @@ impl OwnedStore { impl OwnedStore { pub(super) fn alloc(&mut self, x: T) -> Handle { let counter = self.counter.fetch_add(1, Ordering::SeqCst); - let handle = Handle::new(counter as u32).expect("`proc_macro` handle counter overflowed"); + let handle = Handle::new(counter).expect("`proc_macro` handle counter overflowed"); assert!(self.data.insert(handle, x).is_none()); handle } @@ -60,7 +60,7 @@ pub(super) struct InternedStore { } impl InternedStore { - pub(super) fn new(counter: &'static AtomicUsize) -> Self { + pub(super) fn new(counter: &'static AtomicU32) -> Self { InternedStore { owned: OwnedStore::new(counter), interner: FxHashMap::default() } } diff --git a/library/proc_macro/src/bridge/mod.rs b/library/proc_macro/src/bridge/mod.rs index 75bf3329786a4..55e24b6491c79 100644 --- a/library/proc_macro/src/bridge/mod.rs +++ b/library/proc_macro/src/bridge/mod.rs @@ -16,7 +16,6 @@ use std::mem; use std::ops::Bound; use std::ops::Range; use std::panic; -use std::sync::atomic::AtomicUsize; use std::sync::Once; use std::thread; From 2e212b79e09bce189a787a5b0c05ee5318e3c574 Mon Sep 17 00:00:00 2001 From: Zalathar Date: Thu, 25 Jan 2024 13:35:40 +1100 Subject: [PATCH 09/20] coverage: Split out counter increment sites from BCB node/edge counters This makes it possible for two nodes/edges in the coverage graph to share the same counter, without causing the instrumentor to inject unwanted duplicate counter-increment statements. --- .../src/coverage/counters.rs | 70 ++++++++------- .../rustc_mir_transform/src/coverage/mod.rs | 89 +++++++++---------- 2 files changed, 79 insertions(+), 80 deletions(-) diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index 8c11dea5d4ee6..9a1d8bae6b410 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -1,4 +1,5 @@ -use rustc_data_structures::fx::FxIndexMap; +use rustc_data_structures::captures::Captures; +use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::graph::WithNumNodes; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; @@ -38,19 +39,27 @@ impl Debug for BcbCounter { } } +#[derive(Debug)] +pub(super) enum CounterIncrementSite { + Node { bcb: BasicCoverageBlock }, + Edge { from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock }, +} + /// Generates and stores coverage counter and coverage expression information /// associated with nodes/edges in the BCB graph. pub(super) struct CoverageCounters { - next_counter_id: CounterId, + /// List of places where a counter-increment statement should be injected + /// into MIR, each with its corresponding counter ID. + counter_increment_sites: IndexVec, /// Coverage counters/expressions that are associated with individual BCBs. bcb_counters: IndexVec>, /// Coverage counters/expressions that are associated with the control-flow /// edge between two BCBs. /// - /// The iteration order of this map can affect the precise contents of MIR, - /// so we use `FxIndexMap` to avoid query stability hazards. - bcb_edge_counters: FxIndexMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>, + /// We currently don't iterate over this map, but if we do in the future, + /// switch it back to `FxIndexMap` to avoid query stability hazards. + bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>, /// Tracks which BCBs have a counter associated with some incoming edge. 
/// Only used by assertions, to verify that BCBs with incoming edge /// counters do not have their own physical counters (expressions are allowed). @@ -71,9 +80,9 @@ impl CoverageCounters { let num_bcbs = basic_coverage_blocks.num_nodes(); let mut this = Self { - next_counter_id: CounterId::START, + counter_increment_sites: IndexVec::new(), bcb_counters: IndexVec::from_elem_n(None, num_bcbs), - bcb_edge_counters: FxIndexMap::default(), + bcb_edge_counters: FxHashMap::default(), bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs), expressions: IndexVec::new(), }; @@ -84,8 +93,8 @@ impl CoverageCounters { this } - fn make_counter(&mut self) -> BcbCounter { - let id = self.next_counter(); + fn make_counter(&mut self, site: CounterIncrementSite) -> BcbCounter { + let id = self.counter_increment_sites.push(site); BcbCounter::Counter { id } } @@ -103,15 +112,8 @@ impl CoverageCounters { self.make_expression(lhs, Op::Add, rhs) } - /// Counter IDs start from one and go up. - fn next_counter(&mut self) -> CounterId { - let next = self.next_counter_id; - self.next_counter_id = self.next_counter_id + 1; - next - } - pub(super) fn num_counters(&self) -> usize { - self.next_counter_id.as_usize() + self.counter_increment_sites.len() } #[cfg(test)] @@ -171,22 +173,26 @@ impl CoverageCounters { self.bcb_counters[bcb] } - pub(super) fn bcb_node_counters( + /// Returns an iterator over all the nodes/edges in the coverage graph that + /// should have a counter-increment statement injected into MIR, along with + /// each site's corresponding counter ID. + pub(super) fn counter_increment_sites( &self, - ) -> impl Iterator { - self.bcb_counters - .iter_enumerated() - .filter_map(|(bcb, counter_kind)| Some((bcb, counter_kind.as_ref()?))) + ) -> impl Iterator { + self.counter_increment_sites.iter_enumerated() } - /// For each edge in the BCB graph that has an associated counter, yields - /// that edge's *from* and *to* nodes, and its counter. - pub(super) fn bcb_edge_counters( + /// Returns an iterator over the subset of BCB nodes that have been associated + /// with a counter *expression*, along with the ID of that expression. + pub(super) fn bcb_nodes_with_coverage_expressions( &self, - ) -> impl Iterator { - self.bcb_edge_counters - .iter() - .map(|(&(from_bcb, to_bcb), counter_kind)| (from_bcb, to_bcb, counter_kind)) + ) -> impl Iterator + Captures<'_> { + self.bcb_counters.iter_enumerated().filter_map(|(bcb, &counter_kind)| match counter_kind { + // Yield the BCB along with its associated expression ID. + Some(BcbCounter::Expression { id }) => Some((bcb, id)), + // This BCB is associated with a counter or nothing, so skip it. + Some(BcbCounter::Counter { .. }) | None => None, + }) } pub(super) fn into_expressions(self) -> IndexVec { @@ -339,7 +345,8 @@ impl<'a> MakeBcbCounters<'a> { // program results in a tight infinite loop, but it should still compile. let one_path_to_target = !self.basic_coverage_blocks.bcb_has_multiple_in_edges(bcb); if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) { - let counter_kind = self.coverage_counters.make_counter(); + let counter_kind = + self.coverage_counters.make_counter(CounterIncrementSite::Node { bcb }); if one_path_to_target { debug!("{bcb:?} gets a new counter: {counter_kind:?}"); } else { @@ -401,7 +408,8 @@ impl<'a> MakeBcbCounters<'a> { } // Make a new counter to count this edge. 
- let counter_kind = self.coverage_counters.make_counter(); + let counter_kind = + self.coverage_counters.make_counter(CounterIncrementSite::Edge { from_bcb, to_bcb }); debug!("Edge {from_bcb:?}->{to_bcb:?} gets a new counter: {counter_kind:?}"); self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind) } diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 5fb72fcf0cf3e..ef40af3716c1f 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -7,7 +7,7 @@ mod spans; #[cfg(test)] mod tests; -use self::counters::{BcbCounter, CoverageCounters}; +use self::counters::{CounterIncrementSite, CoverageCounters}; use self::graph::{BasicCoverageBlock, CoverageGraph}; use self::spans::{BcbMapping, BcbMappingKind, CoverageSpans}; @@ -155,61 +155,52 @@ fn inject_coverage_statements<'tcx>( bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool, coverage_counters: &CoverageCounters, ) { - // Process the counters associated with BCB nodes. - for (bcb, counter_kind) in coverage_counters.bcb_node_counters() { - let do_inject = match counter_kind { - // Counter-increment statements always need to be injected. - BcbCounter::Counter { .. } => true, - // The only purpose of expression-used statements is to detect - // when a mapping is unreachable, so we only inject them for - // expressions with one or more mappings. - BcbCounter::Expression { .. } => bcb_has_coverage_spans(bcb), - }; - if do_inject { - inject_statement( - mir_body, - make_mir_coverage_kind(counter_kind), - basic_coverage_blocks[bcb].leader_bb(), - ); - } - } - - // Process the counters associated with BCB edges. - for (from_bcb, to_bcb, counter_kind) in coverage_counters.bcb_edge_counters() { - let do_inject = match counter_kind { - // Counter-increment statements always need to be injected. - BcbCounter::Counter { .. } => true, - // BCB-edge expressions never have mappings, so they never need - // a corresponding statement. - BcbCounter::Expression { .. } => false, + // Inject counter-increment statements into MIR. + for (id, counter_increment_site) in coverage_counters.counter_increment_sites() { + // Determine the block to inject a counter-increment statement into. + // For BCB nodes this is just their first block, but for edges we need + // to create a new block between the two BCBs, and inject into that. + let target_bb = match *counter_increment_site { + CounterIncrementSite::Node { bcb } => basic_coverage_blocks[bcb].leader_bb(), + CounterIncrementSite::Edge { from_bcb, to_bcb } => { + // Create a new block between the last block of `from_bcb` and + // the first block of `to_bcb`. + let from_bb = basic_coverage_blocks[from_bcb].last_bb(); + let to_bb = basic_coverage_blocks[to_bcb].leader_bb(); + + let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb); + debug!( + "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \ + requires a new MIR BasicBlock {new_bb:?} for counter increment {id:?}", + ); + new_bb + } }; - if !do_inject { - continue; - } - - // We need to inject a coverage statement into a new BB between the - // last BB of `from_bcb` and the first BB of `to_bcb`. 
- let from_bb = basic_coverage_blocks[from_bcb].last_bb(); - let to_bb = basic_coverage_blocks[to_bcb].leader_bb(); - let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb); - debug!( - "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \ - requires a new MIR BasicBlock {new_bb:?} for edge counter {counter_kind:?}", - ); - - // Inject a counter into the newly-created BB. - inject_statement(mir_body, make_mir_coverage_kind(counter_kind), new_bb); + inject_statement(mir_body, CoverageKind::CounterIncrement { id }, target_bb); } -} -fn make_mir_coverage_kind(counter_kind: &BcbCounter) -> CoverageKind { - match *counter_kind { - BcbCounter::Counter { id } => CoverageKind::CounterIncrement { id }, - BcbCounter::Expression { id } => CoverageKind::ExpressionUsed { id }, + // For each counter expression that is directly associated with at least one + // span, we inject an "expression-used" statement, so that coverage codegen + // can check whether the injected statement survived MIR optimization. + // (BCB edges can't have spans, so we only need to process BCB nodes here.) + // + // See the code in `rustc_codegen_llvm::coverageinfo::map_data` that deals + // with "expressions seen" and "zero terms". + for (bcb, expression_id) in coverage_counters + .bcb_nodes_with_coverage_expressions() + .filter(|&(bcb, _)| bcb_has_coverage_spans(bcb)) + { + inject_statement( + mir_body, + CoverageKind::ExpressionUsed { id: expression_id }, + basic_coverage_blocks[bcb].leader_bb(), + ); } } +/// Given two basic blocks that have a control-flow edge between them, creates +/// and returns a new block that sits between those blocks. fn inject_edge_counter_basic_block( mir_body: &mut mir::Body<'_>, from_bb: BasicBlock, From f5d6eb30a845109ab6f69855b4720a15573e8fad Mon Sep 17 00:00:00 2001 From: Vadim Petrochenkov Date: Tue, 30 Jan 2024 22:03:16 +0300 Subject: [PATCH 10/20] hir: Stop keeping prefixes for most of `use` list stems And make sure all other imports have non-empty resolution lists. --- compiler/rustc_ast_lowering/src/item.rs | 19 +++++++++++++------ compiler/rustc_ast_lowering/src/lib.rs | 12 +++++++++--- compiler/rustc_ast_lowering/src/path.rs | 1 + 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index dd3f7289a60b2..60685119fb98e 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -496,8 +496,7 @@ impl<'hir> LoweringContext<'_, 'hir> { } } - let res = - self.expect_full_res_from_use(id).map(|res| self.lower_res(res)).collect(); + let res = self.lower_import_res(id, path.span); let path = self.lower_use_path(res, &path, ParamMode::Explicit); hir::ItemKind::Use(path, hir::UseKind::Single) } @@ -533,7 +532,8 @@ impl<'hir> LoweringContext<'_, 'hir> { // for that we return the `{}` import (called the // `ListStem`). - let prefix = Path { segments, span: prefix.span.to(path.span), tokens: None }; + let span = prefix.span.to(path.span); + let prefix = Path { segments, span, tokens: None }; // Add all the nested `PathListItem`s to the HIR. 
for &(ref use_tree, id) in trees { @@ -567,9 +567,16 @@ impl<'hir> LoweringContext<'_, 'hir> { }); } - let res = - self.expect_full_res_from_use(id).map(|res| self.lower_res(res)).collect(); - let path = self.lower_use_path(res, &prefix, ParamMode::Explicit); + let path = if trees.is_empty() && !prefix.segments.is_empty() { + // For empty lists we need to lower the prefix so it is checked for things + // like stability later. + let res = self.lower_import_res(id, span); + self.lower_use_path(res, &prefix, ParamMode::Explicit) + } else { + // For non-empty lists we can just drop all the data, the prefix is already + // present in HIR as a part of nested imports. + self.arena.alloc(hir::UsePath { res: smallvec![], segments: &[], span }) + }; hir::ItemKind::Use(path, hir::UseKind::ListStem) } } diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 3621844efc8d2..14a83c7eda6db 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -63,7 +63,7 @@ use rustc_middle::ty::{ResolverAstLowering, TyCtxt}; use rustc_session::parse::{add_feature_diagnostics, feature_err}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{DesugaringKind, Span, DUMMY_SP}; -use smallvec::SmallVec; +use smallvec::{smallvec, SmallVec}; use std::collections::hash_map::Entry; use thin_vec::ThinVec; @@ -749,8 +749,14 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { self.resolver.get_partial_res(id).map_or(Res::Err, |pr| pr.expect_full_res()) } - fn expect_full_res_from_use(&mut self, id: NodeId) -> impl Iterator> { - self.resolver.get_import_res(id).present_items() + fn lower_import_res(&mut self, id: NodeId, span: Span) -> SmallVec<[Res; 3]> { + let res = self.resolver.get_import_res(id).present_items(); + let res: SmallVec<_> = res.map(|res| self.lower_res(res)).collect(); + if res.is_empty() { + self.dcx().span_delayed_bug(span, "no resolution for an import"); + return smallvec![Res::Err]; + } + res } fn make_lang_item_qpath(&mut self, lang_item: hir::LangItem, span: Span) -> hir::QPath<'hir> { diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs index c679ee56fcd8b..4df68f8e9297d 100644 --- a/compiler/rustc_ast_lowering/src/path.rs +++ b/compiler/rustc_ast_lowering/src/path.rs @@ -156,6 +156,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { p: &Path, param_mode: ParamMode, ) -> &'hir hir::UsePath<'hir> { + assert!((1..=3).contains(&res.len())); self.arena.alloc(hir::UsePath { res, segments: self.arena.alloc_from_iter(p.segments.iter().map(|segment| { From 285d8c225d05966040f6a69bfdea83165b54483f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Le=C3=B3n=20Orell=20Valerian=20Liehr?= Date: Sat, 3 Feb 2024 01:32:01 +0100 Subject: [PATCH 11/20] Suggest `[tail @ ..]` on `[..tail]` and `[...tail]` where `tail` is unresolved --- compiler/rustc_resolve/messages.ftl | 3 + compiler/rustc_resolve/src/errors.rs | 14 +++++ .../rustc_resolve/src/late/diagnostics.rs | 38 +++++++----- ...-pattern-meant-to-be-slice-rest-pattern.rs | 16 ++++- ...tern-meant-to-be-slice-rest-pattern.stderr | 61 +++++++++++++++++-- 5 files changed, 112 insertions(+), 20 deletions(-) diff --git a/compiler/rustc_resolve/messages.ftl b/compiler/rustc_resolve/messages.ftl index 02847a0f5f914..769c469a5ab2b 100644 --- a/compiler/rustc_resolve/messages.ftl +++ b/compiler/rustc_resolve/messages.ftl @@ -292,6 +292,9 @@ resolve_underscore_lifetime_name_cannot_be_used_here = resolve_unexpected_res_change_ty_to_const_param_sugg = you 
might have meant to write a const parameter here +resolve_unexpected_res_use_at_op_in_slice_pat_with_range_sugg = + if you meant to collect the rest of the slice in `{$ident}`, use the at operator + resolve_unreachable_label = use of unreachable label `{$name}` .label = unreachable label `{$name}` diff --git a/compiler/rustc_resolve/src/errors.rs b/compiler/rustc_resolve/src/errors.rs index 655fc9812d747..49a0e597dbc75 100644 --- a/compiler/rustc_resolve/src/errors.rs +++ b/compiler/rustc_resolve/src/errors.rs @@ -800,3 +800,17 @@ pub(crate) struct UnexpectedResChangeTyToConstParamSugg { #[applicability] pub applicability: Applicability, } + +#[derive(Subdiagnostic)] +#[suggestion( + resolve_unexpected_res_use_at_op_in_slice_pat_with_range_sugg, + code = "{snippet}", + applicability = "maybe-incorrect", + style = "verbose" +)] +pub(crate) struct UnexpectedResUseAtOpInSlicePatWithRangeSugg { + #[primary_span] + pub span: Span, + pub ident: Ident, + pub snippet: String, +} diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 310c126213ad2..5d712461993d8 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -1077,24 +1077,34 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { err: &mut Diagnostic, path: &[Segment], ) { - if let Some(pat) = self.diagnostic_metadata.current_pat - && let ast::PatKind::Range(Some(start), None, range) = &pat.kind - && let ExprKind::Path(None, range_path) = &start.kind + let Some(pat) = self.diagnostic_metadata.current_pat else { return }; + let (bound, side, range) = match &pat.kind { + ast::PatKind::Range(Some(bound), None, range) => (bound, Side::Start, range), + ast::PatKind::Range(None, Some(bound), range) => (bound, Side::End, range), + _ => return, + }; + if let ExprKind::Path(None, range_path) = &bound.kind && let [segment] = &range_path.segments[..] && let [s] = path && segment.ident == s.ident + && segment.ident.span.eq_ctxt(range.span) { - // We've encountered `[first, rest..]` where the user might have meant - // `[first, rest @ ..]` (#88404). - err.span_suggestion_verbose( - segment.ident.span.between(range.span), - format!( - "if you meant to collect the rest of the slice in `{}`, use the at operator", - segment.ident, - ), - " @ ", - Applicability::MaybeIncorrect, - ); + // We've encountered `[first, rest..]` (#88404) or `[first, ..rest]` (#120591) + // where the user might have meant `[first, rest @ ..]`. + let (span, snippet) = match side { + Side::Start => (segment.ident.span.between(range.span), " @ ".into()), + Side::End => (range.span.to(segment.ident.span), format!("{} @ ..", segment.ident)), + }; + err.subdiagnostic(errors::UnexpectedResUseAtOpInSlicePatWithRangeSugg { + span, + ident: segment.ident, + snippet, + }); + } + + enum Side { + Start, + End, } } diff --git a/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.rs b/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.rs index a619fcafc8614..1eba7aeb4d64a 100644 --- a/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.rs +++ b/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.rs @@ -1,9 +1,23 @@ fn main() { match &[1, 2, 3][..] { - [1, rest..] => println!("{rest:?}"), + [1, rest..] => println!("{rest}"), //~^ ERROR cannot find value `rest` in this scope //~| ERROR cannot find value `rest` in this scope //~| ERROR `X..` patterns in slices are experimental _ => {} } + match &[4, 5, 6][..] 
{ + [] => {} + [_, ..tail] => println!("{tail}"), + //~^ ERROR cannot find value `tail` in this scope + //~| ERROR cannot find value `tail` in this scope + //~| ERROR exclusive range pattern syntax is experimental + } + match &[7, 8, 9][..] { + [] => {} + [_, ...tail] => println!("{tail}"), + //~^ ERROR cannot find value `tail` in this scope + //~| ERROR cannot find value `tail` in this scope + //~| ERROR range-to patterns with `...` are not allowed + } } diff --git a/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.stderr b/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.stderr index c3c9131b63eb2..3a19517c85bd2 100644 --- a/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.stderr +++ b/tests/ui/pattern/range-pattern-meant-to-be-slice-rest-pattern.stderr @@ -1,31 +1,82 @@ +error: range-to patterns with `...` are not allowed + --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:18:13 + | +LL | [_, ...tail] => println!("{tail}"), + | ^^^ help: use `..=` instead + error[E0425]: cannot find value `rest` in this scope --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:3:13 | -LL | [1, rest..] => println!("{rest:?}"), +LL | [1, rest..] => println!("{rest}"), | ^^^^ not found in this scope | help: if you meant to collect the rest of the slice in `rest`, use the at operator | -LL | [1, rest @ ..] => println!("{rest:?}"), +LL | [1, rest @ ..] => println!("{rest}"), | + error[E0425]: cannot find value `rest` in this scope --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:3:35 | -LL | [1, rest..] => println!("{rest:?}"), +LL | [1, rest..] => println!("{rest}"), + | ^^^^ not found in this scope + +error[E0425]: cannot find value `tail` in this scope + --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:11:15 + | +LL | [_, ..tail] => println!("{tail}"), + | ^^^^ not found in this scope + | +help: if you meant to collect the rest of the slice in `tail`, use the at operator + | +LL | [_, tail @ ..] => println!("{tail}"), + | ~~~~~~~~~ + +error[E0425]: cannot find value `tail` in this scope + --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:11:35 + | +LL | [_, ..tail] => println!("{tail}"), | ^^^^ not found in this scope +error[E0425]: cannot find value `tail` in this scope + --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:18:16 + | +LL | [_, ...tail] => println!("{tail}"), + | ^^^^ not found in this scope + | +help: if you meant to collect the rest of the slice in `tail`, use the at operator + | +LL | [_, tail @ ..] => println!("{tail}"), + | ~~~~~~~~~ + +error[E0425]: cannot find value `tail` in this scope + --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:18:36 + | +LL | [_, ...tail] => println!("{tail}"), + | ^^^^ not found in this scope + error[E0658]: `X..` patterns in slices are experimental --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:3:13 | -LL | [1, rest..] => println!("{rest:?}"), +LL | [1, rest..] 
=> println!("{rest}"), | ^^^^^^ | = note: see issue #67264 for more information = help: add `#![feature(half_open_range_patterns_in_slices)]` to the crate attributes to enable = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date -error: aborting due to 3 previous errors +error[E0658]: exclusive range pattern syntax is experimental + --> $DIR/range-pattern-meant-to-be-slice-rest-pattern.rs:11:13 + | +LL | [_, ..tail] => println!("{tail}"), + | ^^^^^^ + | + = note: see issue #37854 for more information + = help: add `#![feature(exclusive_range_pattern)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + = help: use an inclusive range pattern, like N..=M + +error: aborting due to 9 previous errors Some errors have detailed explanations: E0425, E0658. For more information about an error, try `rustc --explain E0425`. From e8c3cbf44b9f003482871e8638859c8f65b234bb Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 31 Jan 2024 09:25:42 +1100 Subject: [PATCH 12/20] Make `Diagnostic::is_error` return false for `Level::FailureNote`. It doesn't affect behaviour, but makes sense with (a) `FailureNote` having `()` as its emission guarantee, and (b) in `Level` the `is_error` levels now are all listed before the non-`is_error` levels. --- compiler/rustc_errors/src/diagnostic.rs | 4 ++-- compiler/rustc_errors/src/lib.rs | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 8ad4925cff288..09570fe74b6ff 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -238,8 +238,7 @@ impl Diagnostic { Level::Bug | Level::DelayedBug(DelayedBugKind::Normal) | Level::Fatal - | Level::Error - | Level::FailureNote => true, + | Level::Error => true, Level::ForceWarning(_) | Level::Warning @@ -248,6 +247,7 @@ impl Diagnostic { | Level::OnceNote | Level::Help | Level::OnceHelp + | Level::FailureNote | Level::Allow | Level::Expect(_) => false, } diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index b2bd4d8eb956e..42b1c85e93968 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -1541,6 +1541,8 @@ pub enum Level { /// /// The [`LintExpectationId`] is used for expected lint diagnostics. In all other cases this /// should be `None`. + /// + /// Its `EmissionGuarantee` is `()`. ForceWarning(Option), /// A warning about the code being compiled. Does not prevent compilation from finishing. @@ -1570,7 +1572,8 @@ pub enum Level { /// Its `EmissionGuarantee` is `()`. OnceHelp, - /// Similar to `Note`, but used in cases where compilation has failed. Rare. + /// Similar to `Note`, but used in cases where compilation has failed. When printed for human + /// consumption, it doesn't have any kind of `note:` label. Rare. /// /// Its `EmissionGuarantee` is `()`. FailureNote, From 5dd0431386867a051f726cbba6e0f46f7bba5e5d Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 31 Jan 2024 13:56:22 +1100 Subject: [PATCH 13/20] Tighten the assertion in `downgrade_to_delayed_bug`. I.e. `Bug` and `Fatal` level diagnostics are never downgraded. 
--- compiler/rustc_errors/src/diagnostic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 09570fe74b6ff..299e4a840f726 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -306,7 +306,7 @@ impl Diagnostic { #[track_caller] pub fn downgrade_to_delayed_bug(&mut self) { assert!( - self.is_error(), + matches!(self.level, Level::Error | Level::DelayedBug(_)), "downgrade_to_delayed_bug: cannot downgrade {:?} to DelayedBug: not an error", self.level ); From c3673868325f95203d5291f2fa3a399425c14876 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 31 Jan 2024 15:10:23 +1100 Subject: [PATCH 14/20] Refactor `emit_diagnostic`. - Combine two different blocks involving `diagnostic.level.get_expectation_id()` into one. - Combine several `if`s involving `diagnostic.level` into a single `match`. This requires reordering some of the operations, but this has no functional effect. --- compiler/rustc_errors/src/lib.rs | 82 +++++++++++++++----------------- 1 file changed, 39 insertions(+), 43 deletions(-) diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index 42b1c85e93968..b3461b676be27 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -1247,24 +1247,41 @@ impl DiagCtxtInner { } fn emit_diagnostic(&mut self, mut diagnostic: Diagnostic) -> Option { - // The `LintExpectationId` can be stable or unstable depending on when it was created. - // Diagnostics created before the definition of `HirId`s are unstable and can not yet - // be stored. Instead, they are buffered until the `LintExpectationId` is replaced by - // a stable one by the `LintLevelsBuilder`. - if let Some(LintExpectationId::Unstable { .. }) = diagnostic.level.get_expectation_id() { - self.unstable_expect_diagnostics.push(diagnostic); - return None; + if let Some(expectation_id) = diagnostic.level.get_expectation_id() { + // The `LintExpectationId` can be stable or unstable depending on when it was created. + // Diagnostics created before the definition of `HirId`s are unstable and can not yet + // be stored. Instead, they are buffered until the `LintExpectationId` is replaced by + // a stable one by the `LintLevelsBuilder`. + if let LintExpectationId::Unstable { .. } = expectation_id { + self.unstable_expect_diagnostics.push(diagnostic); + return None; + } + self.suppressed_expected_diag = true; + self.fulfilled_expectations.insert(expectation_id.normalize()); + } + + if diagnostic.has_future_breakage() { + // Future breakages aren't emitted if they're Level::Allow, + // but they still need to be constructed and stashed below, + // so they'll trigger the good-path bug check. + self.suppressed_expected_diag = true; + self.future_breakage_diagnostics.push(diagnostic.clone()); + } + + if matches!(diagnostic.level, DelayedBug(_)) && self.flags.eagerly_emit_delayed_bugs { + diagnostic.level = Error; } - // FIXME(eddyb) this should check for `has_errors` and stop pushing - // once *any* errors were emitted (and truncate `span_delayed_bugs` - // when an error is first emitted, also), but maybe there's a case - // in which that's not sound? otherwise this is really inefficient. match diagnostic.level { - DelayedBug(_) if self.flags.eagerly_emit_delayed_bugs => { - diagnostic.level = Error; + // This must come after the possible promotion of `DelayedBug` to `Error` above. 
+ Fatal | Error if self.treat_next_err_as_bug() => { + diagnostic.level = Bug; } DelayedBug(DelayedBugKind::Normal) => { + // FIXME(eddyb) this should check for `has_errors` and stop pushing + // once *any* errors were emitted (and truncate `span_delayed_bugs` + // when an error is first emitted, also), but maybe there's a case + // in which that's not sound? otherwise this is really inefficient. let backtrace = std::backtrace::Backtrace::capture(); self.span_delayed_bugs .push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace)); @@ -1277,38 +1294,17 @@ impl DiagCtxtInner { .push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace)); return None; } - _ => {} - } - - // This must come after the possible promotion of `DelayedBug` to - // `Error` above. - if matches!(diagnostic.level, Error | Fatal) && self.treat_next_err_as_bug() { - diagnostic.level = Bug; - } - - if diagnostic.has_future_breakage() { - // Future breakages aren't emitted if they're Level::Allow, - // but they still need to be constructed and stashed below, - // so they'll trigger the good-path bug check. - self.suppressed_expected_diag = true; - self.future_breakage_diagnostics.push(diagnostic.clone()); - } - - if let Some(expectation_id) = diagnostic.level.get_expectation_id() { - self.suppressed_expected_diag = true; - self.fulfilled_expectations.insert(expectation_id.normalize()); - } - - if diagnostic.level == Warning && !self.flags.can_emit_warnings { - if diagnostic.has_future_breakage() { + Warning if !self.flags.can_emit_warnings => { + if diagnostic.has_future_breakage() { + (*TRACK_DIAGNOSTIC)(diagnostic, &mut |_| {}); + } + return None; + } + Allow | Expect(_) => { (*TRACK_DIAGNOSTIC)(diagnostic, &mut |_| {}); + return None; } - return None; - } - - if matches!(diagnostic.level, Expect(_) | Allow) { - (*TRACK_DIAGNOSTIC)(diagnostic, &mut |_| {}); - return None; + _ => {} } let mut guaranteed = None; From 59e0bc2de7134a2d88e9c14db32884e631e90373 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 31 Jan 2024 11:23:54 +1100 Subject: [PATCH 15/20] Split `Level::DelayedBug` in two. The two kinds of delayed bug have quite different semantics so a stronger conceptual separation is nice. (`is_error` is a good example, because the two kinds have different behaviour.) The commit also moves the `DelayedBug` variant after `Error` in `Level`, to reflect the fact that it's weaker than `Error` -- it might trigger an error but also might not. (The pre-existing `downgrade_to_delayed_bug` function also reflects the notion that delayed bugs are lower/after normal errors.) Plus it condenses some of the comments on `Level` into a table, for easier reading, and introduces `can_be_top_or_sub` to indicate which levels can be used in top-level diagnostics vs. subdiagnostics. Finally, it renames `DiagCtxtInner::span_delayed_bugs` as `DiagCtxtInner::delayed_bugs`. The `span_` prefix is unnecessary because some delayed bugs don't have a span. 
--- compiler/rustc_error_messages/src/lib.rs | 2 +- .../src/annotate_snippet_emitter_writer.rs | 6 +- compiler/rustc_errors/src/diagnostic.rs | 18 +- compiler/rustc_errors/src/emitter.rs | 1 + compiler/rustc_errors/src/lib.rs | 159 ++++++++++-------- .../equality-in-canonical-query.clone.stderr | 2 +- ...equality_in_canonical_query.current.stderr | 2 +- 7 files changed, 101 insertions(+), 89 deletions(-) diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs index 8fd7c5764797e..d212e18b4cd72 100644 --- a/compiler/rustc_error_messages/src/lib.rs +++ b/compiler/rustc_error_messages/src/lib.rs @@ -378,7 +378,7 @@ impl From> for DiagnosticMessage { } } -/// A workaround for "good path" ICEs when formatting types in disabled lints. +/// A workaround for good_path_delayed_bug ICEs when formatting types in disabled lints. /// /// Delays formatting until `.into(): DiagnosticMessage` is used. pub struct DelayDm(pub F); diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs index 949f52ef6b586..06f6c58c5ff2e 100644 --- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs +++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs @@ -85,7 +85,11 @@ fn source_string(file: Lrc, line: &Line) -> String { /// Maps `Diagnostic::Level` to `snippet::AnnotationType` fn annotation_type_for_level(level: Level) -> AnnotationType { match level { - Level::Bug | Level::DelayedBug(_) | Level::Fatal | Level::Error => AnnotationType::Error, + Level::Bug + | Level::Fatal + | Level::Error + | Level::DelayedBug + | Level::GoodPathDelayedBug => AnnotationType::Error, Level::ForceWarning(_) | Level::Warning => AnnotationType::Warning, Level::Note | Level::OnceNote => AnnotationType::Note, Level::Help | Level::OnceHelp => AnnotationType::Help, diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 299e4a840f726..1763c355069a9 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -1,8 +1,7 @@ use crate::snippet::Style; use crate::{ - CodeSuggestion, DelayedBugKind, DiagnosticBuilder, DiagnosticMessage, EmissionGuarantee, - ErrCode, Level, MultiSpan, SubdiagnosticMessage, Substitution, SubstitutionPart, - SuggestionStyle, + CodeSuggestion, DiagnosticBuilder, DiagnosticMessage, EmissionGuarantee, ErrCode, Level, + MultiSpan, SubdiagnosticMessage, Substitution, SubstitutionPart, SuggestionStyle, }; use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_error_messages::fluent_value_from_str_list_sep_by_and; @@ -235,14 +234,11 @@ impl Diagnostic { pub fn is_error(&self) -> bool { match self.level { - Level::Bug - | Level::DelayedBug(DelayedBugKind::Normal) - | Level::Fatal - | Level::Error => true, + Level::Bug | Level::Fatal | Level::Error | Level::DelayedBug => true, - Level::ForceWarning(_) + Level::GoodPathDelayedBug + | Level::ForceWarning(_) | Level::Warning - | Level::DelayedBug(DelayedBugKind::GoodPath) | Level::Note | Level::OnceNote | Level::Help @@ -306,11 +302,11 @@ impl Diagnostic { #[track_caller] pub fn downgrade_to_delayed_bug(&mut self) { assert!( - matches!(self.level, Level::Error | Level::DelayedBug(_)), + matches!(self.level, Level::Error | Level::DelayedBug), "downgrade_to_delayed_bug: cannot downgrade {:?} to DelayedBug: not an error", self.level ); - self.level = Level::DelayedBug(DelayedBugKind::Normal); + self.level = Level::DelayedBug; } /// Appends a labeled 
span to the diagnostic. diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index 4be5ed923e5e0..6370e1d387c37 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -2116,6 +2116,7 @@ impl HumanEmitter { } if !self.short_message { for child in children { + assert!(child.level.can_be_top_or_sub().1); let span = &child.span; if let Err(err) = self.emit_messages_default_inner( span, diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index b3461b676be27..cfb2dfbeb984a 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -439,7 +439,7 @@ struct DiagCtxtInner { has_printed: bool, emitter: Box, - span_delayed_bugs: Vec, + delayed_bugs: Vec, good_path_delayed_bugs: Vec, /// This flag indicates that an expected diagnostic was emitted and suppressed. /// This is used for the `good_path_delayed_bugs` check. @@ -523,8 +523,7 @@ fn default_track_diagnostic(diag: Diagnostic, f: &mut dyn FnMut(Diagnostic)) { pub static TRACK_DIAGNOSTIC: AtomicRef = AtomicRef::new(&(default_track_diagnostic as _)); -#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug, Encodable, Decodable)] -pub enum DelayedBugKind { +enum DelayedBugKind { Normal, GoodPath, } @@ -557,11 +556,6 @@ impl Drop for DiagCtxtInner { self.flush_delayed(DelayedBugKind::Normal) } - // FIXME(eddyb) this explains what `good_path_delayed_bugs` are! - // They're `span_delayed_bugs` but for "require some diagnostic happened" - // instead of "require some error happened". Sadly that isn't ideal, as - // lints can be `#[allow]`'d, potentially leading to this triggering. - // Also, "good path" should be replaced with a better naming. if !self.has_printed && !self.suppressed_expected_diag && !std::thread::panicking() { self.flush_delayed(DelayedBugKind::GoodPath); } @@ -608,7 +602,7 @@ impl DiagCtxt { deduplicated_warn_count: 0, has_printed: false, emitter, - span_delayed_bugs: Vec::new(), + delayed_bugs: Vec::new(), good_path_delayed_bugs: Vec::new(), suppressed_expected_diag: false, taught_diagnostics: Default::default(), @@ -664,7 +658,7 @@ impl DiagCtxt { inner.has_printed = false; // actually free the underlying memory (which `clear` would not do) - inner.span_delayed_bugs = Default::default(); + inner.delayed_bugs = Default::default(); inner.good_path_delayed_bugs = Default::default(); inner.taught_diagnostics = Default::default(); inner.emitted_diagnostic_codes = Default::default(); @@ -865,8 +859,7 @@ impl DiagCtxt { /// directly). #[track_caller] pub fn delayed_bug(&self, msg: impl Into) -> ErrorGuaranteed { - DiagnosticBuilder::::new(self, DelayedBug(DelayedBugKind::Normal), msg) - .emit() + DiagnosticBuilder::::new(self, DelayedBug, msg).emit() } /// Like `delayed_bug`, but takes an additional span. @@ -879,15 +872,12 @@ impl DiagCtxt { sp: impl Into, msg: impl Into, ) -> ErrorGuaranteed { - DiagnosticBuilder::::new(self, DelayedBug(DelayedBugKind::Normal), msg) - .with_span(sp) - .emit() + DiagnosticBuilder::::new(self, DelayedBug, msg).with_span(sp).emit() } - // FIXME(eddyb) note the comment inside `impl Drop for DiagCtxtInner`, that's - // where the explanation of what "good path" is (also, it should be renamed). + /// Ensures that a diagnostic is printed. See `Level::GoodPathDelayedBug`. 
pub fn good_path_delayed_bug(&self, msg: impl Into) { - DiagnosticBuilder::<()>::new(self, DelayedBug(DelayedBugKind::GoodPath), msg).emit() + DiagnosticBuilder::<()>::new(self, GoodPathDelayedBug, msg).emit() } #[track_caller] @@ -961,7 +951,7 @@ impl DiagCtxt { pub fn has_errors_or_lint_errors_or_delayed_bugs(&self) -> Option { let inner = self.inner.borrow(); let result = - inner.has_errors() || inner.lint_err_count > 0 || !inner.span_delayed_bugs.is_empty(); + inner.has_errors() || inner.lint_err_count > 0 || !inner.delayed_bugs.is_empty(); result.then(|| { #[allow(deprecated)] ErrorGuaranteed::unchecked_claim_error_was_emitted() @@ -1247,6 +1237,8 @@ impl DiagCtxtInner { } fn emit_diagnostic(&mut self, mut diagnostic: Diagnostic) -> Option { + assert!(diagnostic.level.can_be_top_or_sub().0); + if let Some(expectation_id) = diagnostic.level.get_expectation_id() { // The `LintExpectationId` can be stable or unstable depending on when it was created. // Diagnostics created before the definition of `HirId`s are unstable and can not yet @@ -1268,27 +1260,29 @@ impl DiagCtxtInner { self.future_breakage_diagnostics.push(diagnostic.clone()); } - if matches!(diagnostic.level, DelayedBug(_)) && self.flags.eagerly_emit_delayed_bugs { + if matches!(diagnostic.level, DelayedBug | GoodPathDelayedBug) + && self.flags.eagerly_emit_delayed_bugs + { diagnostic.level = Error; } match diagnostic.level { - // This must come after the possible promotion of `DelayedBug` to `Error` above. + // This must come after the possible promotion of `DelayedBug`/`GoodPathDelayedBug` to + // `Error` above. Fatal | Error if self.treat_next_err_as_bug() => { diagnostic.level = Bug; } - DelayedBug(DelayedBugKind::Normal) => { + DelayedBug => { // FIXME(eddyb) this should check for `has_errors` and stop pushing - // once *any* errors were emitted (and truncate `span_delayed_bugs` + // once *any* errors were emitted (and truncate `delayed_bugs` // when an error is first emitted, also), but maybe there's a case // in which that's not sound? otherwise this is really inefficient. let backtrace = std::backtrace::Backtrace::capture(); - self.span_delayed_bugs - .push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace)); + self.delayed_bugs.push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace)); #[allow(deprecated)] return Some(ErrorGuaranteed::unchecked_claim_error_was_emitted()); } - DelayedBug(DelayedBugKind::GoodPath) => { + GoodPathDelayedBug => { let backtrace = std::backtrace::Backtrace::capture(); self.good_path_delayed_bugs .push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace)); @@ -1392,12 +1386,12 @@ impl DiagCtxtInner { fn flush_delayed(&mut self, kind: DelayedBugKind) { let (bugs, note1) = match kind { DelayedBugKind::Normal => ( - std::mem::take(&mut self.span_delayed_bugs), - "no errors encountered even though `span_delayed_bug` issued", + std::mem::take(&mut self.delayed_bugs), + "no errors encountered even though delayed bugs were created", ), DelayedBugKind::GoodPath => ( std::mem::take(&mut self.good_path_delayed_bugs), - "no warnings or errors encountered even though `good_path_delayed_bugs` issued", + "no warnings or errors encountered even though good path delayed bugs were created", ), }; let note2 = "those delayed bugs will now be shown as internal compiler errors"; @@ -1436,8 +1430,8 @@ impl DiagCtxtInner { let mut bug = if backtrace || self.ice_file.is_none() { bug.decorate() } else { bug.inner }; - // "Undelay" the `DelayedBug`s (into plain `Bug`s). 
- if !matches!(bug.level, DelayedBug(_)) { + // "Undelay" the delayed bugs (into plain `Bug`s). + if !matches!(bug.level, DelayedBug | GoodPathDelayedBug) { // NOTE(eddyb) not panicking here because we're already producing // an ICE, and the more information the merrier. bug.subdiagnostic(InvalidFlushedDelayedDiagnosticLevel { @@ -1503,85 +1497,89 @@ impl DelayedDiagnostic { } } +/// Level is_error EmissionGuarantee Top-level Sub Used in lints? +/// ----- -------- ----------------- --------- --- -------------- +/// Bug yes BugAbort yes - - +/// Fatal yes FatalAbort/FatalError(*) yes - - +/// Error yes ErrorGuaranteed yes - yes +/// DelayedBug yes ErrorGuaranteed yes - - +/// GoodPathDelayedBug - () yes - - +/// ForceWarning - () yes - lint-only +/// Warning - () yes yes yes +/// Note - () rare yes - +/// OnceNote - () - yes lint-only +/// Help - () rare yes - +/// OnceHelp - () - yes lint-only +/// FailureNote - () rare - - +/// Allow - () yes - lint-only +/// Expect - () yes - lint-only +/// +/// (*) `FatalAbort` normally, `FatalError` in the non-aborting "almost fatal" case that is +/// occasionally used. +/// #[derive(Copy, PartialEq, Eq, Clone, Hash, Debug, Encodable, Decodable)] pub enum Level { /// For bugs in the compiler. Manifests as an ICE (internal compiler error) panic. - /// - /// Its `EmissionGuarantee` is `BugAbort`. Bug, - /// This is a strange one: lets you register an error without emitting it. If compilation ends - /// without any other errors occurring, this will be emitted as a bug. Otherwise, it will be - /// silently dropped. I.e. "expect other errors are emitted" semantics. Useful on code paths - /// that should only be reached when compiling erroneous code. - /// - /// Its `EmissionGuarantee` is `ErrorGuaranteed` for `Normal` delayed bugs, and `()` for - /// `GoodPath` delayed bugs. - DelayedBug(DelayedBugKind), - /// An error that causes an immediate abort. Used for things like configuration errors, /// internal overflows, some file operation errors. - /// - /// Its `EmissionGuarantee` is `FatalAbort`, except in the non-aborting "almost fatal" case - /// that is occasionally used, where it is `FatalError`. Fatal, /// An error in the code being compiled, which prevents compilation from finishing. This is the /// most common case. - /// - /// Its `EmissionGuarantee` is `ErrorGuaranteed`. Error, + /// This is a strange one: lets you register an error without emitting it. If compilation ends + /// without any other errors occurring, this will be emitted as a bug. Otherwise, it will be + /// silently dropped. I.e. "expect other errors are emitted" semantics. Useful on code paths + /// that should only be reached when compiling erroneous code. + DelayedBug, + + /// Like `DelayedBug`, but weaker: lets you register an error without emitting it. If + /// compilation ends without any other diagnostics being emitted (and without an expected lint + /// being suppressed), this will be emitted as a bug. Otherwise, it will be silently dropped. + /// I.e. "expect other diagnostics are emitted (or suppressed)" semantics. Useful on code paths + /// that should only be reached when emitting diagnostics, e.g. for expensive one-time + /// diagnostic formatting operations. + /// + /// FIXME(nnethercote) good path delayed bugs are semantically strange: if printed they produce + /// an ICE, but they don't satisfy `is_error` and they don't guarantee an error is emitted. + /// Plus there's the extra complication with expected (suppressed) lints. 
They have limited + /// use, and are used in very few places, and "good path" isn't a good name. It would be good + /// to remove them. + GoodPathDelayedBug, + /// A `force-warn` lint warning about the code being compiled. Does not prevent compilation /// from finishing. /// /// The [`LintExpectationId`] is used for expected lint diagnostics. In all other cases this /// should be `None`. - /// - /// Its `EmissionGuarantee` is `()`. ForceWarning(Option), /// A warning about the code being compiled. Does not prevent compilation from finishing. - /// - /// Its `EmissionGuarantee` is `()`. Warning, - /// A message giving additional context. Rare, because notes are more commonly attached to other - /// diagnostics such as errors. - /// - /// Its `EmissionGuarantee` is `()`. + /// A message giving additional context. Note, - /// A note that is only emitted once. Rare, mostly used in circumstances relating to lints. - /// - /// Its `EmissionGuarantee` is `()`. + /// A note that is only emitted once. OnceNote, - /// A message suggesting how to fix something. Rare, because help messages are more commonly - /// attached to other diagnostics such as errors. - /// - /// Its `EmissionGuarantee` is `()`. + /// A message suggesting how to fix something. Help, - /// A help that is only emitted once. Rare. - /// - /// Its `EmissionGuarantee` is `()`. + /// A help that is only emitted once. OnceHelp, /// Similar to `Note`, but used in cases where compilation has failed. When printed for human - /// consumption, it doesn't have any kind of `note:` label. Rare. - /// - /// Its `EmissionGuarantee` is `()`. + /// consumption, it doesn't have any kind of `note:` label. FailureNote, /// Only used for lints. - /// - /// Its `EmissionGuarantee` is `()`. Allow, /// Only used for lints. - /// - /// Its `EmissionGuarantee` is `()`. Expect(LintExpectationId), } @@ -1595,7 +1593,7 @@ impl Level { fn color(self) -> ColorSpec { let mut spec = ColorSpec::new(); match self { - Bug | DelayedBug(_) | Fatal | Error => { + Bug | Fatal | Error | DelayedBug | GoodPathDelayedBug => { spec.set_fg(Some(Color::Red)).set_intense(true); } ForceWarning(_) | Warning => { @@ -1615,7 +1613,7 @@ impl Level { pub fn to_str(self) -> &'static str { match self { - Bug | DelayedBug(_) => "error: internal compiler error", + Bug | DelayedBug | GoodPathDelayedBug => "error: internal compiler error", Fatal | Error => "error", ForceWarning(_) | Warning => "warning", Note | OnceNote => "note", @@ -1635,6 +1633,19 @@ impl Level { _ => None, } } + + // Can this level be used in a top-level diagnostic message and/or a + // subdiagnostic message? + fn can_be_top_or_sub(&self) -> (bool, bool) { + match self { + Bug | DelayedBug | Fatal | Error | GoodPathDelayedBug | ForceWarning(_) + | FailureNote | Allow | Expect(_) => (true, false), + + Warning | Note | Help => (true, true), + + OnceNote | OnceHelp => (false, true), + } + } } // FIXME(eddyb) this doesn't belong here AFAICT, should be moved to callsite. 
diff --git a/tests/ui/impl-trait/equality-in-canonical-query.clone.stderr b/tests/ui/impl-trait/equality-in-canonical-query.clone.stderr index 1011fc4163bca..0e3cd2ff06099 100644 --- a/tests/ui/impl-trait/equality-in-canonical-query.clone.stderr +++ b/tests/ui/impl-trait/equality-in-canonical-query.clone.stderr @@ -1,4 +1,4 @@ -note: no errors encountered even though `span_delayed_bug` issued +note: no errors encountered even though delayed bugs were created note: those delayed bugs will now be shown as internal compiler errors diff --git a/tests/ui/type-alias-impl-trait/rpit_tait_equality_in_canonical_query.current.stderr b/tests/ui/type-alias-impl-trait/rpit_tait_equality_in_canonical_query.current.stderr index d92bafce142c2..fd76526644bdd 100644 --- a/tests/ui/type-alias-impl-trait/rpit_tait_equality_in_canonical_query.current.stderr +++ b/tests/ui/type-alias-impl-trait/rpit_tait_equality_in_canonical_query.current.stderr @@ -1,4 +1,4 @@ -note: no errors encountered even though `span_delayed_bug` issued +note: no errors encountered even though delayed bugs were created note: those delayed bugs will now be shown as internal compiler errors From d9508a1fd2bdbc2f7c4e2ee28503f15487fdc8ce Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 2 Feb 2024 15:44:22 +1100 Subject: [PATCH 16/20] Make `Emitter::emit_diagnostic` consuming. All the other `emit`/`emit_diagnostic` methods were recently made consuming (e.g. #119606), but this one wasn't. But it makes sense to. Much of this is straightforward, and lots of `clone` calls are avoided. There are a couple of tricky bits. - `Emitter::primary_span_formatted` no longer takes a `Diagnostic` and returns a pair. Instead it takes the two fields from `Diagnostic` that it used (`span` and `suggestions`) as `&mut`, and modifies them. This is necessary to avoid the cloning of `diag.children` in two emitters. - `from_errors_diagnostic` is rearranged so various uses of `diag` occur before the consuming `emit_diagnostic` call. 
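In signature terms, the `Emitter` changes can be summarized as follows
(reconstructed from the diffs below; only the changed items are shown):

    // Before:
    fn emit_diagnostic(&mut self, diag: &Diagnostic);
    fn primary_span_formatted<'a>(
        &mut self,
        diag: &'a Diagnostic,
        fluent_args: &FluentArgs<'_>,
    ) -> (MultiSpan, &'a [CodeSuggestion]);

    // After: the diagnostic is consumed, and the two fields it actually used
    // are passed individually and modified in place.
    fn emit_diagnostic(&mut self, diag: Diagnostic);
    fn primary_span_formatted(
        &mut self,
        primary_span: &mut MultiSpan,
        suggestions: &mut Vec<CodeSuggestion>,
        fluent_args: &FluentArgs<'_>,
    );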
--- compiler/rustc_codegen_ssa/src/back/write.rs | 2 +- .../src/annotate_snippet_emitter_writer.rs | 16 +++--- compiler/rustc_errors/src/emitter.rs | 41 +++++++-------- compiler/rustc_errors/src/json.rs | 52 ++++++++++--------- compiler/rustc_errors/src/lib.rs | 26 ++++++---- .../passes/lint/check_code_block_syntax.rs | 2 +- src/tools/rustfmt/src/parse/session.rs | 20 +++---- 7 files changed, 84 insertions(+), 75 deletions(-) diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 4211f875dd01a..9b24339d2551f 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -1810,7 +1810,7 @@ impl Translate for SharedEmitter { } impl Emitter for SharedEmitter { - fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) { + fn emit_diagnostic(&mut self, diag: rustc_errors::Diagnostic) { let args: FxHashMap = diag.args().map(|(name, arg)| (name.clone(), arg.clone())).collect(); drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs index 949f52ef6b586..1ce75a0381237 100644 --- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs +++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs @@ -44,15 +44,15 @@ impl Translate for AnnotateSnippetEmitter { impl Emitter for AnnotateSnippetEmitter { /// The entry point for the diagnostics generation - fn emit_diagnostic(&mut self, diag: &Diagnostic) { + fn emit_diagnostic(&mut self, mut diag: Diagnostic) { let fluent_args = to_fluent_args(diag.args()); - let mut children = diag.children.clone(); - let (mut primary_span, suggestions) = self.primary_span_formatted(diag, &fluent_args); + let mut suggestions = diag.suggestions.unwrap_or(vec![]); + self.primary_span_formatted(&mut diag.span, &mut suggestions, &fluent_args); self.fix_multispans_in_extern_macros_and_render_macro_backtrace( - &mut primary_span, - &mut children, + &mut diag.span, + &mut diag.children, &diag.level, self.macro_backtrace, ); @@ -62,9 +62,9 @@ impl Emitter for AnnotateSnippetEmitter { &diag.messages, &fluent_args, &diag.code, - &primary_span, - &children, - suggestions, + &diag.span, + &diag.children, + &suggestions, ); } diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index 4be5ed923e5e0..12d947f2098b1 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -193,7 +193,7 @@ pub type DynEmitter = dyn Emitter + DynSend; /// Emitter trait for emitting errors. pub trait Emitter: Translate { /// Emit a structured diagnostic. - fn emit_diagnostic(&mut self, diag: &Diagnostic); + fn emit_diagnostic(&mut self, diag: Diagnostic); /// Emit a notification that an artifact has been output. /// Currently only supported for the JSON format. @@ -230,17 +230,17 @@ pub trait Emitter: Translate { /// /// * If the current `Diagnostic` has only one visible `CodeSuggestion`, /// we format the `help` suggestion depending on the content of the - /// substitutions. In that case, we return the modified span only. + /// substitutions. In that case, we modify the span and clear the + /// suggestions. /// /// * If the current `Diagnostic` has multiple suggestions, - /// we return the original `primary_span` and the original suggestions. - fn primary_span_formatted<'a>( + /// we leave `primary_span` and the suggestions untouched. 
+ fn primary_span_formatted( &mut self, - diag: &'a Diagnostic, + primary_span: &mut MultiSpan, + suggestions: &mut Vec, fluent_args: &FluentArgs<'_>, - ) -> (MultiSpan, &'a [CodeSuggestion]) { - let mut primary_span = diag.span.clone(); - let suggestions = diag.suggestions.as_deref().unwrap_or(&[]); + ) { if let Some((sugg, rest)) = suggestions.split_first() { let msg = self.translate_message(&sugg.msg, fluent_args).map_err(Report::new).unwrap(); if rest.is_empty() && @@ -287,16 +287,15 @@ pub trait Emitter: Translate { primary_span.push_span_label(sugg.substitutions[0].parts[0].span, msg); // We return only the modified primary_span - (primary_span, &[]) + suggestions.clear(); } else { // if there are multiple suggestions, print them all in full // to be consistent. We could try to figure out if we can // make one (or the first one) inline, but that would give // undue importance to a semi-random suggestion - (primary_span, suggestions) } } else { - (primary_span, suggestions) + // do nothing } } @@ -518,16 +517,15 @@ impl Emitter for HumanEmitter { self.sm.as_ref() } - fn emit_diagnostic(&mut self, diag: &Diagnostic) { + fn emit_diagnostic(&mut self, mut diag: Diagnostic) { let fluent_args = to_fluent_args(diag.args()); - let mut children = diag.children.clone(); - let (mut primary_span, suggestions) = self.primary_span_formatted(diag, &fluent_args); - debug!("emit_diagnostic: suggestions={:?}", suggestions); + let mut suggestions = diag.suggestions.unwrap_or(vec![]); + self.primary_span_formatted(&mut diag.span, &mut suggestions, &fluent_args); self.fix_multispans_in_extern_macros_and_render_macro_backtrace( - &mut primary_span, - &mut children, + &mut diag.span, + &mut diag.children, &diag.level, self.macro_backtrace, ); @@ -537,9 +535,9 @@ impl Emitter for HumanEmitter { &diag.messages, &fluent_args, &diag.code, - &primary_span, - &children, - suggestions, + &diag.span, + &diag.children, + &suggestions, self.track_diagnostics.then_some(&diag.emitted_at), ); } @@ -576,9 +574,8 @@ impl Emitter for SilentEmitter { None } - fn emit_diagnostic(&mut self, diag: &Diagnostic) { + fn emit_diagnostic(&mut self, mut diag: Diagnostic) { if diag.level == Level::Fatal { - let mut diag = diag.clone(); diag.note(self.fatal_note.clone()); self.fatal_dcx.emit_diagnostic(diag); } diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs index 6f92299827950..470e3d52452cf 100644 --- a/compiler/rustc_errors/src/json.rs +++ b/compiler/rustc_errors/src/json.rs @@ -176,7 +176,7 @@ impl Translate for JsonEmitter { } impl Emitter for JsonEmitter { - fn emit_diagnostic(&mut self, diag: &crate::Diagnostic) { + fn emit_diagnostic(&mut self, diag: crate::Diagnostic) { let data = Diagnostic::from_errors_diagnostic(diag, self); let result = self.emit(EmitTyped::Diagnostic(data)); if let Err(e) = result { @@ -201,7 +201,7 @@ impl Emitter for JsonEmitter { } FutureBreakageItem { diagnostic: EmitTyped::Diagnostic(Diagnostic::from_errors_diagnostic( - &diag, self, + diag, self, )), } }) @@ -340,7 +340,7 @@ struct UnusedExterns<'a, 'b, 'c> { } impl Diagnostic { - fn from_errors_diagnostic(diag: &crate::Diagnostic, je: &JsonEmitter) -> Diagnostic { + fn from_errors_diagnostic(diag: crate::Diagnostic, je: &JsonEmitter) -> Diagnostic { let args = to_fluent_args(diag.args()); let sugg = diag.suggestions.iter().flatten().map(|sugg| { let translated_message = @@ -382,6 +382,28 @@ impl Diagnostic { Ok(()) } } + + let translated_message = je.translate_messages(&diag.messages, &args); + + let code = if 
let Some(code) = diag.code { + Some(DiagnosticCode { + code: code.to_string(), + explanation: je.registry.as_ref().unwrap().try_find_description(code).ok(), + }) + } else if let Some(IsLint { name, .. }) = &diag.is_lint { + Some(DiagnosticCode { code: name.to_string(), explanation: None }) + } else { + None + }; + let level = diag.level.to_str(); + let spans = DiagnosticSpan::from_multispan(&diag.span, &args, je); + let children = diag + .children + .iter() + .map(|c| Diagnostic::from_sub_diagnostic(c, &args, je)) + .chain(sugg) + .collect(); + let buf = BufWriter::default(); let output = buf.clone(); je.json_rendered @@ -398,30 +420,12 @@ impl Diagnostic { let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap(); let output = String::from_utf8(output).unwrap(); - let translated_message = je.translate_messages(&diag.messages, &args); - - let code = if let Some(code) = diag.code { - Some(DiagnosticCode { - code: code.to_string(), - explanation: je.registry.as_ref().unwrap().try_find_description(code).ok(), - }) - } else if let Some(IsLint { name, .. }) = &diag.is_lint { - Some(DiagnosticCode { code: name.to_string(), explanation: None }) - } else { - None - }; - Diagnostic { message: translated_message.to_string(), code, - level: diag.level.to_str(), - spans: DiagnosticSpan::from_multispan(&diag.span, &args, je), - children: diag - .children - .iter() - .map(|c| Diagnostic::from_sub_diagnostic(c, &args, je)) - .chain(sugg) - .collect(), + level, + spans, + children, rendered: Some(output), } } diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index b2bd4d8eb956e..a48fcf3af9b02 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -990,9 +990,13 @@ impl DiagCtxt { match (errors.len(), warnings.len()) { (0, 0) => return, - (0, _) => inner - .emitter - .emit_diagnostic(&Diagnostic::new(Warning, DiagnosticMessage::Str(warnings))), + (0, _) => { + // Use `inner.emitter` directly, otherwise the warning might not be emitted, e.g. + // with a configuration like `--cap-lints allow --force-warn bare_trait_objects`. + inner + .emitter + .emit_diagnostic(Diagnostic::new(Warning, DiagnosticMessage::Str(warnings))); + } (_, 0) => { inner.emit_diagnostic(Diagnostic::new(Fatal, errors)); } @@ -1056,7 +1060,7 @@ impl DiagCtxt { } pub fn force_print_diagnostic(&self, db: Diagnostic) { - self.inner.borrow_mut().emitter.emit_diagnostic(&db); + self.inner.borrow_mut().emitter.emit_diagnostic(db); } pub fn emit_diagnostic(&self, diagnostic: Diagnostic) -> Option { @@ -1324,11 +1328,15 @@ impl DiagCtxtInner { !self.emitted_diagnostics.insert(diagnostic_hash) }; + let is_error = diagnostic.is_error(); + let is_lint = diagnostic.is_lint.is_some(); + // Only emit the diagnostic if we've been asked to deduplicate or // haven't already emitted an equivalent diagnostic. 
if !(self.flags.deduplicate_diagnostics && already_emitted) { debug!(?diagnostic); debug!(?self.emitted_diagnostics); + let already_emitted_sub = |sub: &mut SubDiagnostic| { debug!(?sub); if sub.level != OnceNote && sub.level != OnceHelp { @@ -1340,7 +1348,6 @@ impl DiagCtxtInner { debug!(?diagnostic_hash); !self.emitted_diagnostics.insert(diagnostic_hash) }; - diagnostic.children.extract_if(already_emitted_sub).for_each(|_| {}); if already_emitted { diagnostic.note( @@ -1348,16 +1355,17 @@ impl DiagCtxtInner { ); } - self.emitter.emit_diagnostic(&diagnostic); - if diagnostic.is_error() { + if is_error { self.deduplicated_err_count += 1; } else if matches!(diagnostic.level, ForceWarning(_) | Warning) { self.deduplicated_warn_count += 1; } self.has_printed = true; + + self.emitter.emit_diagnostic(diagnostic); } - if diagnostic.is_error() { - if diagnostic.is_lint.is_some() { + if is_error { + if is_lint { self.lint_err_count += 1; } else { self.err_count += 1; diff --git a/src/librustdoc/passes/lint/check_code_block_syntax.rs b/src/librustdoc/passes/lint/check_code_block_syntax.rs index e73649c722493..6649894f9c25d 100644 --- a/src/librustdoc/passes/lint/check_code_block_syntax.rs +++ b/src/librustdoc/passes/lint/check_code_block_syntax.rs @@ -156,7 +156,7 @@ impl Translate for BufferEmitter { } impl Emitter for BufferEmitter { - fn emit_diagnostic(&mut self, diag: &Diagnostic) { + fn emit_diagnostic(&mut self, diag: Diagnostic) { let mut buffer = self.buffer.borrow_mut(); let fluent_args = to_fluent_args(diag.args()); diff --git a/src/tools/rustfmt/src/parse/session.rs b/src/tools/rustfmt/src/parse/session.rs index 6dc3eac44d43d..f0af401d3da4b 100644 --- a/src/tools/rustfmt/src/parse/session.rs +++ b/src/tools/rustfmt/src/parse/session.rs @@ -47,7 +47,7 @@ impl Emitter for SilentEmitter { None } - fn emit_diagnostic(&mut self, _db: &Diagnostic) {} + fn emit_diagnostic(&mut self, _db: Diagnostic) {} } fn silent_emitter() -> Box { @@ -64,7 +64,7 @@ struct SilentOnIgnoredFilesEmitter { } impl SilentOnIgnoredFilesEmitter { - fn handle_non_ignoreable_error(&mut self, db: &Diagnostic) { + fn handle_non_ignoreable_error(&mut self, db: Diagnostic) { self.has_non_ignorable_parser_errors = true; self.can_reset.store(false, Ordering::Release); self.emitter.emit_diagnostic(db); @@ -86,7 +86,7 @@ impl Emitter for SilentOnIgnoredFilesEmitter { None } - fn emit_diagnostic(&mut self, db: &Diagnostic) { + fn emit_diagnostic(&mut self, db: Diagnostic) { if db.level() == DiagnosticLevel::Fatal { return self.handle_non_ignoreable_error(db); } @@ -365,7 +365,7 @@ mod tests { None } - fn emit_diagnostic(&mut self, _db: &Diagnostic) { + fn emit_diagnostic(&mut self, _db: Diagnostic) { self.num_emitted_errors.fetch_add(1, Ordering::Release); } } @@ -424,7 +424,7 @@ mod tests { ); let span = MultiSpan::from_span(mk_sp(BytePos(0), BytePos(1))); let fatal_diagnostic = build_diagnostic(DiagnosticLevel::Fatal, Some(span)); - emitter.emit_diagnostic(&fatal_diagnostic); + emitter.emit_diagnostic(fatal_diagnostic); assert_eq!(num_emitted_errors.load(Ordering::Acquire), 1); assert_eq!(can_reset_errors.load(Ordering::Acquire), false); } @@ -449,7 +449,7 @@ mod tests { ); let span = MultiSpan::from_span(mk_sp(BytePos(0), BytePos(1))); let non_fatal_diagnostic = build_diagnostic(DiagnosticLevel::Warning, Some(span)); - emitter.emit_diagnostic(&non_fatal_diagnostic); + emitter.emit_diagnostic(non_fatal_diagnostic); assert_eq!(num_emitted_errors.load(Ordering::Acquire), 0); assert_eq!(can_reset_errors.load(Ordering::Acquire), 
true); } @@ -473,7 +473,7 @@ mod tests { ); let span = MultiSpan::from_span(mk_sp(BytePos(0), BytePos(1))); let non_fatal_diagnostic = build_diagnostic(DiagnosticLevel::Warning, Some(span)); - emitter.emit_diagnostic(&non_fatal_diagnostic); + emitter.emit_diagnostic(non_fatal_diagnostic); assert_eq!(num_emitted_errors.load(Ordering::Acquire), 1); assert_eq!(can_reset_errors.load(Ordering::Acquire), false); } @@ -512,9 +512,9 @@ mod tests { let bar_diagnostic = build_diagnostic(DiagnosticLevel::Warning, Some(bar_span)); let foo_diagnostic = build_diagnostic(DiagnosticLevel::Warning, Some(foo_span)); let fatal_diagnostic = build_diagnostic(DiagnosticLevel::Fatal, None); - emitter.emit_diagnostic(&bar_diagnostic); - emitter.emit_diagnostic(&foo_diagnostic); - emitter.emit_diagnostic(&fatal_diagnostic); + emitter.emit_diagnostic(bar_diagnostic); + emitter.emit_diagnostic(foo_diagnostic); + emitter.emit_diagnostic(fatal_diagnostic); assert_eq!(num_emitted_errors.load(Ordering::Acquire), 2); assert_eq!(can_reset_errors.load(Ordering::Acquire), false); } From c94769a9748233559313c532d524f58ebb643b1d Mon Sep 17 00:00:00 2001 From: Oli Scherer Date: Mon, 5 Feb 2024 22:21:40 +0100 Subject: [PATCH 17/20] Clarify order of operations during interning Co-authored-by: Ralf Jung --- compiler/rustc_const_eval/src/interpret/intern.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index c3a53f90e60a0..38e7843761be8 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -148,6 +148,13 @@ pub fn intern_const_alloc_recursive< // better errors. Maybe we should consider doing validation before interning in the future. while let Some(prov) = todo.pop() { let alloc_id = prov.alloc_id(); + // Crucially, we check this *before* checking whether the `alloc_id` + // has already been interned. The point of this check is to ensure that when + // there are multiple pointers to the same allocation, they are *all* immutable. + // Therefore it would be bad if we only checked the first pointer to any given + // allocation. + // (It is likely not possible to actually have multiple pointers to the same allocation, + // so alternatively we could also check that and ICE if there are multiple such pointers.) 
if intern_kind != InternKind::Promoted && inner_mutability == Mutability::Not && !prov.immutable() From 411967c0786495d750a60d1cb67087471bc3684f Mon Sep 17 00:00:00 2001 From: Nadrieril Date: Wed, 31 Jan 2024 02:37:44 +0100 Subject: [PATCH 18/20] Zip together `place_ty` and `place_validity` --- .../rustc_pattern_analysis/src/usefulness.rs | 79 +++++++++++-------- 1 file changed, 46 insertions(+), 33 deletions(-) diff --git a/compiler/rustc_pattern_analysis/src/usefulness.rs b/compiler/rustc_pattern_analysis/src/usefulness.rs index bbb68b353e436..246b15ad7d013 100644 --- a/compiler/rustc_pattern_analysis/src/usefulness.rs +++ b/compiler/rustc_pattern_analysis/src/usefulness.rs @@ -767,12 +767,6 @@ impl<'a, Cx: TypeCx> PlaceCtxt<'a, Cx> { fn ctor_arity(&self, ctor: &Constructor) -> usize { self.cx.ctor_arity(ctor, self.ty) } - fn ctor_sub_tys( - &'a self, - ctor: &'a Constructor, - ) -> impl Iterator + ExactSizeIterator + Captures<'a> { - self.cx.ctor_sub_tys(ctor, self.ty) - } fn ctors_for_ty(&self) -> Result, Cx::Error> { self.cx.ctors_for_ty(self.ty) } @@ -828,6 +822,32 @@ impl fmt::Display for ValidityConstraint { } } +/// Data about a place under investigation. +struct PlaceInfo { + /// The type of the place. + ty: Cx::Ty, + /// Whether the place is known to contain valid data. + validity: ValidityConstraint, +} + +impl PlaceInfo { + fn specialize<'a>( + &'a self, + cx: &'a Cx, + ctor: &'a Constructor, + ) -> impl Iterator + ExactSizeIterator + Captures<'a> { + let ctor_sub_tys = cx.ctor_sub_tys(ctor, &self.ty); + let ctor_sub_validity = self.validity.specialize(ctor); + ctor_sub_tys.map(move |ty| PlaceInfo { ty, validity: ctor_sub_validity }) + } +} + +impl Clone for PlaceInfo { + fn clone(&self) -> Self { + Self { ty: self.ty.clone(), validity: self.validity } + } +} + /// Represents a pattern-tuple under investigation. // The three lifetimes are: // - 'p coming from the input @@ -1001,10 +1021,9 @@ struct Matrix<'p, Cx: TypeCx> { /// each column must have the same type. Each column corresponds to a place within the /// scrutinee. rows: Vec>, - /// Track the type of each column/place. - place_ty: SmallVec<[Cx::Ty; 2]>, - /// Track for each column/place whether it contains a known valid value. - place_validity: SmallVec<[ValidityConstraint; 2]>, + /// Track info about each place. Each place corresponds to a column in `rows`, and their types + /// must match. + place_info: SmallVec<[PlaceInfo; 2]>, /// Track whether the virtual wildcard row used to compute exhaustiveness is relevant. See top /// of the file for details on relevancy. 
wildcard_row_is_relevant: bool, @@ -1032,10 +1051,10 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> { scrut_ty: Cx::Ty, scrut_validity: ValidityConstraint, ) -> Self { + let place_info = PlaceInfo { ty: scrut_ty, validity: scrut_validity }; let mut matrix = Matrix { rows: Vec::with_capacity(arms.len()), - place_ty: smallvec![scrut_ty], - place_validity: smallvec![scrut_validity], + place_info: smallvec![place_info], wildcard_row_is_relevant: true, }; for (row_id, arm) in arms.iter().enumerate() { @@ -1051,11 +1070,11 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> { matrix } - fn head_ty(&self) -> Option<&Cx::Ty> { - self.place_ty.first() + fn head_place(&self) -> Option<&PlaceInfo> { + self.place_info.first() } fn column_count(&self) -> usize { - self.place_ty.len() + self.place_info.len() } fn rows( @@ -1083,18 +1102,13 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> { ctor: &Constructor, ctor_is_relevant: bool, ) -> Result, Cx::Error> { - let ctor_sub_tys = pcx.ctor_sub_tys(ctor); - let arity = ctor_sub_tys.len(); - let specialized_place_ty = ctor_sub_tys.chain(self.place_ty[1..].iter().cloned()).collect(); - let ctor_sub_validity = self.place_validity[0].specialize(ctor); - let specialized_place_validity = std::iter::repeat(ctor_sub_validity) - .take(arity) - .chain(self.place_validity[1..].iter().copied()) - .collect(); + let subfield_place_info = self.place_info[0].specialize(pcx.cx, ctor); + let arity = subfield_place_info.len(); + let specialized_place_info = + subfield_place_info.chain(self.place_info[1..].iter().cloned()).collect(); let mut matrix = Matrix { rows: Vec::new(), - place_ty: specialized_place_ty, - place_validity: specialized_place_validity, + place_info: specialized_place_info, wildcard_row_is_relevant: self.wildcard_row_is_relevant && ctor_is_relevant, }; for (i, row) in self.rows().enumerate() { @@ -1127,11 +1141,11 @@ impl<'p, Cx: TypeCx> fmt::Debug for Matrix<'p, Cx> { .map(|row| row.iter().map(|pat| format!("{pat:?}")).collect()) .collect(); pretty_printed_matrix - .push(self.place_validity.iter().map(|validity| format!("{validity}")).collect()); + .push(self.place_info.iter().map(|place| format!("{}", place.validity)).collect()); let column_count = self.column_count(); assert!(self.rows.iter().all(|row| row.len() == column_count)); - assert!(self.place_validity.len() == column_count); + assert!(self.place_info.len() == column_count); let column_widths: Vec = (0..column_count) .map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0)) .collect(); @@ -1447,7 +1461,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>( return Ok(WitnessMatrix::empty()); } - let Some(ty) = matrix.head_ty().cloned() else { + let Some(place) = matrix.head_place() else { // The base case: there are no columns in the matrix. We are morally pattern-matching on (). // A row is useful iff it has no (unguarded) rows above it. let mut useful = true; // Whether the next row is useful. @@ -1467,18 +1481,17 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>( }; }; - debug!("ty: {ty:?}"); - let pcx = &PlaceCtxt { cx: mcx.tycx, ty: &ty }; + let ty = &place.ty.clone(); // Clone it out so we can mutate `matrix` later. + let pcx = &PlaceCtxt { cx: mcx.tycx, ty }; + debug!("ty: {:?}", pcx.ty); let ctors_for_ty = pcx.ctors_for_ty()?; - // Whether the place/column we are inspecting is known to contain valid data. - let place_validity = matrix.place_validity[0]; // We treat match scrutinees of type `!` or `EmptyEnum` differently. 
let is_toplevel_exception = is_top_level && matches!(ctors_for_ty, ConstructorSet::NoConstructors); // Whether empty patterns are counted as useful or not. We only warn an empty arm unreachable if // it is guaranteed unreachable by the opsem (i.e. if the place is `known_valid`). - let empty_arms_are_unreachable = place_validity.is_known_valid() + let empty_arms_are_unreachable = place.validity.is_known_valid() && (is_toplevel_exception || mcx.tycx.is_exhaustive_patterns_feature_on() || mcx.tycx.is_min_exhaustive_patterns_feature_on()); From 6cac1c459ec1f2aa7dd31e9b1b90040c906c64f5 Mon Sep 17 00:00:00 2001 From: Nadrieril Date: Wed, 31 Jan 2024 02:46:10 +0100 Subject: [PATCH 19/20] Track `is_top_level` via `PlaceInfo` --- .../rustc_pattern_analysis/src/usefulness.rs | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/compiler/rustc_pattern_analysis/src/usefulness.rs b/compiler/rustc_pattern_analysis/src/usefulness.rs index 246b15ad7d013..1ac984ce67eec 100644 --- a/compiler/rustc_pattern_analysis/src/usefulness.rs +++ b/compiler/rustc_pattern_analysis/src/usefulness.rs @@ -828,6 +828,8 @@ struct PlaceInfo { ty: Cx::Ty, /// Whether the place is known to contain valid data. validity: ValidityConstraint, + /// Whether the place is the scrutinee itself or a subplace of it. + is_scrutinee: bool, } impl PlaceInfo { @@ -838,13 +840,17 @@ impl PlaceInfo { ) -> impl Iterator + ExactSizeIterator + Captures<'a> { let ctor_sub_tys = cx.ctor_sub_tys(ctor, &self.ty); let ctor_sub_validity = self.validity.specialize(ctor); - ctor_sub_tys.map(move |ty| PlaceInfo { ty, validity: ctor_sub_validity }) + ctor_sub_tys.map(move |ty| PlaceInfo { + ty, + validity: ctor_sub_validity, + is_scrutinee: false, + }) } } impl Clone for PlaceInfo { fn clone(&self) -> Self { - Self { ty: self.ty.clone(), validity: self.validity } + Self { ty: self.ty.clone(), validity: self.validity, is_scrutinee: self.is_scrutinee } } } @@ -1051,7 +1057,7 @@ impl<'p, Cx: TypeCx> Matrix<'p, Cx> { scrut_ty: Cx::Ty, scrut_validity: ValidityConstraint, ) -> Self { - let place_info = PlaceInfo { ty: scrut_ty, validity: scrut_validity }; + let place_info = PlaceInfo { ty: scrut_ty, validity: scrut_validity, is_scrutinee: true }; let mut matrix = Matrix { rows: Vec::with_capacity(arms.len()), place_info: smallvec![place_info], @@ -1446,11 +1452,10 @@ fn collect_overlapping_range_endpoints<'p, Cx: TypeCx>( /// - unspecialization, where we lift the results from the previous step into results for this step /// (using `apply_constructor` and by updating `row.useful` for each parent row). /// This is all explained at the top of the file. -#[instrument(level = "debug", skip(mcx, is_top_level), ret)] +#[instrument(level = "debug", skip(mcx), ret)] fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>( mcx: UsefulnessCtxt<'a, Cx>, matrix: &mut Matrix<'p, Cx>, - is_top_level: bool, ) -> Result, Cx::Error> { debug_assert!(matrix.rows().all(|r| r.len() == matrix.column_count())); @@ -1488,7 +1493,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>( // We treat match scrutinees of type `!` or `EmptyEnum` differently. let is_toplevel_exception = - is_top_level && matches!(ctors_for_ty, ConstructorSet::NoConstructors); + place.is_scrutinee && matches!(ctors_for_ty, ConstructorSet::NoConstructors); // Whether empty patterns are counted as useful or not. We only warn an empty arm unreachable if // it is guaranteed unreachable by the opsem (i.e. if the place is `known_valid`). 
let empty_arms_are_unreachable = place.validity.is_known_valid() @@ -1517,7 +1522,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>( // Decide what constructors to report. let is_integers = matches!(ctors_for_ty, ConstructorSet::Integers { .. }); - let always_report_all = is_top_level && !is_integers; + let always_report_all = place.is_scrutinee && !is_integers; // Whether we should report "Enum::A and Enum::C are missing" or "_ is missing". let report_individual_missing_ctors = always_report_all || !all_missing; // Which constructors are considered missing. We ensure that `!missing_ctors.is_empty() => @@ -1538,7 +1543,7 @@ fn compute_exhaustiveness_and_usefulness<'a, 'p, Cx: TypeCx>( let ctor_is_relevant = matches!(ctor, Constructor::Missing) || missing_ctors.is_empty(); let mut spec_matrix = matrix.specialize_constructor(pcx, &ctor, ctor_is_relevant)?; let mut witnesses = ensure_sufficient_stack(|| { - compute_exhaustiveness_and_usefulness(mcx, &mut spec_matrix, false) + compute_exhaustiveness_and_usefulness(mcx, &mut spec_matrix) })?; // Transform witnesses for `spec_matrix` into witnesses for `matrix`. @@ -1613,8 +1618,7 @@ pub fn compute_match_usefulness<'p, Cx: TypeCx>( ) -> Result, Cx::Error> { let cx = UsefulnessCtxt { tycx }; let mut matrix = Matrix::new(arms, scrut_ty, scrut_validity); - let non_exhaustiveness_witnesses = - compute_exhaustiveness_and_usefulness(cx, &mut matrix, true)?; + let non_exhaustiveness_witnesses = compute_exhaustiveness_and_usefulness(cx, &mut matrix)?; let non_exhaustiveness_witnesses: Vec<_> = non_exhaustiveness_witnesses.single_column(); let arm_usefulness: Vec<_> = arms From aa3f1a290d0f7e6884339ef047cdd49fde3b21f2 Mon Sep 17 00:00:00 2001 From: Markus Reiter Date: Tue, 6 Feb 2024 09:04:19 +0100 Subject: [PATCH 20/20] Update test output. 
--- .../ui/print_type_sizes/niche-filling.stdout | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tests/ui/print_type_sizes/niche-filling.stdout b/tests/ui/print_type_sizes/niche-filling.stdout index b53b893660321..44a6835df5152 100644 --- a/tests/ui/print_type_sizes/niche-filling.stdout +++ b/tests/ui/print_type_sizes/niche-filling.stdout @@ -1,3 +1,30 @@ +print-type-size type: `core::fmt::rt::Placeholder`: 56 bytes, alignment: 8 bytes +print-type-size field `.precision`: 16 bytes +print-type-size field `.width`: 16 bytes +print-type-size field `.position`: 8 bytes +print-type-size field `.fill`: 4 bytes +print-type-size field `.flags`: 4 bytes +print-type-size field `.align`: 1 bytes +print-type-size end padding: 7 bytes +print-type-size type: `std::fmt::Arguments<'_>`: 48 bytes, alignment: 8 bytes +print-type-size field `.pieces`: 16 bytes +print-type-size field `.args`: 16 bytes +print-type-size field `.fmt`: 16 bytes +print-type-size type: `std::panic::Location<'_>`: 24 bytes, alignment: 8 bytes +print-type-size field `.file`: 16 bytes +print-type-size field `.line`: 4 bytes +print-type-size field `.col`: 4 bytes +print-type-size type: `core::fmt::rt::Count`: 16 bytes, alignment: 8 bytes +print-type-size discriminant: 8 bytes +print-type-size variant `Is`: 8 bytes +print-type-size field `.0`: 8 bytes +print-type-size variant `Param`: 8 bytes +print-type-size field `.0`: 8 bytes +print-type-size variant `Implied`: 0 bytes +print-type-size type: `std::option::Option<&[core::fmt::rt::Placeholder]>`: 16 bytes, alignment: 8 bytes +print-type-size variant `Some`: 16 bytes +print-type-size field `.0`: 16 bytes +print-type-size variant `None`: 0 bytes print-type-size type: `IndirectNonZero`: 12 bytes, alignment: 4 bytes print-type-size field `.nested`: 8 bytes print-type-size field `.post`: 2 bytes @@ -68,8 +95,18 @@ print-type-size type: `Union2, u32>`: 4 bytes, alignment: print-type-size variant `Union2`: 4 bytes print-type-size field `.a`: 4 bytes print-type-size field `.b`: 4 bytes, offset: 0 bytes, alignment: 4 bytes +print-type-size type: `std::mem::ManuallyDrop>>`: 4 bytes, alignment: 4 bytes +print-type-size field `.value`: 4 bytes +print-type-size type: `std::mem::MaybeUninit>>`: 4 bytes, alignment: 4 bytes +print-type-size variant `MaybeUninit`: 4 bytes +print-type-size field `.uninit`: 0 bytes +print-type-size field `.value`: 4 bytes print-type-size type: `std::num::NonZero`: 4 bytes, alignment: 4 bytes print-type-size field `.0`: 4 bytes +print-type-size type: `std::option::Option>`: 4 bytes, alignment: 4 bytes +print-type-size variant `Some`: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size variant `None`: 0 bytes print-type-size type: `Enum4<(), (), (), MyOption>`: 2 bytes, alignment: 1 bytes print-type-size variant `Four`: 2 bytes print-type-size field `.0`: 2 bytes @@ -105,6 +142,12 @@ print-type-size type: `MyOption`: 1 bytes, alignment: 1 byte print-type-size variant `Some`: 1 bytes print-type-size field `.0`: 1 bytes print-type-size variant `None`: 0 bytes +print-type-size type: `core::fmt::rt::Alignment`: 1 bytes, alignment: 1 bytes +print-type-size discriminant: 1 bytes +print-type-size variant `Left`: 0 bytes +print-type-size variant `Right`: 0 bytes +print-type-size variant `Center`: 0 bytes +print-type-size variant `Unknown`: 0 bytes print-type-size type: `std::cmp::Ordering`: 1 bytes, alignment: 1 bytes print-type-size discriminant: 1 bytes print-type-size variant `Less`: 0 bytes