From 292de22276467bc786f38fd31b8bc1889f04efb4 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 6 Jul 2022 09:50:17 +1000 Subject: [PATCH 01/11] Add a struct with an unsized field to the `deriving-all-codegen.rs` test. It's an interesting case, requiring the use of `&&` in `Debug::fmt`. --- src/test/ui/deriving/deriving-all-codegen.rs | 6 +- .../ui/deriving/deriving-all-codegen.stdout | 55 +++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/src/test/ui/deriving/deriving-all-codegen.rs b/src/test/ui/deriving/deriving-all-codegen.rs index 1a651b2074c59..157994c0d1724 100644 --- a/src/test/ui/deriving/deriving-all-codegen.rs +++ b/src/test/ui/deriving/deriving-all-codegen.rs @@ -31,9 +31,13 @@ struct Point { // A large struct. #[derive(Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] struct Big { - b1: u32, b2: u32, b3: u32, b4: u32, b5: u32, b6: u32, b7: u32, b8:u32, + b1: u32, b2: u32, b3: u32, b4: u32, b5: u32, b6: u32, b7: u32, b8: u32, } +// A struct with an unsized field. Some derives are not usable in this case. +#[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +struct Unsized([u32]); + // A packed tuple struct. #[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] #[repr(packed)] diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index 9b10192d75a13..38c26f4942e01 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -367,6 +367,61 @@ impl ::core::cmp::Ord for Big { } } +// A struct with an unsized field. Some derives are not usable in this case. +struct Unsized([u32]); +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::fmt::Debug for Unsized { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Unsized", + &&self.0) + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::hash::Hash for Unsized { + fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { + ::core::hash::Hash::hash(&self.0, state) + } +} +impl ::core::marker::StructuralPartialEq for Unsized {} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::PartialEq for Unsized { + #[inline] + fn eq(&self, other: &Unsized) -> bool { self.0 == other.0 } + #[inline] + fn ne(&self, other: &Unsized) -> bool { self.0 != other.0 } +} +impl ::core::marker::StructuralEq for Unsized {} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::Eq for Unsized { + #[inline] + #[doc(hidden)] + #[no_coverage] + fn assert_receiver_is_total_eq(&self) -> () { + let _: ::core::cmp::AssertParamIsEq<[u32]>; + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::PartialOrd for Unsized { + #[inline] + fn partial_cmp(&self, other: &Unsized) + -> ::core::option::Option<::core::cmp::Ordering> { + ::core::cmp::PartialOrd::partial_cmp(&self.0, &other.0) + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::Ord for Unsized { + #[inline] + fn cmp(&self, other: &Unsized) -> ::core::cmp::Ordering { + ::core::cmp::Ord::cmp(&self.0, &other.0) + } +} + // A packed tuple struct. 
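The `&&self.0` in the `Debug` impl above is the interesting bit: `debug_tuple_field1_finish` takes its field as `&dyn Debug`, and coercing a reference into a trait object requires a `Sized` pointee. `self.0` has the unsized type `[u32]`, so `&self.0` (a `&[u32]`) cannot coerce to `&dyn Debug`, while `&&self.0` can, because `&[u32]` is itself `Sized` and implements `Debug`. A minimal sketch of the same coercion rule outside the derive machinery (`take_debug` is a hypothetical helper, not part of the patch):

```rust
use std::fmt::Debug;

fn take_debug(d: &dyn Debug) {
    println!("{:?}", d);
}

fn main() {
    let slice: &[u32] = &[1, 2, 3];
    // take_debug(slice); // error: `[u32]` is unsized, so `&[u32]` cannot
    //                    // coerce to the trait object `&dyn Debug`
    take_debug(&slice); // ok: `&[u32]` is `Sized` and implements `Debug`
}
```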
#[repr(packed)] struct Packed(u32); From 1bfe5f1b0d480307697348b978a117226dedb156 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 6 Jul 2022 15:19:43 +1000 Subject: [PATCH 02/11] Add a non-`Copy` packed struct to `deriving-all-codegen.rs`. Because the generated code is different to that of a `Copy` packed struct. --- src/test/ui/deriving/deriving-all-codegen.rs | 13 +- .../ui/deriving/deriving-all-codegen.stdout | 133 +++++++++++++++--- 2 files changed, 124 insertions(+), 22 deletions(-) diff --git a/src/test/ui/deriving/deriving-all-codegen.rs b/src/test/ui/deriving/deriving-all-codegen.rs index 157994c0d1724..311b9171c6bd9 100644 --- a/src/test/ui/deriving/deriving-all-codegen.rs +++ b/src/test/ui/deriving/deriving-all-codegen.rs @@ -38,10 +38,19 @@ struct Big { #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] struct Unsized([u32]); -// A packed tuple struct. +// A packed tuple struct that impls `Copy`. #[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] #[repr(packed)] -struct Packed(u32); +struct PackedCopy(u32); + +// A packed tuple struct that does not impl `Copy`. Note that the alignment of +// the field must be 1 for this code to be valid. Otherwise it triggers an +// error "`#[derive]` can't be used on a `#[repr(packed)]` struct that does not +// derive Copy (error E0133)" at MIR building time. This is a weird case and +// it's possible that this struct is not supposed to work, but for now it does. +#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[repr(packed)] +struct PackedNonCopy(u8); // An empty enum. #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index 38c26f4942e01..8470f14f5d4c4 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -422,65 +422,67 @@ impl ::core::cmp::Ord for Unsized { } } -// A packed tuple struct. +// A packed tuple struct that impls `Copy`.
#[repr(packed)] -struct Packed(u32); +struct PackedCopy(u32); #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::clone::Clone for Packed { +impl ::core::clone::Clone for PackedCopy { #[inline] - fn clone(&self) -> Packed { + fn clone(&self) -> PackedCopy { let _: ::core::clone::AssertParamIsClone; *self } } #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::marker::Copy for Packed { } +impl ::core::marker::Copy for PackedCopy { } #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::fmt::Debug for Packed { +impl ::core::fmt::Debug for PackedCopy { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { let Self(__self_0_0) = *self; - ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Packed", + ::core::fmt::Formatter::debug_tuple_field1_finish(f, "PackedCopy", &&__self_0_0) } } #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::default::Default for Packed { +impl ::core::default::Default for PackedCopy { #[inline] - fn default() -> Packed { Packed(::core::default::Default::default()) } + fn default() -> PackedCopy { + PackedCopy(::core::default::Default::default()) + } } #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::hash::Hash for Packed { +impl ::core::hash::Hash for PackedCopy { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { let Self(__self_0_0) = *self; ::core::hash::Hash::hash(&__self_0_0, state) } } -impl ::core::marker::StructuralPartialEq for Packed {} +impl ::core::marker::StructuralPartialEq for PackedCopy {} #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::cmp::PartialEq for Packed { +impl ::core::cmp::PartialEq for PackedCopy { #[inline] - fn eq(&self, other: &Packed) -> bool { + fn eq(&self, other: &PackedCopy) -> bool { let Self(__self_0_0) = *self; let Self(__self_1_0) = *other; __self_0_0 == __self_1_0 } #[inline] - fn ne(&self, other: &Packed) -> bool { + fn ne(&self, other: &PackedCopy) -> bool { let Self(__self_0_0) = *self; let Self(__self_1_0) = *other; __self_0_0 != __self_1_0 } } -impl ::core::marker::StructuralEq for Packed {} +impl ::core::marker::StructuralEq for PackedCopy {} #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::cmp::Eq for Packed { +impl ::core::cmp::Eq for PackedCopy { #[inline] #[doc(hidden)] #[no_coverage] @@ -490,9 +492,9 @@ impl ::core::cmp::Eq for Packed { } #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::cmp::PartialOrd for Packed { +impl ::core::cmp::PartialOrd for PackedCopy { #[inline] - fn partial_cmp(&self, other: &Packed) + fn partial_cmp(&self, other: &PackedCopy) -> ::core::option::Option<::core::cmp::Ordering> { let Self(__self_0_0) = *self; let Self(__self_1_0) = *other; @@ -501,15 +503,106 @@ impl ::core::cmp::PartialOrd for Packed { } #[automatically_derived] #[allow(unused_qualifications)] -impl ::core::cmp::Ord for Packed { +impl ::core::cmp::Ord for PackedCopy { #[inline] - fn cmp(&self, other: &Packed) -> ::core::cmp::Ordering { + fn cmp(&self, other: &PackedCopy) -> ::core::cmp::Ordering { let Self(__self_0_0) = *self; let Self(__self_1_0) = *other; ::core::cmp::Ord::cmp(&__self_0_0, &__self_1_0) } } +// A packed tuple struct that does not impl `Copy`. Note that the alignment of +// the field must be 1 for this code to be valid. Otherwise it triggers an +// error "`#[derive]` can't be used on a `#[repr(packed)]` struct that does not +// derive Copy (error E0133)" at MIR building time. 
This is a weird case and +// it's possible that this struct is not supposed to work, but for now it does. +#[repr(packed)] +struct PackedNonCopy(u8); +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::clone::Clone for PackedNonCopy { + #[inline] + fn clone(&self) -> PackedNonCopy { + let Self(ref __self_0_0) = *self; + PackedNonCopy(::core::clone::Clone::clone(&*__self_0_0)) + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::fmt::Debug for PackedNonCopy { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + let Self(ref __self_0_0) = *self; + ::core::fmt::Formatter::debug_tuple_field1_finish(f, "PackedNonCopy", + &&*__self_0_0) + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::default::Default for PackedNonCopy { + #[inline] + fn default() -> PackedNonCopy { + PackedNonCopy(::core::default::Default::default()) + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::hash::Hash for PackedNonCopy { + fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { + let Self(ref __self_0_0) = *self; + ::core::hash::Hash::hash(&*__self_0_0, state) + } +} +impl ::core::marker::StructuralPartialEq for PackedNonCopy {} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::PartialEq for PackedNonCopy { + #[inline] + fn eq(&self, other: &PackedNonCopy) -> bool { + let Self(ref __self_0_0) = *self; + let Self(ref __self_1_0) = *other; + *__self_0_0 == *__self_1_0 + } + #[inline] + fn ne(&self, other: &PackedNonCopy) -> bool { + let Self(ref __self_0_0) = *self; + let Self(ref __self_1_0) = *other; + *__self_0_0 != *__self_1_0 + } +} +impl ::core::marker::StructuralEq for PackedNonCopy {} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::Eq for PackedNonCopy { + #[inline] + #[doc(hidden)] + #[no_coverage] + fn assert_receiver_is_total_eq(&self) -> () { + let _: ::core::cmp::AssertParamIsEq<u8>; + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::PartialOrd for PackedNonCopy { + #[inline] + fn partial_cmp(&self, other: &PackedNonCopy) + -> ::core::option::Option<::core::cmp::Ordering> { + let Self(ref __self_0_0) = *self; + let Self(ref __self_1_0) = *other; + ::core::cmp::PartialOrd::partial_cmp(&*__self_0_0, &*__self_1_0) + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::Ord for PackedNonCopy { + #[inline] + fn cmp(&self, other: &PackedNonCopy) -> ::core::cmp::Ordering { + let Self(ref __self_0_0) = *self; + let Self(ref __self_1_0) = *other; + ::core::cmp::Ord::cmp(&*__self_0_0, &*__self_1_0) + } +} + // An empty enum. enum Enum0 {} #[automatically_derived] From a42e50e230c5f6047d222f24d2e1ea5988039bee Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 8 Jul 2022 11:36:08 +1000 Subject: [PATCH 03/11] Add a fieldless enum with one variant to `deriving-all-codegen.rs`. Because the generated code is different to that of a fieldless enum with multiple variants.
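Roughly, the difference being captured: with a single variant there is no discriminant to compare, so the derived comparison bodies collapse to constants, while multiple fieldless variants force a discriminant check first. A hand-written sketch of the two shapes (simplified, using the stable `std::mem::discriminant` in place of the `discriminant_value` intrinsic the derive actually emits):

```rust
use std::mem::discriminant;

enum One { A }          // one fieldless variant
enum Many { A, B, C }   // several fieldless variants

// One variant: the values cannot differ, so equality is trivially true.
fn one_eq(_this: &One, _other: &One) -> bool {
    true
}

// Several variants: equality reduces to comparing discriminants.
fn many_eq(this: &Many, other: &Many) -> bool {
    discriminant(this) == discriminant(other)
}
```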
--- src/test/ui/deriving/deriving-all-codegen.rs | 7 ++ .../ui/deriving/deriving-all-codegen.stdout | 74 +++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/src/test/ui/deriving/deriving-all-codegen.rs b/src/test/ui/deriving/deriving-all-codegen.rs index 311b9171c6bd9..aef79ae8a5b8d 100644 --- a/src/test/ui/deriving/deriving-all-codegen.rs +++ b/src/test/ui/deriving/deriving-all-codegen.rs @@ -62,6 +62,13 @@ enum Enum1 { Single { x: u32 } } +// A C-like, fieldless enum with a single variant. +#[derive(Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum Fieldless1 { + #[default] + A, +} + // A C-like, fieldless enum. #[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] enum Fieldless { diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index 8470f14f5d4c4..7a1df7046b55c 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -759,6 +759,80 @@ impl ::core::cmp::Ord for Enum1 { } } +// A C-like, fieldless enum with a single variant. +enum Fieldless1 { + + #[default] + A, +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::clone::Clone for Fieldless1 { + #[inline] + fn clone(&self) -> Fieldless1 { + match &*self { &Fieldless1::A => Fieldless1::A, } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::fmt::Debug for Fieldless1 { + fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + match &*self { + &Fieldless1::A => ::core::fmt::Formatter::write_str(f, "A"), + } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::default::Default for Fieldless1 { + #[inline] + fn default() -> Fieldless1 { Self::A } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::hash::Hash for Fieldless1 { + fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { + match &*self { _ => {} } + } +} +impl ::core::marker::StructuralPartialEq for Fieldless1 {} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::PartialEq for Fieldless1 { + #[inline] + fn eq(&self, other: &Fieldless1) -> bool { + match (&*self, &*other) { _ => true, } + } +} +impl ::core::marker::StructuralEq for Fieldless1 {} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::Eq for Fieldless1 { + #[inline] + #[doc(hidden)] + #[no_coverage] + fn assert_receiver_is_total_eq(&self) -> () {} +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::PartialOrd for Fieldless1 { + #[inline] + fn partial_cmp(&self, other: &Fieldless1) + -> ::core::option::Option<::core::cmp::Ordering> { + match (&*self, &*other) { + _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), + } + } +} +#[automatically_derived] +#[allow(unused_qualifications)] +impl ::core::cmp::Ord for Fieldless1 { + #[inline] + fn cmp(&self, other: &Fieldless1) -> ::core::cmp::Ordering { + match (&*self, &*other) { _ => ::core::cmp::Ordering::Equal, } + } +} + // A C-like, fieldless enum. enum Fieldless { From f314ece27503a7518b91b011ba74709914204913 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 6 Jul 2022 16:13:20 +1000 Subject: [PATCH 04/11] Remove `mutbl` argument from `create_struct_patterns`. It's always `ast::Mutability::Not`. 
--- compiler/rustc_builtin_macros/src/deriving/generic/mod.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs index 74e18bffc2ec9..e45f676e880ac 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs @@ -1072,7 +1072,6 @@ impl<'a> MethodDef<'a> { struct_path, struct_def, &prefixes, - ast::Mutability::Not, use_temporaries, ); @@ -1219,7 +1218,6 @@ impl<'a> MethodDef<'a> { variant_path, &variant.data, &prefixes, - ast::Mutability::Not, use_temporaries, ) .into_iter() @@ -1453,7 +1451,6 @@ impl<'a> TraitDef<'a> { struct_path: ast::Path, struct_def: &'a VariantData, prefixes: &[String], - mutbl: ast::Mutability, use_temporaries: bool, ) -> Vec> { prefixes @@ -1465,7 +1462,7 @@ impl<'a> TraitDef<'a> { let binding_mode = if use_temporaries { ast::BindingMode::ByValue(ast::Mutability::Not) } else { - ast::BindingMode::ByRef(mutbl) + ast::BindingMode::ByRef(ast::Mutability::Not) }; let ident = self.mk_pattern_ident(prefix, i); let path = ident.with_span_pos(sp); From 277bc9641d5585fcb2d94440efc6b1880a74fd64 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 6 Jul 2022 16:13:51 +1000 Subject: [PATCH 05/11] Remove unnecessary sigils and `ref`s in derived code. E.g. improving code like this: ``` match &*self { &Enum1::Single { x: ref __self_0 } => { ::core::hash::Hash::hash(&*__self_0, state) } } ``` to this: ``` match self { Enum1::Single { x: __self_0 } => { ::core::hash::Hash::hash(&*__self_0, state) } } ``` by removing the `&*`, the `&`, and the `ref`. I suspect the current generated code predates deref-coercion. The commit also gets rid of `use_temporaries`, instead passing around `always_copy`, which makes things a little clearer. And it fixes up some comments. --- .../src/deriving/generic/mod.rs | 139 +++++------- .../src/deriving/generic/ty.rs | 5 +- .../ui/deriving/deriving-all-codegen.stdout | 212 +++++++++--------- 3 files changed, 160 insertions(+), 196 deletions(-) diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs index e45f676e880ac..dbac6e61ea9aa 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs @@ -183,7 +183,6 @@ use rustc_ast::ptr::P; use rustc_ast::{self as ast, BinOpKind, EnumDef, Expr, Generics, PatKind}; use rustc_ast::{GenericArg, GenericParamKind, VariantData}; use rustc_attr as attr; -use rustc_data_structures::map_in_place::MapInPlace; use rustc_expand::base::{Annotatable, ExtCtxt}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::Span; @@ -455,7 +454,6 @@ impl<'a> TraitDef<'a> { }; let container_id = cx.current_expansion.id.expn_data().parent.expect_local(); let always_copy = has_no_type_params && cx.resolver.has_derive_copy(container_id); - let use_temporaries = is_packed && always_copy; let newitem = match item.kind { ast::ItemKind::Struct(ref struct_def, ref generics) => self.expand_struct_def( @@ -464,11 +462,11 @@ impl<'a> TraitDef<'a> { item.ident, generics, from_scratch, - use_temporaries, is_packed, + always_copy, ), ast::ItemKind::Enum(ref enum_def, ref generics) => { - // We ignore `use_temporaries` here, because + // We ignore `is_packed`/`always_copy` here, because // `repr(packed)` enums cause an error later on. 
// // This can only cause further compilation errors @@ -484,8 +482,8 @@ impl<'a> TraitDef<'a> { item.ident, generics, from_scratch, - use_temporaries, is_packed, + always_copy, ) } else { cx.span_err(mitem.span, "this trait cannot be derived for unions"); @@ -766,8 +764,8 @@ impl<'a> TraitDef<'a> { type_ident: Ident, generics: &Generics, from_scratch: bool, - use_temporaries: bool, is_packed: bool, + always_copy: bool, ) -> P { let field_tys: Vec> = struct_def.fields().iter().map(|field| field.ty.clone()).collect(); @@ -795,8 +793,8 @@ impl<'a> TraitDef<'a> { type_ident, &selflike_args, &nonselflike_args, - use_temporaries, is_packed, + always_copy, ) }; @@ -937,9 +935,7 @@ impl<'a> MethodDef<'a> { match ty { // Selflike (`&Self`) arguments only occur in non-static methods. - Ref(box Self_, _) if !self.is_static() => { - selflike_args.push(cx.expr_deref(span, arg_expr)) - } + Ref(box Self_, _) if !self.is_static() => selflike_args.push(arg_expr), Self_ => cx.span_bug(span, "`Self` in non-return position"), _ => nonselflike_args.push(arg_expr), } @@ -1025,9 +1021,9 @@ impl<'a> MethodDef<'a> { /// # struct A { x: i32, y: i32 } /// impl PartialEq for A { /// fn eq(&self, other: &A) -> bool { - /// let Self { x: ref __self_0_0, y: ref __self_0_1 } = *self; - /// let Self { x: ref __self_1_0, y: ref __self_1_1 } = *other; - /// *__self_0_0 == *__self_1_0 && *__self_0_1 == *__self_1_1 + /// let Self { x: __self_0_0, y: __self_0_1 } = *self; + /// let Self { x: __self_1_0, y: __self_1_1 } = *other; + /// __self_0_0 == __self_1_0 && __self_0_1 == __self_1_1 /// } /// } /// ``` @@ -1039,8 +1035,8 @@ impl<'a> MethodDef<'a> { type_ident: Ident, selflike_args: &[P], nonselflike_args: &[P], - use_temporaries: bool, is_packed: bool, + always_copy: bool, ) -> BlockOrExpr { let span = trait_.span; assert!(selflike_args.len() == 1 || selflike_args.len() == 2); @@ -1062,23 +1058,21 @@ impl<'a> MethodDef<'a> { } else { let prefixes: Vec<_> = (0..selflike_args.len()).map(|i| format!("__self_{}", i)).collect(); + let no_deref = always_copy; let selflike_fields = - trait_.create_struct_pattern_fields(cx, struct_def, &prefixes, use_temporaries); + trait_.create_struct_pattern_fields(cx, struct_def, &prefixes, no_deref); let mut body = mk_body(cx, selflike_fields); let struct_path = cx.path(span, vec![Ident::new(kw::SelfUpper, type_ident.span)]); - let patterns = trait_.create_struct_patterns( - cx, - struct_path, - struct_def, - &prefixes, - use_temporaries, - ); + let use_ref_pat = is_packed && !always_copy; + let patterns = + trait_.create_struct_patterns(cx, struct_path, struct_def, &prefixes, use_ref_pat); // Do the let-destructuring. 
let mut stmts: Vec<_> = iter::zip(selflike_args, patterns) .map(|(selflike_arg_expr, pat)| { - cx.stmt_let_pat(span, pat, selflike_arg_expr.clone()) + let selflike_arg_expr = cx.expr_deref(span, selflike_arg_expr.clone()); + cx.stmt_let_pat(span, pat, selflike_arg_expr) }) .collect(); stmts.extend(std::mem::take(&mut body.0)); @@ -1118,18 +1112,16 @@ impl<'a> MethodDef<'a> { /// impl ::core::cmp::PartialEq for A { /// #[inline] /// fn eq(&self, other: &A) -> bool { - /// { - /// let __self_vi = ::core::intrinsics::discriminant_value(&*self); - /// let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); - /// if true && __self_vi == __arg_1_vi { - /// match (&*self, &*other) { - /// (&A::A2(ref __self_0), &A::A2(ref __arg_1_0)) => - /// (*__self_0) == (*__arg_1_0), - /// _ => true, - /// } - /// } else { - /// false // catch-all handler + /// let __self_vi = ::core::intrinsics::discriminant_value(self); + /// let __arg_1_vi = ::core::intrinsics::discriminant_value(other); + /// if __self_vi == __arg_1_vi { + /// match (self, other) { + /// (A::A2(__self_0), A::A2(__arg_1_0)) => + /// *__self_0 == *__arg_1_0, + /// _ => true, /// } + /// } else { + /// false // catch-all handler /// } /// } /// } @@ -1202,28 +1194,21 @@ impl<'a> MethodDef<'a> { // A single arm has form (&VariantK, &VariantK, ...) => BodyK // (see "Final wrinkle" note below for why.) - let use_temporaries = false; // enums can't be repr(packed) - let fields = trait_.create_struct_pattern_fields( + let no_deref = false; // because enums can't be repr(packed) + let fields = + trait_.create_struct_pattern_fields(cx, &variant.data, &prefixes, no_deref); + + let sp = variant.span.with_ctxt(trait_.span.ctxt()); + let variant_path = cx.path(sp, vec![type_ident, variant.ident]); + let use_ref_pat = false; // because enums can't be repr(packed) + let mut subpats: Vec<_> = trait_.create_struct_patterns( cx, + variant_path, &variant.data, &prefixes, - use_temporaries, + use_ref_pat, ); - let sp = variant.span.with_ctxt(trait_.span.ctxt()); - let variant_path = cx.path(sp, vec![type_ident, variant.ident]); - let mut subpats: Vec<_> = trait_ - .create_struct_patterns( - cx, - variant_path, - &variant.data, - &prefixes, - use_temporaries, - ) - .into_iter() - .map(|p| cx.pat(span, PatKind::Ref(p, ast::Mutability::Not))) - .collect(); - // Here is the pat = `(&VariantK, &VariantK, ...)` let single_pat = if subpats.len() == 1 { subpats.pop().unwrap() @@ -1302,25 +1287,23 @@ impl<'a> MethodDef<'a> { // Build a series of let statements mapping each selflike_arg // to its discriminant value. // - // i.e., for `enum E { A, B(1), C(T, T) }`, and a deriving - // with three Self args, builds three statements: + // i.e., for `enum E { A, B(1), C(T, T) }` for `PartialEq::eq`, + // builds two statements: // ``` - // let __self_vi = std::intrinsics::discriminant_value(&self); - // let __arg_1_vi = std::intrinsics::discriminant_value(&arg1); - // let __arg_2_vi = std::intrinsics::discriminant_value(&arg2); + // let __self_vi = ::core::intrinsics::discriminant_value(self); + // let __arg_1_vi = ::core::intrinsics::discriminant_value(other); // ``` let mut index_let_stmts: Vec = Vec::with_capacity(vi_idents.len() + 1); - // We also build an expression which checks whether all discriminants are equal: - // `__self_vi == __arg_1_vi && __self_vi == __arg_2_vi && ...` + // We also build an expression which checks whether all discriminants are equal, e.g. + // `__self_vi == __arg_1_vi`. 
let mut discriminant_test = cx.expr_bool(span, true); for (i, (&ident, selflike_arg)) in iter::zip(&vi_idents, &selflike_args).enumerate() { - let selflike_addr = cx.expr_addr_of(span, selflike_arg.clone()); let variant_value = deriving::call_intrinsic( cx, span, sym::discriminant_value, - vec![selflike_addr], + vec![selflike_arg.clone()], ); let let_stmt = cx.stmt_let(span, false, ident, variant_value); index_let_stmts.push(let_stmt); @@ -1347,17 +1330,11 @@ impl<'a> MethodDef<'a> { ) .into_expr(cx, span); - // Final wrinkle: the selflike_args are expressions that deref - // down to desired places, but we cannot actually deref - // them when they are fed as r-values into a tuple - // expression; here add a layer of borrowing, turning - // `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`. - selflike_args.map_in_place(|selflike_arg| cx.expr_addr_of(span, selflike_arg)); let match_arg = cx.expr(span, ast::ExprKind::Tup(selflike_args)); - // Lastly we create an expression which branches on all discriminants being equal - // if discriminant_test { - // match (...) { + // Lastly we create an expression which branches on all discriminants being equal, e.g. + // if __self_vi == _arg_1_vi { + // match (self, other) { // (Variant1, Variant1, ...) => Body1 // (Variant2, Variant2, ...) => Body2, // ... @@ -1376,12 +1353,6 @@ impl<'a> MethodDef<'a> { // for the zero variant case. BlockOrExpr(vec![], Some(deriving::call_unreachable(cx, span))) } else { - // Final wrinkle: the selflike_args are expressions that deref - // down to desired places, but we cannot actually deref - // them when they are fed as r-values into a tuple - // expression; here add a layer of borrowing, turning - // `(*self, *__arg_0, ...)` into `(&*self, &*__arg_0, ...)`. - selflike_args.map_in_place(|selflike_arg| cx.expr_addr_of(span, selflike_arg)); let match_arg = if selflike_args.len() == 1 { selflike_args.pop().unwrap() } else { @@ -1451,7 +1422,7 @@ impl<'a> TraitDef<'a> { struct_path: ast::Path, struct_def: &'a VariantData, prefixes: &[String], - use_temporaries: bool, + use_ref_pat: bool, ) -> Vec> { prefixes .iter() @@ -1459,10 +1430,10 @@ impl<'a> TraitDef<'a> { let pieces_iter = struct_def.fields().iter().enumerate().map(|(i, struct_field)| { let sp = struct_field.span.with_ctxt(self.span.ctxt()); - let binding_mode = if use_temporaries { - ast::BindingMode::ByValue(ast::Mutability::Not) - } else { + let binding_mode = if use_ref_pat { ast::BindingMode::ByRef(ast::Mutability::Not) + } else { + ast::BindingMode::ByValue(ast::Mutability::Not) }; let ident = self.mk_pattern_ident(prefix, i); let path = ident.with_span_pos(sp); @@ -1541,7 +1512,7 @@ impl<'a> TraitDef<'a> { cx: &mut ExtCtxt<'_>, struct_def: &'a VariantData, prefixes: &[String], - use_temporaries: bool, + no_deref: bool, ) -> Vec { self.create_fields(struct_def, |i, _struct_field, sp| { prefixes @@ -1549,7 +1520,7 @@ impl<'a> TraitDef<'a> { .map(|prefix| { let ident = self.mk_pattern_ident(prefix, i); let expr = cx.expr_path(cx.path_ident(sp, ident)); - if use_temporaries { expr } else { cx.expr_deref(sp, expr) } + if no_deref { expr } else { cx.expr_deref(sp, expr) } }) .collect() }) @@ -1564,11 +1535,7 @@ impl<'a> TraitDef<'a> { self.create_fields(struct_def, |i, struct_field, sp| { selflike_args .iter() - .map(|mut selflike_arg| { - // We don't the need the deref, if there is one. 
- if let ast::ExprKind::Unary(ast::UnOp::Deref, inner) = &selflike_arg.kind { - selflike_arg = inner; - } + .map(|selflike_arg| { // Note: we must use `struct_field.span` rather than `span` in the // `unwrap_or_else` case otherwise the hygiene is wrong and we get // "field `0` of struct `Point` is private" errors on tuple diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs b/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs index 4b20d87629d96..4d46f7cd48a51 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs @@ -196,9 +196,8 @@ impl Bounds { } pub fn get_explicit_self(cx: &ExtCtxt<'_>, span: Span) -> (P, ast::ExplicitSelf) { - // this constructs a fresh `self` path + // This constructs a fresh `self` path. let self_path = cx.expr_self(span); let self_ty = respan(span, SelfKind::Region(None, ast::Mutability::Not)); - let self_expr = cx.expr_deref(span, self_path); - (self_expr, self_ty) + (self_path, self_ty) } diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index 7a1df7046b55c..ae98d1ad9e6e2 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -675,8 +675,8 @@ enum Enum1 { impl ::core::clone::Clone for Enum1 { #[inline] fn clone(&self) -> Enum1 { - match &*self { - &Enum1::Single { x: ref __self_0 } => + match self { + Enum1::Single { x: __self_0 } => Enum1::Single { x: ::core::clone::Clone::clone(&*__self_0) }, } } @@ -685,8 +685,8 @@ impl ::core::clone::Clone for Enum1 { #[allow(unused_qualifications)] impl ::core::fmt::Debug for Enum1 { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - match &*self { - &Enum1::Single { x: ref __self_0 } => + match self { + Enum1::Single { x: __self_0 } => ::core::fmt::Formatter::debug_struct_field1_finish(f, "Single", "x", &&*__self_0), } @@ -696,8 +696,8 @@ impl ::core::fmt::Debug for Enum1 { #[allow(unused_qualifications)] impl ::core::hash::Hash for Enum1 { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match &*self { - &Enum1::Single { x: ref __self_0 } => { + match self { + Enum1::Single { x: __self_0 } => { ::core::hash::Hash::hash(&*__self_0, state) } } @@ -709,16 +709,16 @@ impl ::core::marker::StructuralPartialEq for Enum1 {} impl ::core::cmp::PartialEq for Enum1 { #[inline] fn eq(&self, other: &Enum1) -> bool { - match (&*self, &*other) { - (&Enum1::Single { x: ref __self_0 }, &Enum1::Single { - x: ref __arg_1_0 }) => *__self_0 == *__arg_1_0, + match (self, other) { + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => + *__self_0 == *__arg_1_0, } } #[inline] fn ne(&self, other: &Enum1) -> bool { - match (&*self, &*other) { - (&Enum1::Single { x: ref __self_0 }, &Enum1::Single { - x: ref __arg_1_0 }) => *__self_0 != *__arg_1_0, + match (self, other) { + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => + *__self_0 != *__arg_1_0, } } } @@ -739,9 +739,8 @@ impl ::core::cmp::PartialOrd for Enum1 { #[inline] fn partial_cmp(&self, other: &Enum1) -> ::core::option::Option<::core::cmp::Ordering> { - match (&*self, &*other) { - (&Enum1::Single { x: ref __self_0 }, &Enum1::Single { - x: ref __arg_1_0 }) => + match (self, other) { + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0), } } @@ -751,9 +750,8 @@ impl ::core::cmp::PartialOrd for Enum1 { impl 
::core::cmp::Ord for Enum1 { #[inline] fn cmp(&self, other: &Enum1) -> ::core::cmp::Ordering { - match (&*self, &*other) { - (&Enum1::Single { x: ref __self_0 }, &Enum1::Single { - x: ref __arg_1_0 }) => + match (self, other) { + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), } } @@ -770,15 +768,15 @@ enum Fieldless1 { impl ::core::clone::Clone for Fieldless1 { #[inline] fn clone(&self) -> Fieldless1 { - match &*self { &Fieldless1::A => Fieldless1::A, } + match self { Fieldless1::A => Fieldless1::A, } } } #[automatically_derived] #[allow(unused_qualifications)] impl ::core::fmt::Debug for Fieldless1 { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - match &*self { - &Fieldless1::A => ::core::fmt::Formatter::write_str(f, "A"), + match self { + Fieldless1::A => ::core::fmt::Formatter::write_str(f, "A"), } } } @@ -792,7 +790,7 @@ impl ::core::default::Default for Fieldless1 { #[allow(unused_qualifications)] impl ::core::hash::Hash for Fieldless1 { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match &*self { _ => {} } + match self { _ => {} } } } impl ::core::marker::StructuralPartialEq for Fieldless1 {} @@ -801,7 +799,7 @@ impl ::core::marker::StructuralPartialEq for Fieldless1 {} impl ::core::cmp::PartialEq for Fieldless1 { #[inline] fn eq(&self, other: &Fieldless1) -> bool { - match (&*self, &*other) { _ => true, } + match (self, other) { _ => true, } } } impl ::core::marker::StructuralEq for Fieldless1 {} @@ -819,7 +817,7 @@ impl ::core::cmp::PartialOrd for Fieldless1 { #[inline] fn partial_cmp(&self, other: &Fieldless1) -> ::core::option::Option<::core::cmp::Ordering> { - match (&*self, &*other) { + match (self, other) { _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), } } @@ -829,7 +827,7 @@ impl ::core::cmp::PartialOrd for Fieldless1 { impl ::core::cmp::Ord for Fieldless1 { #[inline] fn cmp(&self, other: &Fieldless1) -> ::core::cmp::Ordering { - match (&*self, &*other) { _ => ::core::cmp::Ordering::Equal, } + match (self, other) { _ => ::core::cmp::Ordering::Equal, } } } @@ -854,10 +852,10 @@ impl ::core::marker::Copy for Fieldless { } #[allow(unused_qualifications)] impl ::core::fmt::Debug for Fieldless { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - match &*self { - &Fieldless::A => ::core::fmt::Formatter::write_str(f, "A"), - &Fieldless::B => ::core::fmt::Formatter::write_str(f, "B"), - &Fieldless::C => ::core::fmt::Formatter::write_str(f, "C"), + match self { + Fieldless::A => ::core::fmt::Formatter::write_str(f, "A"), + Fieldless::B => ::core::fmt::Formatter::write_str(f, "B"), + Fieldless::C => ::core::fmt::Formatter::write_str(f, "C"), } } } @@ -871,7 +869,7 @@ impl ::core::default::Default for Fieldless { #[allow(unused_qualifications)] impl ::core::hash::Hash for Fieldless { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match &*self { + match self { _ => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state) @@ -885,10 +883,10 @@ impl ::core::marker::StructuralPartialEq for Fieldless {} impl ::core::cmp::PartialEq for Fieldless { #[inline] fn eq(&self, other: &Fieldless) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - 
match (&*self, &*other) { _ => true, } + match (self, other) { _ => true, } } else { false } } } @@ -907,10 +905,10 @@ impl ::core::cmp::PartialOrd for Fieldless { #[inline] fn partial_cmp(&self, other: &Fieldless) -> ::core::option::Option<::core::cmp::Ordering> { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { + match (self, other) { _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), } @@ -924,10 +922,10 @@ impl ::core::cmp::PartialOrd for Fieldless { impl ::core::cmp::Ord for Fieldless { #[inline] fn cmp(&self, other: &Fieldless) -> ::core::cmp::Ordering { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { _ => ::core::cmp::Ordering::Equal, } + match (self, other) { _ => ::core::cmp::Ordering::Equal, } } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) } } } @@ -960,13 +958,13 @@ impl ::core::marker::Copy for Mixed { } #[allow(unused_qualifications)] impl ::core::fmt::Debug for Mixed { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - match &*self { - &Mixed::P => ::core::fmt::Formatter::write_str(f, "P"), - &Mixed::Q => ::core::fmt::Formatter::write_str(f, "Q"), - &Mixed::R(ref __self_0) => + match self { + Mixed::P => ::core::fmt::Formatter::write_str(f, "P"), + Mixed::Q => ::core::fmt::Formatter::write_str(f, "Q"), + Mixed::R(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "R", &&*__self_0), - &Mixed::S { d1: ref __self_0, d2: ref __self_1 } => + Mixed::S { d1: __self_0, d2: __self_1 } => ::core::fmt::Formatter::debug_struct_field2_finish(f, "S", "d1", &&*__self_0, "d2", &&*__self_1), } @@ -982,13 +980,13 @@ impl ::core::default::Default for Mixed { #[allow(unused_qualifications)] impl ::core::hash::Hash for Mixed { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match &*self { - &Mixed::R(ref __self_0) => { + match self { + Mixed::R(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); ::core::hash::Hash::hash(&*__self_0, state) } - &Mixed::S { d1: ref __self_0, d2: ref __self_1 } => { + Mixed::S { d1: __self_0, d2: __self_1 } => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); ::core::hash::Hash::hash(&*__self_0, state); @@ -1007,14 +1005,14 @@ impl ::core::marker::StructuralPartialEq for Mixed {} impl ::core::cmp::PartialEq for Mixed { #[inline] fn eq(&self, other: &Mixed) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) => + match (self, other) { + (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => *__self_0 == *__arg_1_0, - (&Mixed::S { d1: ref __self_0, d2: ref __self_1 }, - &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) => + (Mixed::S { d1: __self_0, d2: __self_1 
}, Mixed::S { + d1: __arg_1_0, d2: __arg_1_1 }) => *__self_0 == *__arg_1_0 && *__self_1 == *__arg_1_1, _ => true, } @@ -1022,14 +1020,14 @@ impl ::core::cmp::PartialEq for Mixed { } #[inline] fn ne(&self, other: &Mixed) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) => + match (self, other) { + (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => *__self_0 != *__arg_1_0, - (&Mixed::S { d1: ref __self_0, d2: ref __self_1 }, - &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) => + (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { + d1: __arg_1_0, d2: __arg_1_1 }) => *__self_0 != *__arg_1_0 || *__self_1 != *__arg_1_1, _ => false, } @@ -1053,15 +1051,15 @@ impl ::core::cmp::PartialOrd for Mixed { #[inline] fn partial_cmp(&self, other: &Mixed) -> ::core::option::Option<::core::cmp::Ordering> { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) => + match (self, other) { + (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0), - (&Mixed::S { d1: ref __self_0, d2: ref __self_1 }, - &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) => + (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { + d1: __arg_1_0, d2: __arg_1_1 }) => match ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0) { ::core::option::Option::Some(::core::cmp::Ordering::Equal) @@ -1083,14 +1081,14 @@ impl ::core::cmp::PartialOrd for Mixed { impl ::core::cmp::Ord for Mixed { #[inline] fn cmp(&self, other: &Mixed) -> ::core::cmp::Ordering { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Mixed::R(ref __self_0), &Mixed::R(ref __arg_1_0)) => + match (self, other) { + (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), - (&Mixed::S { d1: ref __self_0, d2: ref __self_1 }, - &Mixed::S { d1: ref __arg_1_0, d2: ref __arg_1_1 }) => + (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { + d1: __arg_1_0, d2: __arg_1_1 }) => match ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0) { ::core::cmp::Ordering::Equal => ::core::cmp::Ord::cmp(&*__self_1, &*__arg_1_1), @@ -1110,12 +1108,12 @@ enum Fielded { X(u32), Y(bool), Z(Option), } impl ::core::clone::Clone for Fielded { #[inline] fn clone(&self) -> Fielded { - match &*self { - &Fielded::X(ref __self_0) => + match self { + Fielded::X(__self_0) => Fielded::X(::core::clone::Clone::clone(&*__self_0)), - &Fielded::Y(ref __self_0) => + Fielded::Y(__self_0) => Fielded::Y(::core::clone::Clone::clone(&*__self_0)), - &Fielded::Z(ref __self_0) => + Fielded::Z(__self_0) => Fielded::Z(::core::clone::Clone::clone(&*__self_0)), } } @@ -1124,14 +1122,14 @@ impl ::core::clone::Clone for Fielded { 
#[allow(unused_qualifications)] impl ::core::fmt::Debug for Fielded { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - match &*self { - &Fielded::X(ref __self_0) => + match self { + Fielded::X(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "X", &&*__self_0), - &Fielded::Y(ref __self_0) => + Fielded::Y(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Y", &&*__self_0), - &Fielded::Z(ref __self_0) => + Fielded::Z(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Z", &&*__self_0), } @@ -1141,18 +1139,18 @@ impl ::core::fmt::Debug for Fielded { #[allow(unused_qualifications)] impl ::core::hash::Hash for Fielded { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match &*self { - &Fielded::X(ref __self_0) => { + match self { + Fielded::X(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); ::core::hash::Hash::hash(&*__self_0, state) } - &Fielded::Y(ref __self_0) => { + Fielded::Y(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); ::core::hash::Hash::hash(&*__self_0, state) } - &Fielded::Z(ref __self_0) => { + Fielded::Z(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); ::core::hash::Hash::hash(&*__self_0, state) @@ -1166,15 +1164,15 @@ impl ::core::marker::StructuralPartialEq for Fielded {} impl ::core::cmp::PartialEq for Fielded { #[inline] fn eq(&self, other: &Fielded) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) => + match (self, other) { + (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => *__self_0 == *__arg_1_0, - (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) => + (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => *__self_0 == *__arg_1_0, - (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) => + (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => *__self_0 == *__arg_1_0, _ => unsafe { ::core::intrinsics::unreachable() } } @@ -1182,15 +1180,15 @@ impl ::core::cmp::PartialEq for Fielded { } #[inline] fn ne(&self, other: &Fielded) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) => + match (self, other) { + (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => *__self_0 != *__arg_1_0, - (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) => + (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => *__self_0 != *__arg_1_0, - (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) => + (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => *__self_0 != *__arg_1_0, _ => unsafe { ::core::intrinsics::unreachable() } } @@ -1216,17 +1214,17 @@ impl ::core::cmp::PartialOrd for Fielded { #[inline] fn partial_cmp(&self, other: &Fielded) -> ::core::option::Option<::core::cmp::Ordering> { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = 
::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) => + match (self, other) { + (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0), - (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) => + (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0), - (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) => + (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0), _ => unsafe { ::core::intrinsics::unreachable() } @@ -1241,15 +1239,15 @@ impl ::core::cmp::PartialOrd for Fielded { impl ::core::cmp::Ord for Fielded { #[inline] fn cmp(&self, other: &Fielded) -> ::core::cmp::Ordering { - let __self_vi = ::core::intrinsics::discriminant_value(&*self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(&*other); + let __self_vi = ::core::intrinsics::discriminant_value(self); + let __arg_1_vi = ::core::intrinsics::discriminant_value(other); if __self_vi == __arg_1_vi { - match (&*self, &*other) { - (&Fielded::X(ref __self_0), &Fielded::X(ref __arg_1_0)) => + match (self, other) { + (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), - (&Fielded::Y(ref __self_0), &Fielded::Y(ref __arg_1_0)) => + (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), - (&Fielded::Z(ref __self_0), &Fielded::Z(ref __arg_1_0)) => + (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), _ => unsafe { ::core::intrinsics::unreachable() } } From 96f09d73cd6ffc4a4e2719819e205b6e5a26718f Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 7 Jul 2022 11:09:07 +1000 Subject: [PATCH 06/11] Remove unnecessary `&*` sigil pairs in derived code. By producing `&T` expressions for fields instead of `T`. This matches what the existing comments (e.g. on `FieldInfo`) claim is happening, and it's also what most of the trait-specific code needs. The exception is `PartialEq`, which needs `T` expressions for lots of special case error messaging to work. So we now convert the `&T` back to a `T` for `PartialEq`. 
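The practical effect, sketched for a hypothetical `struct Point { x: u32, y: u32 }` rather than one of the test's types: each field now reaches the trait-specific code as a ready-made `&T` such as `&self.x`, so consumers like `Hash` use it directly instead of re-borrowing through `&*`; only `PartialEq` converts back to `T`, by stripping a `&` or adding a `*`, to keep its error messages precise. Roughly the shape `derive(Hash)` produces after this change:

```rust
use std::hash::{Hash, Hasher};

struct Point { x: u32, y: u32 }

// Approximately the derived body: each field arrives as a `&T`
// and is hashed as-is, with no `&*` round trip.
impl Hash for Point {
    fn hash<H: Hasher>(&self, state: &mut H) {
        Hash::hash(&self.x, state);
        Hash::hash(&self.y, state);
    }
}
```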
--- .../src/deriving/clone.rs | 2 +- .../src/deriving/cmp/ord.rs | 5 +- .../src/deriving/cmp/partial_eq.rs | 19 ++++- .../src/deriving/cmp/partial_ord.rs | 5 +- .../src/deriving/debug.rs | 9 +-- .../src/deriving/generic/mod.rs | 48 +++++++---- .../rustc_builtin_macros/src/deriving/hash.rs | 16 ++-- .../ui/deriving/deriving-all-codegen.stdout | 79 +++++++++---------- 8 files changed, 102 insertions(+), 81 deletions(-) diff --git a/compiler/rustc_builtin_macros/src/deriving/clone.rs b/compiler/rustc_builtin_macros/src/deriving/clone.rs index 9cd72ed0c67b2..72aa8e17d01f9 100644 --- a/compiler/rustc_builtin_macros/src/deriving/clone.rs +++ b/compiler/rustc_builtin_macros/src/deriving/clone.rs @@ -161,7 +161,7 @@ fn cs_clone( let all_fields; let fn_path = cx.std_path(&[sym::clone, sym::Clone, sym::clone]); let subcall = |cx: &mut ExtCtxt<'_>, field: &FieldInfo| { - let args = vec![cx.expr_addr_of(field.span, field.self_expr.clone())]; + let args = vec![field.self_expr.clone()]; cx.expr_call_global(field.span, fn_path.clone(), args) }; diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs index 859e995356e80..aad0ab3f5bb3b 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs @@ -63,10 +63,7 @@ pub fn cs_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> Bl let [other_expr] = &field.other_selflike_exprs[..] else { cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`"); }; - let args = vec![ - cx.expr_addr_of(field.span, field.self_expr.clone()), - cx.expr_addr_of(field.span, other_expr.clone()), - ]; + let args = vec![field.self_expr.clone(), other_expr.clone()]; cx.expr_call_global(field.span, cmp_path.clone(), args) } CsFold::Combine(span, expr1, expr2) => { diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs index 724c639984cca..83534b62b46c7 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs @@ -2,7 +2,8 @@ use crate::deriving::generic::ty::*; use crate::deriving::generic::*; use crate::deriving::{path_local, path_std}; -use rustc_ast::{BinOpKind, MetaItem}; +use rustc_ast::ptr::P; +use rustc_ast::{BinOpKind, BorrowKind, Expr, ExprKind, MetaItem, Mutability}; use rustc_expand::base::{Annotatable, ExtCtxt}; use rustc_span::symbol::sym; use rustc_span::Span; @@ -32,7 +33,21 @@ pub fn expand_deriving_partial_eq( let [other_expr] = &field.other_selflike_exprs[..] else { cx.span_bug(field.span, "not exactly 2 arguments in `derive(PartialEq)`"); }; - cx.expr_binary(field.span, op, field.self_expr.clone(), other_expr.clone()) + + // We received `&T` arguments. Convert them to `T` by + // stripping `&` or adding `*`. This isn't necessary for + // type checking, but it results in much better error + // messages if something goes wrong. 
+ let convert = |expr: &P| { + if let ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, inner) = + &expr.kind + { + inner.clone() + } else { + cx.expr_deref(field.span, expr.clone()) + } + }; + cx.expr_binary(field.span, op, convert(&field.self_expr), convert(other_expr)) } CsFold::Combine(span, expr1, expr2) => cx.expr_binary(span, combiner, expr1, expr2), CsFold::Fieldless => cx.expr_bool(span, base), diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs index 3f9843922dad7..ce45b0e196556 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs @@ -71,10 +71,7 @@ pub fn cs_partial_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_ let [other_expr] = &field.other_selflike_exprs[..] else { cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`"); }; - let args = vec![ - cx.expr_addr_of(field.span, field.self_expr.clone()), - cx.expr_addr_of(field.span, other_expr.clone()), - ]; + let args = vec![field.self_expr.clone(), other_expr.clone()]; cx.expr_call_global(field.span, partial_cmp_path.clone(), args) } CsFold::Combine(span, expr1, expr2) => { diff --git a/compiler/rustc_builtin_macros/src/deriving/debug.rs b/compiler/rustc_builtin_macros/src/deriving/debug.rs index b99198054def8..5de2052048692 100644 --- a/compiler/rustc_builtin_macros/src/deriving/debug.rs +++ b/compiler/rustc_builtin_macros/src/deriving/debug.rs @@ -95,9 +95,8 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_> ); args.push(name); } - // Use double indirection to make sure this works for unsized types + // Use an extra indirection to make sure this works for unsized types. let field = cx.expr_addr_of(field.span, field.self_expr.clone()); - let field = cx.expr_addr_of(field.span, field); args.push(field); } let expr = cx.expr_call_global(span, fn_path_debug, args); @@ -115,9 +114,9 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_> )); } - // Use double indirection to make sure this works for unsized types - let value_ref = cx.expr_addr_of(field.span, field.self_expr.clone()); - value_exprs.push(cx.expr_addr_of(field.span, value_ref)); + // Use an extra indirection to make sure this works for unsized types. + let field = cx.expr_addr_of(field.span, field.self_expr.clone()); + value_exprs.push(field); } // `let names: &'static _ = &["field1", "field2"];` diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs index dbac6e61ea9aa..a5d5782dbd2d1 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs @@ -1004,7 +1004,7 @@ impl<'a> MethodDef<'a> { /// ``` /// #[derive(PartialEq)] /// # struct Dummy; - /// struct A { x: i32, y: i32 } + /// struct A { x: u8, y: u8 } /// /// // equivalent to: /// impl PartialEq for A { @@ -1016,9 +1016,9 @@ impl<'a> MethodDef<'a> { /// But if the struct is `repr(packed)`, we can't use something like /// `&self.x` on a packed type (as required for e.g. `Debug` and `Hash`) /// because that might cause an unaligned ref. So we use let-destructuring - /// instead. + /// instead. 
If the struct impls `Copy`: /// ``` - /// # struct A { x: i32, y: i32 } + /// # struct A { x: u8, y: u8 } /// impl PartialEq for A { /// fn eq(&self, other: &A) -> bool { /// let Self { x: __self_0_0, y: __self_0_1 } = *self; @@ -1027,6 +1027,19 @@ impl<'a> MethodDef<'a> { /// } /// } /// ``` + /// If it doesn't impl `Copy`: + /// ``` + /// # struct A { x: u8, y: u8 } + /// impl PartialEq for A { + /// fn eq(&self, other: &A) -> bool { + /// let Self { x: ref __self_0_0, y: ref __self_0_1 } = *self; + /// let Self { x: ref __self_1_0, y: ref __self_1_1 } = *other; + /// *__self_0_0 == *__self_1_0 && *__self_0_1 == *__self_1_1 + /// } + /// } + /// ``` + /// This latter case only works if the fields match the alignment required + /// by the `packed(N)` attribute. fn expand_struct_method_body<'b>( &self, cx: &mut ExtCtxt<'_>, @@ -1058,9 +1071,9 @@ impl<'a> MethodDef<'a> { } else { let prefixes: Vec<_> = (0..selflike_args.len()).map(|i| format!("__self_{}", i)).collect(); - let no_deref = always_copy; + let addr_of = always_copy; let selflike_fields = - trait_.create_struct_pattern_fields(cx, struct_def, &prefixes, no_deref); + trait_.create_struct_pattern_fields(cx, struct_def, &prefixes, addr_of); let mut body = mk_body(cx, selflike_fields); let struct_path = cx.path(span, vec![Ident::new(kw::SelfUpper, type_ident.span)]); @@ -1194,9 +1207,9 @@ impl<'a> MethodDef<'a> { // A single arm has form (&VariantK, &VariantK, ...) => BodyK // (see "Final wrinkle" note below for why.) - let no_deref = false; // because enums can't be repr(packed) + let addr_of = false; // because enums can't be repr(packed) let fields = - trait_.create_struct_pattern_fields(cx, &variant.data, &prefixes, no_deref); + trait_.create_struct_pattern_fields(cx, &variant.data, &prefixes, addr_of); let sp = variant.span.with_ctxt(trait_.span.ctxt()); let variant_path = cx.path(sp, vec![type_ident, variant.ident]); @@ -1512,7 +1525,7 @@ impl<'a> TraitDef<'a> { cx: &mut ExtCtxt<'_>, struct_def: &'a VariantData, prefixes: &[String], - no_deref: bool, + addr_of: bool, ) -> Vec { self.create_fields(struct_def, |i, _struct_field, sp| { prefixes @@ -1520,7 +1533,7 @@ impl<'a> TraitDef<'a> { .map(|prefix| { let ident = self.mk_pattern_ident(prefix, i); let expr = cx.expr_path(cx.path_ident(sp, ident)); - if no_deref { expr } else { cx.expr_deref(sp, expr) } + if addr_of { cx.expr_addr_of(sp, expr) } else { expr } }) .collect() }) @@ -1536,17 +1549,20 @@ impl<'a> TraitDef<'a> { selflike_args .iter() .map(|selflike_arg| { - // Note: we must use `struct_field.span` rather than `span` in the + // Note: we must use `struct_field.span` rather than `sp` in the // `unwrap_or_else` case otherwise the hygiene is wrong and we get // "field `0` of struct `Point` is private" errors on tuple // structs. 
- cx.expr( + cx.expr_addr_of( sp, - ast::ExprKind::Field( - selflike_arg.clone(), - struct_field.ident.unwrap_or_else(|| { - Ident::from_str_and_span(&i.to_string(), struct_field.span) - }), + cx.expr( + sp, + ast::ExprKind::Field( + selflike_arg.clone(), + struct_field.ident.unwrap_or_else(|| { + Ident::from_str_and_span(&i.to_string(), struct_field.span) + }), + ), ), ) }) diff --git a/compiler/rustc_builtin_macros/src/deriving/hash.rs b/compiler/rustc_builtin_macros/src/deriving/hash.rs index 2213cd6d37d2d..52239eff5da57 100644 --- a/compiler/rustc_builtin_macros/src/deriving/hash.rs +++ b/compiler/rustc_builtin_macros/src/deriving/hash.rs @@ -52,14 +52,13 @@ fn hash_substructure( let [state_expr] = substr.nonselflike_args else { cx.span_bug(trait_span, "incorrect number of arguments in `derive(Hash)`"); }; - let call_hash = |span, thing_expr| { + let call_hash = |span, expr| { let hash_path = { let strs = cx.std_path(&[sym::hash, sym::Hash, sym::hash]); cx.expr_path(cx.path_global(span, strs)) }; - let ref_thing = cx.expr_addr_of(span, thing_expr); - let expr = cx.expr_call(span, hash_path, vec![ref_thing, state_expr.clone()]); + let expr = cx.expr_call(span, hash_path, vec![expr, state_expr.clone()]); cx.stmt_expr(expr) }; let mut stmts = Vec::new(); @@ -67,11 +66,14 @@ fn hash_substructure( let fields = match substr.fields { Struct(_, fs) | EnumMatching(_, 1, .., fs) => fs, EnumMatching(.., fs) => { - let variant_value = deriving::call_intrinsic( - cx, + let variant_value = cx.expr_addr_of( trait_span, - sym::discriminant_value, - vec![cx.expr_self(trait_span)], + deriving::call_intrinsic( + cx, + trait_span, + sym::discriminant_value, + vec![cx.expr_self(trait_span)], + ), ); stmts.push(call_hash(trait_span, variant_value)); diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index ae98d1ad9e6e2..0b88d68fce8af 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -525,7 +525,7 @@ impl ::core::clone::Clone for PackedNonCopy { #[inline] fn clone(&self) -> PackedNonCopy { let Self(ref __self_0_0) = *self; - PackedNonCopy(::core::clone::Clone::clone(&*__self_0_0)) + PackedNonCopy(::core::clone::Clone::clone(__self_0_0)) } } #[automatically_derived] @@ -534,7 +534,7 @@ impl ::core::fmt::Debug for PackedNonCopy { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { let Self(ref __self_0_0) = *self; ::core::fmt::Formatter::debug_tuple_field1_finish(f, "PackedNonCopy", - &&*__self_0_0) + &__self_0_0) } } #[automatically_derived] @@ -550,7 +550,7 @@ impl ::core::default::Default for PackedNonCopy { impl ::core::hash::Hash for PackedNonCopy { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { let Self(ref __self_0_0) = *self; - ::core::hash::Hash::hash(&*__self_0_0, state) + ::core::hash::Hash::hash(__self_0_0, state) } } impl ::core::marker::StructuralPartialEq for PackedNonCopy {} @@ -589,7 +589,7 @@ impl ::core::cmp::PartialOrd for PackedNonCopy { -> ::core::option::Option<::core::cmp::Ordering> { let Self(ref __self_0_0) = *self; let Self(ref __self_1_0) = *other; - ::core::cmp::PartialOrd::partial_cmp(&*__self_0_0, &*__self_1_0) + ::core::cmp::PartialOrd::partial_cmp(__self_0_0, __self_1_0) } } #[automatically_derived] @@ -599,7 +599,7 @@ impl ::core::cmp::Ord for PackedNonCopy { fn cmp(&self, other: &PackedNonCopy) -> ::core::cmp::Ordering { let Self(ref __self_0_0) = *self; let Self(ref __self_1_0) = *other; - 
::core::cmp::Ord::cmp(&*__self_0_0, &*__self_1_0) + ::core::cmp::Ord::cmp(__self_0_0, __self_1_0) } } @@ -677,7 +677,7 @@ impl ::core::clone::Clone for Enum1 { fn clone(&self) -> Enum1 { match self { Enum1::Single { x: __self_0 } => - Enum1::Single { x: ::core::clone::Clone::clone(&*__self_0) }, + Enum1::Single { x: ::core::clone::Clone::clone(__self_0) }, } } } @@ -688,7 +688,7 @@ impl ::core::fmt::Debug for Enum1 { match self { Enum1::Single { x: __self_0 } => ::core::fmt::Formatter::debug_struct_field1_finish(f, - "Single", "x", &&*__self_0), + "Single", "x", &__self_0), } } } @@ -698,7 +698,7 @@ impl ::core::hash::Hash for Enum1 { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { match self { Enum1::Single { x: __self_0 } => { - ::core::hash::Hash::hash(&*__self_0, state) + ::core::hash::Hash::hash(__self_0, state) } } } @@ -741,7 +741,7 @@ impl ::core::cmp::PartialOrd for Enum1 { -> ::core::option::Option<::core::cmp::Ordering> { match (self, other) { (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => - ::core::cmp::PartialOrd::partial_cmp(&*__self_0, &*__arg_1_0), + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), } } } @@ -752,7 +752,7 @@ impl ::core::cmp::Ord for Enum1 { fn cmp(&self, other: &Enum1) -> ::core::cmp::Ordering { match (self, other) { (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => - ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), + ::core::cmp::Ord::cmp(__self_0, __arg_1_0), } } } @@ -963,10 +963,10 @@ impl ::core::fmt::Debug for Mixed { Mixed::Q => ::core::fmt::Formatter::write_str(f, "Q"), Mixed::R(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "R", - &&*__self_0), + &__self_0), Mixed::S { d1: __self_0, d2: __self_1 } => ::core::fmt::Formatter::debug_struct_field2_finish(f, "S", - "d1", &&*__self_0, "d2", &&*__self_1), + "d1", &__self_0, "d2", &__self_1), } } } @@ -984,13 +984,13 @@ impl ::core::hash::Hash for Mixed { Mixed::R(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); - ::core::hash::Hash::hash(&*__self_0, state) + ::core::hash::Hash::hash(__self_0, state) } Mixed::S { d1: __self_0, d2: __self_1 } => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); - ::core::hash::Hash::hash(&*__self_0, state); - ::core::hash::Hash::hash(&*__self_1, state) + ::core::hash::Hash::hash(__self_0, state); + ::core::hash::Hash::hash(__self_1, state) } _ => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), @@ -1056,16 +1056,14 @@ impl ::core::cmp::PartialOrd for Mixed { if __self_vi == __arg_1_vi { match (self, other) { (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(&*__self_0, - &*__arg_1_0), + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { d1: __arg_1_0, d2: __arg_1_1 }) => - match ::core::cmp::PartialOrd::partial_cmp(&*__self_0, - &*__arg_1_0) { + match ::core::cmp::PartialOrd::partial_cmp(__self_0, + __arg_1_0) { ::core::option::Option::Some(::core::cmp::Ordering::Equal) => - ::core::cmp::PartialOrd::partial_cmp(&*__self_1, - &*__arg_1_1), + ::core::cmp::PartialOrd::partial_cmp(__self_1, __arg_1_1), cmp => cmp, }, _ => @@ -1086,12 +1084,12 @@ impl ::core::cmp::Ord for Mixed { if __self_vi == __arg_1_vi { match (self, other) { (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => - ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), + ::core::cmp::Ord::cmp(__self_0, __arg_1_0), (Mixed::S { d1: __self_0, d2: 
__self_1 }, Mixed::S { d1: __arg_1_0, d2: __arg_1_1 }) => - match ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0) { + match ::core::cmp::Ord::cmp(__self_0, __arg_1_0) { ::core::cmp::Ordering::Equal => - ::core::cmp::Ord::cmp(&*__self_1, &*__arg_1_1), + ::core::cmp::Ord::cmp(__self_1, __arg_1_1), cmp => cmp, }, _ => ::core::cmp::Ordering::Equal, @@ -1110,11 +1108,11 @@ impl ::core::clone::Clone for Fielded { fn clone(&self) -> Fielded { match self { Fielded::X(__self_0) => - Fielded::X(::core::clone::Clone::clone(&*__self_0)), + Fielded::X(::core::clone::Clone::clone(__self_0)), Fielded::Y(__self_0) => - Fielded::Y(::core::clone::Clone::clone(&*__self_0)), + Fielded::Y(::core::clone::Clone::clone(__self_0)), Fielded::Z(__self_0) => - Fielded::Z(::core::clone::Clone::clone(&*__self_0)), + Fielded::Z(::core::clone::Clone::clone(__self_0)), } } } @@ -1125,13 +1123,13 @@ impl ::core::fmt::Debug for Fielded { match self { Fielded::X(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "X", - &&*__self_0), + &__self_0), Fielded::Y(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Y", - &&*__self_0), + &__self_0), Fielded::Z(__self_0) => ::core::fmt::Formatter::debug_tuple_field1_finish(f, "Z", - &&*__self_0), + &__self_0), } } } @@ -1143,17 +1141,17 @@ impl ::core::hash::Hash for Fielded { Fielded::X(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); - ::core::hash::Hash::hash(&*__self_0, state) + ::core::hash::Hash::hash(__self_0, state) } Fielded::Y(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); - ::core::hash::Hash::hash(&*__self_0, state) + ::core::hash::Hash::hash(__self_0, state) } Fielded::Z(__self_0) => { ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), state); - ::core::hash::Hash::hash(&*__self_0, state) + ::core::hash::Hash::hash(__self_0, state) } } } @@ -1219,14 +1217,11 @@ impl ::core::cmp::PartialOrd for Fielded { if __self_vi == __arg_1_vi { match (self, other) { (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(&*__self_0, - &*__arg_1_0), + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(&*__self_0, - &*__arg_1_0), + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(&*__self_0, - &*__arg_1_0), + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), _ => unsafe { ::core::intrinsics::unreachable() } } } else { @@ -1244,11 +1239,11 @@ impl ::core::cmp::Ord for Fielded { if __self_vi == __arg_1_vi { match (self, other) { (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => - ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), + ::core::cmp::Ord::cmp(__self_0, __arg_1_0), (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => - ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), + ::core::cmp::Ord::cmp(__self_0, __arg_1_0), (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => - ::core::cmp::Ord::cmp(&*__self_0, &*__arg_1_0), + ::core::cmp::Ord::cmp(__self_0, __arg_1_0), _ => unsafe { ::core::intrinsics::unreachable() } } } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) } From 56178d4259380e07dd4bcced502916326407e59f Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 8 Jul 2022 15:16:14 +1000 Subject: [PATCH 07/11] Rename tag-related things. 
Use `tag` in names of things referring to tags, instead of the
mysterious `vi`. Also change `arg_N` in output to `argN`, which has the
same length as `self` and so results in nicer vertical alignments.
---
 .../src/deriving/generic/mod.rs               |  43 ++---
 .../ui/deriving/deriving-all-codegen.stdout   | 179 +++++++++---------
 2 files changed, 109 insertions(+), 113 deletions(-)

diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index a5d5782dbd2d1..2e21d197cdf8e 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -146,12 +146,12 @@
 //!
 //! ```{.text}
 //! EnumNonMatchingCollapsed(
-//!     &[<ident of self vi>, <ident of other vi>])
+//!     &[<ident of self tag>, <ident of other tag>])
 //! ```
 //!
 //! It is the same for when the arguments are flipped to `C1 {x}` and
 //! `C0(a)`; the only difference is what the values of the identifiers
-//! <ident of self vi> and <ident of other vi> will
+//! <ident of self tag> and <ident of other tag> will
 //! be in the generated code.
 //!
 //! `EnumNonMatchingCollapsed` deliberately provides far less information
@@ -1125,12 +1125,12 @@ impl<'a> MethodDef<'a> {
     /// impl ::core::cmp::PartialEq for A {
     ///     #[inline]
     ///     fn eq(&self, other: &A) -> bool {
-    ///         let __self_vi = ::core::intrinsics::discriminant_value(self);
-    ///         let __arg_1_vi = ::core::intrinsics::discriminant_value(other);
-    ///         if __self_vi == __arg_1_vi {
+    ///         let __self_tag = ::core::intrinsics::discriminant_value(self);
+    ///         let __arg1_tag = ::core::intrinsics::discriminant_value(other);
+    ///         if __self_tag == __arg1_tag {
     ///             match (self, other) {
-    ///                 (A::A2(__self_0), A::A2(__arg_1_0)) =>
-    ///                     *__self_0 == *__arg_1_0,
+    ///                 (A::A2(__self_0), A::A2(__arg1_0)) =>
+    ///                     *__self_0 == *__arg1_0,
     ///                 _ => true,
     ///             }
     ///         } else {
@@ -1171,25 +1171,22 @@ impl<'a> MethodDef<'a> {
                     .iter()
                     .enumerate()
                     .skip(1)
-                    .map(|(arg_count, _selflike_arg)| format!("__arg_{}", arg_count)),
+                    .map(|(arg_count, _selflike_arg)| format!("__arg{}", arg_count)),
             )
             .collect::<Vec<String>>();
 
-        // The `vi_idents` will be bound, solely in the catch-all, to
+        // The `tag_idents` will be bound, solely in the catch-all, to
         // a series of let statements mapping each selflike_arg to an int
         // value corresponding to its discriminant.
-        let vi_idents = prefixes
+        let tag_idents = prefixes
             .iter()
-            .map(|name| {
-                let vi_suffix = format!("{}_vi", name);
-                Ident::from_str_and_span(&vi_suffix, span)
-            })
+            .map(|name| Ident::from_str_and_span(&format!("{}_tag", name), span))
             .collect::<Vec<Ident>>();
 
         // Builds, via callback to call_substructure_method, the
         // delegated expression that handles the catch-all case,
         // using `__variants_tuple` to drive logic if necessary.
-        let catch_all_substructure = EnumNonMatchingCollapsed(&vi_idents);
+        let catch_all_substructure = EnumNonMatchingCollapsed(&tag_idents);
 
         let first_fieldless = variants.iter().find(|v| v.data.fields().is_empty());
@@ -1303,15 +1300,15 @@ impl<'a> MethodDef<'a> {
             // i.e., for `enum E { A, B(1), C(T, T) }` for `PartialEq::eq`,
             // builds two statements:
             // ```
-            // let __self_vi = ::core::intrinsics::discriminant_value(self);
-            // let __arg_1_vi = ::core::intrinsics::discriminant_value(other);
+            // let __self_tag = ::core::intrinsics::discriminant_value(self);
+            // let __arg1_tag = ::core::intrinsics::discriminant_value(other);
             // ```
-            let mut index_let_stmts: Vec<ast::Stmt> = Vec::with_capacity(vi_idents.len() + 1);
+            let mut index_let_stmts: Vec<ast::Stmt> = Vec::with_capacity(tag_idents.len() + 1);
 
             // We also build an expression which checks whether all discriminants are equal, e.g.
- // `__self_vi == __arg_1_vi`. + // `__self_tag == __arg1_tag`. let mut discriminant_test = cx.expr_bool(span, true); - for (i, (&ident, selflike_arg)) in iter::zip(&vi_idents, &selflike_args).enumerate() { + for (i, (&ident, selflike_arg)) in iter::zip(&tag_idents, &selflike_args).enumerate() { let variant_value = deriving::call_intrinsic( cx, span, @@ -1322,7 +1319,7 @@ impl<'a> MethodDef<'a> { index_let_stmts.push(let_stmt); if i > 0 { - let id0 = cx.expr_ident(span, vi_idents[0]); + let id0 = cx.expr_ident(span, tag_idents[0]); let id = cx.expr_ident(span, ident); let test = cx.expr_binary(span, BinOpKind::Eq, id0, id); discriminant_test = if i == 1 { @@ -1346,7 +1343,7 @@ impl<'a> MethodDef<'a> { let match_arg = cx.expr(span, ast::ExprKind::Tup(selflike_args)); // Lastly we create an expression which branches on all discriminants being equal, e.g. - // if __self_vi == _arg_1_vi { + // if __self_tag == _arg1_tag { // match (self, other) { // (Variant1, Variant1, ...) => Body1 // (Variant2, Variant2, ...) => Body2, @@ -1355,7 +1352,7 @@ impl<'a> MethodDef<'a> { // } // } // else { - // + // // } let all_match = cx.expr_match(span, match_arg, match_arms); let arm_expr = cx.expr_if(span, discriminant_test, all_match, Some(arm_expr)); diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index 0b88d68fce8af..0337ca2634fb6 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -710,15 +710,15 @@ impl ::core::cmp::PartialEq for Enum1 { #[inline] fn eq(&self, other: &Enum1) -> bool { match (self, other) { - (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => - *__self_0 == *__arg_1_0, + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg1_0 }) => + *__self_0 == *__arg1_0, } } #[inline] fn ne(&self, other: &Enum1) -> bool { match (self, other) { - (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => - *__self_0 != *__arg_1_0, + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg1_0 }) => + *__self_0 != *__arg1_0, } } } @@ -740,8 +740,8 @@ impl ::core::cmp::PartialOrd for Enum1 { fn partial_cmp(&self, other: &Enum1) -> ::core::option::Option<::core::cmp::Ordering> { match (self, other) { - (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => - ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg1_0 }) => + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0), } } } @@ -751,8 +751,8 @@ impl ::core::cmp::Ord for Enum1 { #[inline] fn cmp(&self, other: &Enum1) -> ::core::cmp::Ordering { match (self, other) { - (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg_1_0 }) => - ::core::cmp::Ord::cmp(__self_0, __arg_1_0), + (Enum1::Single { x: __self_0 }, Enum1::Single { x: __arg1_0 }) => + ::core::cmp::Ord::cmp(__self_0, __arg1_0), } } } @@ -883,9 +883,9 @@ impl ::core::marker::StructuralPartialEq for Fieldless {} impl ::core::cmp::PartialEq for Fieldless { #[inline] fn eq(&self, other: &Fieldless) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { _ => true, } } else { false } } @@ -905,15 +905,15 @@ impl ::core::cmp::PartialOrd for Fieldless { #[inline] 
fn partial_cmp(&self, other: &Fieldless) -> ::core::option::Option<::core::cmp::Ordering> { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), } } else { - ::core::cmp::PartialOrd::partial_cmp(&__self_vi, &__arg_1_vi) + ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) } } } @@ -922,11 +922,11 @@ impl ::core::cmp::PartialOrd for Fieldless { impl ::core::cmp::Ord for Fieldless { #[inline] fn cmp(&self, other: &Fieldless) -> ::core::cmp::Ordering { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { _ => ::core::cmp::Ordering::Equal, } - } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) } + } else { ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) } } } @@ -1005,30 +1005,30 @@ impl ::core::marker::StructuralPartialEq for Mixed {} impl ::core::cmp::PartialEq for Mixed { #[inline] fn eq(&self, other: &Mixed) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => - *__self_0 == *__arg_1_0, + (Mixed::R(__self_0), Mixed::R(__arg1_0)) => + *__self_0 == *__arg1_0, (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { - d1: __arg_1_0, d2: __arg_1_1 }) => - *__self_0 == *__arg_1_0 && *__self_1 == *__arg_1_1, + d1: __arg1_0, d2: __arg1_1 }) => + *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1, _ => true, } } else { false } } #[inline] fn ne(&self, other: &Mixed) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => - *__self_0 != *__arg_1_0, + (Mixed::R(__self_0), Mixed::R(__arg1_0)) => + *__self_0 != *__arg1_0, (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { - d1: __arg_1_0, d2: __arg_1_1 }) => - *__self_0 != *__arg_1_0 || *__self_1 != *__arg_1_1, + d1: __arg1_0, d2: __arg1_1 }) => + *__self_0 != *__arg1_0 || *__self_1 != *__arg1_1, _ => false, } } else { true } @@ -1051,26 +1051,25 @@ impl ::core::cmp::PartialOrd for Mixed { #[inline] fn partial_cmp(&self, other: &Mixed) -> ::core::option::Option<::core::cmp::Ordering> { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, 
other) { - (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), + (Mixed::R(__self_0), Mixed::R(__arg1_0)) => + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0), (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { - d1: __arg_1_0, d2: __arg_1_1 }) => + d1: __arg1_0, d2: __arg1_1 }) => match ::core::cmp::PartialOrd::partial_cmp(__self_0, - __arg_1_0) { + __arg1_0) { ::core::option::Option::Some(::core::cmp::Ordering::Equal) - => - ::core::cmp::PartialOrd::partial_cmp(__self_1, __arg_1_1), + => ::core::cmp::PartialOrd::partial_cmp(__self_1, __arg1_1), cmp => cmp, }, _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), } } else { - ::core::cmp::PartialOrd::partial_cmp(&__self_vi, &__arg_1_vi) + ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) } } } @@ -1079,22 +1078,22 @@ impl ::core::cmp::PartialOrd for Mixed { impl ::core::cmp::Ord for Mixed { #[inline] fn cmp(&self, other: &Mixed) -> ::core::cmp::Ordering { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Mixed::R(__self_0), Mixed::R(__arg_1_0)) => - ::core::cmp::Ord::cmp(__self_0, __arg_1_0), + (Mixed::R(__self_0), Mixed::R(__arg1_0)) => + ::core::cmp::Ord::cmp(__self_0, __arg1_0), (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { - d1: __arg_1_0, d2: __arg_1_1 }) => - match ::core::cmp::Ord::cmp(__self_0, __arg_1_0) { + d1: __arg1_0, d2: __arg1_1 }) => + match ::core::cmp::Ord::cmp(__self_0, __arg1_0) { ::core::cmp::Ordering::Equal => - ::core::cmp::Ord::cmp(__self_1, __arg_1_1), + ::core::cmp::Ord::cmp(__self_1, __arg1_1), cmp => cmp, }, _ => ::core::cmp::Ordering::Equal, } - } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) } + } else { ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) } } } @@ -1162,32 +1161,32 @@ impl ::core::marker::StructuralPartialEq for Fielded {} impl ::core::cmp::PartialEq for Fielded { #[inline] fn eq(&self, other: &Fielded) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => - *__self_0 == *__arg_1_0, - (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => - *__self_0 == *__arg_1_0, - (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => - *__self_0 == *__arg_1_0, + (Fielded::X(__self_0), Fielded::X(__arg1_0)) => + *__self_0 == *__arg1_0, + (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => + *__self_0 == *__arg1_0, + (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => + *__self_0 == *__arg1_0, _ => unsafe { ::core::intrinsics::unreachable() } } } else { false } } #[inline] fn ne(&self, other: &Fielded) -> bool { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Fielded::X(__self_0), 
Fielded::X(__arg_1_0)) => - *__self_0 != *__arg_1_0, - (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => - *__self_0 != *__arg_1_0, - (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => - *__self_0 != *__arg_1_0, + (Fielded::X(__self_0), Fielded::X(__arg1_0)) => + *__self_0 != *__arg1_0, + (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => + *__self_0 != *__arg1_0, + (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => + *__self_0 != *__arg1_0, _ => unsafe { ::core::intrinsics::unreachable() } } } else { true } @@ -1212,20 +1211,20 @@ impl ::core::cmp::PartialOrd for Fielded { #[inline] fn partial_cmp(&self, other: &Fielded) -> ::core::option::Option<::core::cmp::Ordering> { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), - (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), - (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => - ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg_1_0), + (Fielded::X(__self_0), Fielded::X(__arg1_0)) => + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0), + (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0), + (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => + ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0), _ => unsafe { ::core::intrinsics::unreachable() } } } else { - ::core::cmp::PartialOrd::partial_cmp(&__self_vi, &__arg_1_vi) + ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) } } } @@ -1234,19 +1233,19 @@ impl ::core::cmp::PartialOrd for Fielded { impl ::core::cmp::Ord for Fielded { #[inline] fn cmp(&self, other: &Fielded) -> ::core::cmp::Ordering { - let __self_vi = ::core::intrinsics::discriminant_value(self); - let __arg_1_vi = ::core::intrinsics::discriminant_value(other); - if __self_vi == __arg_1_vi { + let __self_tag = ::core::intrinsics::discriminant_value(self); + let __arg1_tag = ::core::intrinsics::discriminant_value(other); + if __self_tag == __arg1_tag { match (self, other) { - (Fielded::X(__self_0), Fielded::X(__arg_1_0)) => - ::core::cmp::Ord::cmp(__self_0, __arg_1_0), - (Fielded::Y(__self_0), Fielded::Y(__arg_1_0)) => - ::core::cmp::Ord::cmp(__self_0, __arg_1_0), - (Fielded::Z(__self_0), Fielded::Z(__arg_1_0)) => - ::core::cmp::Ord::cmp(__self_0, __arg_1_0), + (Fielded::X(__self_0), Fielded::X(__arg1_0)) => + ::core::cmp::Ord::cmp(__self_0, __arg1_0), + (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => + ::core::cmp::Ord::cmp(__self_0, __arg1_0), + (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => + ::core::cmp::Ord::cmp(__self_0, __arg1_0), _ => unsafe { ::core::intrinsics::unreachable() } } - } else { ::core::cmp::Ord::cmp(&__self_vi, &__arg_1_vi) } + } else { ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) } } } From f1d9e2b50c71afe82ead3e99069a3fb1f96301b8 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 8 Jul 2022 15:27:42 +1000 Subject: [PATCH 08/11] Avoid some unnecessary blocks in derive output. 
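For example (a sketch, not verbatim compiler output — `tag` and `state`
stand in for the generated locals): a derived `Hash` arm that used to
wrap its single call in a block,

    _ => { ::core::hash::Hash::hash(&tag, state) }

is now emitted without the block:

    _ => ::core::hash::Hash::hash(&tag, state),

The real before/after is in the test diff below.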
---
 .../src/deriving/generic/mod.rs                  |  8 ++++++++
 src/test/ui/deriving/deriving-all-codegen.stdout | 15 ++++++---------
 2 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 2e21d197cdf8e..498d5d69ba48c 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -339,11 +339,19 @@ impl BlockOrExpr {
     // Converts it into an expression.
     fn into_expr(self, cx: &ExtCtxt<'_>, span: Span) -> P<Expr> {
         if self.0.is_empty() {
+            // No statements.
             match self.1 {
                 None => cx.expr_block(cx.block(span, vec![])),
                 Some(expr) => expr,
             }
+        } else if self.0.len() == 1
+            && let ast::StmtKind::Expr(expr) = &self.0[0].kind
+            && self.1.is_none()
+        {
+            // There's only a single statement expression. Pull it out.
+            expr.clone()
         } else {
+            // Multiple statements and/or expressions.
             cx.expr_block(self.into_block(cx, span))
         }
     }
diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout
index 0337ca2634fb6..0e222d131d741 100644
--- a/src/test/ui/deriving/deriving-all-codegen.stdout
+++ b/src/test/ui/deriving/deriving-all-codegen.stdout
@@ -697,9 +697,8 @@ impl ::core::hash::Hash for Enum1 {
     fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
         match self {
-            Enum1::Single { x: __self_0 } => {
-                ::core::hash::Hash::hash(__self_0, state)
-            }
+            Enum1::Single { x: __self_0 } =>
+                ::core::hash::Hash::hash(__self_0, state),
         }
     }
 }
@@ -870,10 +869,9 @@ impl ::core::default::Default for Fieldless {
 impl ::core::hash::Hash for Fieldless {
     fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
         match self {
-            _ => {
+            _ =>
                 ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
-                    state)
-            }
+                    state),
         }
     }
 }
@@ -992,10 +990,9 @@ impl ::core::hash::Hash for Mixed {
                 ::core::hash::Hash::hash(__self_0, state);
                 ::core::hash::Hash::hash(__self_1, state)
             }
-            _ => {
+            _ =>
                 ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self),
-                    state)
-            }
+                    state),
         }
     }
 }

From 4bcbd76bc9f8e3a22b251cbdfb21ba7815d61c7f Mon Sep 17 00:00:00 2001
From: Nicholas Nethercote
Date: Fri, 8 Jul 2022 15:31:09 +1000
Subject: [PATCH 09/11] Move the no-variants handling code earlier in `expand_enum_method_body`.

To avoid computing a bunch of stuff that it doesn't need.

---
 .../rustc_builtin_macros/src/deriving/generic/mod.rs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 498d5d69ba48c..c953cc36534f2 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -1173,6 +1173,12 @@ impl<'a> MethodDef<'a> {
         let span = trait_.span;
         let variants = &enum_def.variants;
 
+        // There is no sensible code to be generated for *any* deriving on a
+        // zero-variant enum. So we just generate a failing expression.
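+        // (Illustrative sketch: for a hypothetical `enum Void {}`, every
+        // derived body is just `unsafe { ::core::intrinsics::unreachable() }`,
+        // since no value of the type can exist.)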
+        if variants.is_empty() {
+            return BlockOrExpr(vec![], Some(deriving::call_unreachable(cx, span)));
+        }
+
         let prefixes = iter::once("__self".to_string())
             .chain(
                 selflike_args
@@ -1365,11 +1371,6 @@ impl<'a> MethodDef<'a> {
             let all_match = cx.expr_match(span, match_arg, match_arms);
             let arm_expr = cx.expr_if(span, discriminant_test, all_match, Some(arm_expr));
             BlockOrExpr(index_let_stmts, Some(arm_expr))
-        } else if variants.is_empty() {
-            // There is no sensible code to be generated for *any* deriving on
-            // a zero-variant enum. So we just generate a failing expression
-            // for the zero variant case.
-            BlockOrExpr(vec![], Some(deriving::call_unreachable(cx, span)))
         } else {
             let match_arg = if selflike_args.len() == 1 {
                 selflike_args.pop().unwrap()

From 10144e29afb432425fc88cd2534577b4efa05937 Mon Sep 17 00:00:00 2001
From: Nicholas Nethercote
Date: Fri, 8 Jul 2022 15:32:27 +1000
Subject: [PATCH 10/11] Handle tags better.

Currently, for the enums and comparison traits we always check the tag
for equality before doing anything else. This is a bit clumsy. This
commit changes things so that the tags are handled very much like a
zeroth field in the enum.

For `eq`/`ne` this makes the code slightly cleaner.

For `partial_cmp` and `cmp` it's a more notable change: in the case
where the tags aren't equal, instead of having a tag equality check
followed by a tag comparison, it just does a single tag comparison.

The commit also improves how `Hash` works for enums: instead of having
duplicated code to hash the tag for every arm within the match, we do
it just once before the match.

All this required replacing the `EnumNonMatchingCollapsed` value with a
new `EnumTag` value.

For fieldless enums the new code is particularly improved. All the code
now produced is close to optimal, being very similar to what you'd
write by hand.
---
 .../src/deriving/clone.rs                     |   6 +-
 .../src/deriving/cmp/ord.rs                   |  10 -
 .../src/deriving/cmp/partial_eq.rs            |   1 -
 .../src/deriving/cmp/partial_ord.rs           |  11 -
 .../src/deriving/debug.rs                     |   4 +-
 .../src/deriving/encodable.rs                 |   2 +-
 .../src/deriving/generic/mod.rs               | 308 +++++++++---------
 .../rustc_builtin_macros/src/deriving/hash.rs |  36 +-
 .../ui/deriving/deriving-all-codegen.stdout   | 196 +++++------
 9 files changed, 245 insertions(+), 329 deletions(-)

diff --git a/compiler/rustc_builtin_macros/src/deriving/clone.rs b/compiler/rustc_builtin_macros/src/deriving/clone.rs
index 72aa8e17d01f9..7755ff779c4d9 100644
--- a/compiler/rustc_builtin_macros/src/deriving/clone.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/clone.rs
@@ -148,7 +148,7 @@ fn cs_clone_simple(
             ),
         }
     }
-    BlockOrExpr::new_mixed(stmts, cx.expr_deref(trait_span, cx.expr_self(trait_span)))
+    BlockOrExpr::new_mixed(stmts, Some(cx.expr_deref(trait_span, cx.expr_self(trait_span))))
 }
 
 fn cs_clone(
@@ -177,9 +177,7 @@ fn cs_clone(
             all_fields = af;
             vdata = &variant.data;
         }
-        EnumNonMatchingCollapsed(..) => {
-            cx.span_bug(trait_span, &format!("non-matching enum variants in `derive({})`", name,))
-        }
+        EnumTag(..) => cx.span_bug(trait_span, &format!("enum tags in `derive({})`", name,)),
         StaticEnum(..) | StaticStruct(..)
=> { cx.span_bug(trait_span, &format!("associated function in `derive({})`", name)) } diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs index aad0ab3f5bb3b..1612be862377b 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs @@ -73,16 +73,6 @@ pub fn cs_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> Bl cx.expr_match(span, expr2, vec![eq_arm, neq_arm]) } CsFold::Fieldless => cx.expr_path(equal_path.clone()), - CsFold::EnumNonMatching(span, tag_tuple) => { - if tag_tuple.len() != 2 { - cx.span_bug(span, "not exactly 2 arguments in `derive(Ord)`") - } else { - let lft = cx.expr_addr_of(span, cx.expr_ident(span, tag_tuple[0])); - let rgt = cx.expr_addr_of(span, cx.expr_ident(span, tag_tuple[1])); - let fn_cmp_path = cx.std_path(&[sym::cmp, sym::Ord, sym::cmp]); - cx.expr_call_global(span, fn_cmp_path, vec![lft, rgt]) - } - } }, ); BlockOrExpr::new_expr(expr) diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs index 83534b62b46c7..0141b33772621 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs @@ -51,7 +51,6 @@ pub fn expand_deriving_partial_eq( } CsFold::Combine(span, expr1, expr2) => cx.expr_binary(span, combiner, expr1, expr2), CsFold::Fieldless => cx.expr_bool(span, base), - CsFold::EnumNonMatching(span, _tag_tuple) => cx.expr_bool(span, !base), }, ); BlockOrExpr::new_expr(expr) diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs index ce45b0e196556..2ebb01cc8a035 100644 --- a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs +++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs @@ -82,17 +82,6 @@ pub fn cs_partial_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_ cx.expr_match(span, expr2, vec![eq_arm, neq_arm]) } CsFold::Fieldless => cx.expr_some(span, cx.expr_path(equal_path.clone())), - CsFold::EnumNonMatching(span, tag_tuple) => { - if tag_tuple.len() != 2 { - cx.span_bug(span, "not exactly 2 arguments in `derive(PartialOrd)`") - } else { - let lft = cx.expr_addr_of(span, cx.expr_ident(span, tag_tuple[0])); - let rgt = cx.expr_addr_of(span, cx.expr_ident(span, tag_tuple[1])); - let fn_partial_cmp_path = - cx.std_path(&[sym::cmp, sym::PartialOrd, sym::partial_cmp]); - cx.expr_call_global(span, fn_partial_cmp_path, vec![lft, rgt]) - } - } }, ); BlockOrExpr::new_expr(expr) diff --git a/compiler/rustc_builtin_macros/src/deriving/debug.rs b/compiler/rustc_builtin_macros/src/deriving/debug.rs index 5de2052048692..ceef893e862eb 100644 --- a/compiler/rustc_builtin_macros/src/deriving/debug.rs +++ b/compiler/rustc_builtin_macros/src/deriving/debug.rs @@ -45,7 +45,7 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_> let (ident, vdata, fields) = match substr.fields { Struct(vdata, fields) => (substr.type_ident, *vdata, fields), EnumMatching(_, _, v, fields) => (v.ident, &v.data, fields), - EnumNonMatchingCollapsed(..) | StaticStruct(..) | StaticEnum(..) => { + EnumTag(..) | StaticStruct(..) | StaticEnum(..) 
=> {
             cx.span_bug(span, "nonsensical .fields in `#[derive(Debug)]`")
         }
     };
@@ -176,6 +176,6 @@ fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>
         stmts.push(names_let.unwrap());
     }
     stmts.push(values_let);
-    BlockOrExpr::new_mixed(stmts, expr)
+    BlockOrExpr::new_mixed(stmts, Some(expr))
 }
 }
diff --git a/compiler/rustc_builtin_macros/src/deriving/encodable.rs b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
index 49dbe51f7626c..70167cac68a7e 100644
--- a/compiler/rustc_builtin_macros/src/deriving/encodable.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
@@ -287,7 +287,7 @@ fn encodable_substructure(
                 fn_emit_enum_path,
                 vec![encoder, cx.expr_str(trait_span, substr.type_ident.name), blk],
             );
-            BlockOrExpr::new_mixed(vec![me], expr)
+            BlockOrExpr::new_mixed(vec![me], Some(expr))
         }
 
         _ => cx.bug("expected Struct or EnumMatching in derive(Encodable)"),
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index c953cc36534f2..7ff75592a52fb 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -21,21 +21,14 @@
 //!   `struct T(i32, char)`).
 //! - `EnumMatching`, when `Self` is an enum and all the arguments are the
 //!   same variant of the enum (e.g., `Some(1)`, `Some(3)` and `Some(4)`)
-//! - `EnumNonMatchingCollapsed` when `Self` is an enum and the arguments
-//!   are not the same variant (e.g., `None`, `Some(1)` and `None`).
+//! - `EnumTag` when `Self` is an enum, for comparing the enum tags.
 //! - `StaticEnum` and `StaticStruct` for static methods, where the type
 //!   being derived upon is either an enum or struct respectively. (Any
 //!   argument with type Self is just grouped among the non-self
 //!   arguments.)
 //!
 //! In the first two cases, the values from the corresponding fields in
-//! all the arguments are grouped together. For `EnumNonMatchingCollapsed`
-//! this isn't possible (different variants have different fields), so the
-//! fields are inaccessible. (Previous versions of the deriving infrastructure
-//! had a way to expand into code that could access them, at the cost of
-//! generating exponential amounts of code; see issue #15375). There are no
-//! fields with values in the static cases, so these are treated entirely
-//! differently.
+//! all the arguments are grouped together.
 //!
 //! The non-static cases have `Option<ident>` in several places associated
 //! with field `expr`s. This represents the name of the field it is
@@ -142,21 +135,15 @@
 //! }])
 //! ```
 //!
-//! For `C0(a)` and `C1 {x}` ,
+//! For the tags,
 //!
 //! ```{.text}
-//! EnumNonMatchingCollapsed(
-//!     &[<ident of self tag>, <ident of other tag>])
+//! EnumTag(
+//!     &[<ident of self tag>, <ident of other tag>], <expr>)
 //! ```
-//!
-//! It is the same for when the arguments are flipped to `C1 {x}` and
-//! `C0(a)`; the only difference is what the values of the identifiers
-//! <ident of self tag> and <ident of other tag> will
-//! be in the generated code.
-//!
-//! `EnumNonMatchingCollapsed` deliberately provides far less information
-//! than is generally available for a given pair of variants; see #15375
-//! for discussion.
+//! Note that this setup doesn't allow for the brute-force "match every variant
+//! against every other variant" approach, which is bad because it produces a
+//! quadratic amount of code (see #15375).
 //!
 //! ## Static
 //!
@@ -180,7 +167,7 @@
 use std::iter;
 use std::vec;
 use rustc_ast::ptr::P;
-use rustc_ast::{self as ast, BinOpKind, EnumDef, Expr, Generics, PatKind};
+use rustc_ast::{self as ast, EnumDef, Expr, Generics, PatKind};
 use rustc_ast::{GenericArg, GenericParamKind, VariantData};
 use rustc_attr as attr;
 use rustc_expand::base::{Annotatable, ExtCtxt};
@@ -235,6 +222,8 @@ pub struct MethodDef<'a> {
     pub attributes: Vec<ast::Attribute>,
 
     /// Can we combine fieldless variants for enums into a single match arm?
+    /// If true, indicates that the trait operation uses the enum tag in some
+    /// way.
     pub unify_fieldless_variants: bool,
 
     pub combine_substructure: RefCell<CombineSubstructureFunc<'a>>,
@@ -274,19 +263,22 @@ pub enum StaticFields {
 
 /// A summary of the possible sets of fields.
 pub enum SubstructureFields<'a> {
+    /// A non-static method with `Self` is a struct.
     Struct(&'a ast::VariantData, Vec<FieldInfo>),
+
     /// Matching variants of the enum: variant index, variant count, ast::Variant,
     /// fields: the field name is only non-`None` in the case of a struct
     /// variant.
     EnumMatching(usize, usize, &'a ast::Variant, Vec<FieldInfo>),
 
-    /// Non-matching variants of the enum, but with all state hidden from the
-    /// consequent code. The field is a list of `Ident`s bound to the variant
-    /// index values for each of the actual input `Self` arguments.
-    EnumNonMatchingCollapsed(&'a [Ident]),
+    /// The tag of an enum. The first field is a `FieldInfo` for the tags, as
+    /// if they were fields. The second field is the expression to combine the
+    /// tag expression with; it will be `None` if no match is necessary.
+    EnumTag(FieldInfo, Option<P<Expr>>),
 
     /// A static method where `Self` is a struct.
     StaticStruct(&'a ast::VariantData, StaticFields),
+
     /// A static method where `Self` is an enum.
     StaticEnum(&'a ast::EnumDef, Vec<(Ident, Span, StaticFields)>),
 }
@@ -324,8 +316,8 @@ impl BlockOrExpr {
         BlockOrExpr(vec![], Some(expr))
     }
 
-    pub fn new_mixed(stmts: Vec<ast::Stmt>, expr: P<Expr>) -> BlockOrExpr {
-        BlockOrExpr(stmts, Some(expr))
+    pub fn new_mixed(stmts: Vec<ast::Stmt>, expr: Option<P<Expr>>) -> BlockOrExpr {
+        BlockOrExpr(stmts, expr)
     }
 
     // Converts it into a block.
@@ -339,7 +331,6 @@ impl BlockOrExpr {
     // Converts it into an expression.
     fn into_expr(self, cx: &ExtCtxt<'_>, span: Span) -> P<Expr> {
         if self.0.is_empty() {
-            // No statements.
             match self.1 {
                 None => cx.expr_block(cx.block(span, vec![])),
                 Some(expr) => expr,
             }
@@ -1135,44 +1126,34 @@ impl<'a> MethodDef<'a> {
    /// impl ::core::cmp::PartialEq for A {
    ///     #[inline]
    ///     fn eq(&self, other: &A) -> bool {
    ///         let __self_tag = ::core::intrinsics::discriminant_value(self);
    ///         let __arg1_tag = ::core::intrinsics::discriminant_value(other);
-    ///         if __self_tag == __arg1_tag {
+    ///         __self_tag == __arg1_tag &&
    ///             match (self, other) {
    ///                 (A::A2(__self_0), A::A2(__arg1_0)) =>
    ///                     *__self_0 == *__arg1_0,
    ///                 _ => true,
    ///             }
-    ///         } else {
-    ///             false // catch-all handler
-    ///         }
    ///     }
    /// }
    /// ```
-    /// Creates a match for a tuple of all `selflike_args`, where either all
-    /// variants match, or it falls into a catch-all for when one variant
-    /// does not match.
-    ///
-    /// There are N + 1 cases because is a case for each of the N
-    /// variants where all of the variants match, and one catch-all for
-    /// when one does not match.
-    ///
-    /// As an optimization we generate code which checks whether all variants
-    /// match first which makes llvm see that C-like enums can be compiled into
-    /// a simple equality check (for PartialEq).
-    ///
-    /// The catch-all handler is provided access the variant index values
-    /// for each of the selflike_args, carried in precomputed variables.
+ /// Creates a tag check combined with a match for a tuple of all + /// `selflike_args`, with an arm for each variant with fields, possibly an + /// arm for each fieldless variant (if `!unify_fieldless_variants` is not + /// true), and possibly a default arm. fn expand_enum_method_body<'b>( &self, cx: &mut ExtCtxt<'_>, trait_: &TraitDef<'b>, enum_def: &'b EnumDef, type_ident: Ident, - mut selflike_args: Vec>, + selflike_args: Vec>, nonselflike_args: &[P], ) -> BlockOrExpr { let span = trait_.span; let variants = &enum_def.variants; + // Traits that unify fieldless variants always use the tag(s). + let uses_tags = self.unify_fieldless_variants; + // There is no sensible code to be generated for *any* deriving on a // zero-variant enum. So we just generate a failing expression. if variants.is_empty() { @@ -1189,27 +1170,82 @@ impl<'a> MethodDef<'a> { ) .collect::>(); - // The `tag_idents` will be bound, solely in the catch-all, to - // a series of let statements mapping each selflike_arg to an int - // value corresponding to its discriminant. - let tag_idents = prefixes - .iter() - .map(|name| Ident::from_str_and_span(&format!("{}_tag", name), span)) - .collect::>(); + // Build a series of let statements mapping each selflike_arg + // to its discriminant value. + // + // e.g. for `PartialEq::eq` builds two statements: + // ``` + // let __self_tag = ::core::intrinsics::discriminant_value(self); + // let __arg1_tag = ::core::intrinsics::discriminant_value(other); + // ``` + let get_tag_pieces = |cx: &ExtCtxt<'_>| { + let tag_idents: Vec<_> = prefixes + .iter() + .map(|name| Ident::from_str_and_span(&format!("{}_tag", name), span)) + .collect(); - // Builds, via callback to call_substructure_method, the - // delegated expression that handles the catch-all case, - // using `__variants_tuple` to drive logic if necessary. - let catch_all_substructure = EnumNonMatchingCollapsed(&tag_idents); + let mut tag_exprs: Vec<_> = tag_idents + .iter() + .map(|&ident| cx.expr_addr_of(span, cx.expr_ident(span, ident))) + .collect(); - let first_fieldless = variants.iter().find(|v| v.data.fields().is_empty()); + let self_expr = tag_exprs.remove(0); + let other_selflike_exprs = tag_exprs; + let tag_field = FieldInfo { span, name: None, self_expr, other_selflike_exprs }; + + let tag_let_stmts: Vec<_> = iter::zip(&tag_idents, &selflike_args) + .map(|(&ident, selflike_arg)| { + let variant_value = deriving::call_intrinsic( + cx, + span, + sym::discriminant_value, + vec![selflike_arg.clone()], + ); + cx.stmt_let(span, false, ident, variant_value) + }) + .collect(); + + (tag_field, tag_let_stmts) + }; + + // There are some special cases involving fieldless enums where no + // match is necessary. + let all_fieldless = variants.iter().all(|v| v.data.fields().is_empty()); + if all_fieldless { + if uses_tags && variants.len() > 1 { + // If the type is fieldless and the trait uses the tag and + // there are multiple variants, we need just an operation on + // the tag(s). + let (tag_field, mut tag_let_stmts) = get_tag_pieces(cx); + let mut tag_check = self.call_substructure_method( + cx, + trait_, + type_ident, + nonselflike_args, + &EnumTag(tag_field, None), + ); + tag_let_stmts.append(&mut tag_check.0); + return BlockOrExpr(tag_let_stmts, tag_check.1); + } + + if variants.len() == 1 { + // If there is a single variant, we don't need an operation on + // the tag(s). Just use the most degenerate result. 
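+                // (E.g. the single-variant `Fieldless1` enum in the test
+                // below: its derived `PartialEq::eq` body is just `true`.)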
+                return self.call_substructure_method(
+                    cx,
+                    trait_,
+                    type_ident,
+                    nonselflike_args,
+                    &EnumMatching(0, 1, &variants[0], Vec::new()),
+                );
+            };
+        }
 
         // These arms are of the form:
         // (Variant1, Variant1, ...) => Body1
         // (Variant2, Variant2, ...) => Body2
         // ...
         // where each tuple has length = selflike_args.len()
-
         let mut match_arms: Vec<ast::Arm> = variants
             .iter()
             .enumerate()
+            .filter(|&(_, v)| !(self.unify_fieldless_variants && v.data.fields().is_empty()))
             .map(|(index, variant)| {
@@ -1233,7 +1269,7 @@ impl<'a> MethodDef<'a> {
                     use_ref_pat,
                 );
 
-                // Here is the pat = `(&VariantK, &VariantK, ...)`
+                // `(VariantK, VariantK, ...)` or just `VariantK`.
                 let single_pat = if subpats.len() == 1 {
                     subpats.pop().unwrap()
                 } else {
@@ -1263,27 +1299,28 @@ impl<'a> MethodDef<'a> {
             })
             .collect();
 
+        // Add a default arm to the match, if necessary.
+        let first_fieldless = variants.iter().find(|v| v.data.fields().is_empty());
         let default = match first_fieldless {
             Some(v) if self.unify_fieldless_variants => {
-                // We need a default case that handles the fieldless variants.
-                // The index and actual variant aren't meaningful in this case,
-                // so just use whatever
-                let substructure = EnumMatching(0, variants.len(), v, Vec::new());
+                // We need a default case that handles all the fieldless
+                // variants. The index and actual variant aren't meaningful in
+                // this case, so just use dummy values.
                 Some(
                     self.call_substructure_method(
                         cx,
                         trait_,
                         type_ident,
                         nonselflike_args,
-                        &substructure,
+                        &EnumMatching(0, variants.len(), v, Vec::new()),
                     )
                     .into_expr(cx, span),
                 )
             }
             _ if variants.len() > 1 && selflike_args.len() > 1 => {
-                // Since we know that all the arguments will match if we reach
+                // Because we know that all the arguments will match if we reach
                 // the match expression we add the unreachable intrinsics as the
-                // result of the catch all which should help llvm in optimizing it
+                // result of the default which should help llvm in optimizing it.
                 Some(deriving::call_unreachable(cx, span))
             }
             _ => None,
@@ -1292,92 +1329,41 @@
             match_arms.push(cx.arm(span, cx.pat_wild(span), arm));
         }
 
-        // We will usually need the catch-all after matching the
-        // tuples `(VariantK, VariantK, ...)` for each VariantK of the
-        // enum. But:
-        //
-        // * when there is only one Self arg, the arms above suffice
-        // (and the deriving we call back into may not be prepared to
-        // handle EnumNonMatchCollapsed), and,
-        //
-        // * when the enum has only one variant, the single arm that
-        // is already present always suffices.
-        //
-        // * In either of the two cases above, if we *did* add a
-        // catch-all `_` match, it would trigger the
-        // unreachable-pattern error.
-        //
-        if variants.len() > 1 && selflike_args.len() > 1 {
-            // Build a series of let statements mapping each selflike_arg
-            // to its discriminant value.
-            //
-            // i.e., for `enum E { A, B(1), C(T, T) }` for `PartialEq::eq`,
-            // builds two statements:
-            // ```
-            // let __self_tag = ::core::intrinsics::discriminant_value(self);
-            // let __arg1_tag = ::core::intrinsics::discriminant_value(other);
-            // ```
-            let mut index_let_stmts: Vec<ast::Stmt> = Vec::with_capacity(tag_idents.len() + 1);
-
-            // We also build an expression which checks whether all discriminants are equal, e.g.
-            // `__self_tag == __arg1_tag`.
-            let mut discriminant_test = cx.expr_bool(span, true);
-            for (i, (&ident, selflike_arg)) in iter::zip(&tag_idents, &selflike_args).enumerate() {
-                let variant_value = deriving::call_intrinsic(
-                    cx,
-                    span,
-                    sym::discriminant_value,
-                    vec![selflike_arg.clone()],
-                );
-                let let_stmt = cx.stmt_let(span, false, ident, variant_value);
-                index_let_stmts.push(let_stmt);
-
-                if i > 0 {
-                    let id0 = cx.expr_ident(span, tag_idents[0]);
-                    let id = cx.expr_ident(span, ident);
-                    let test = cx.expr_binary(span, BinOpKind::Eq, id0, id);
-                    discriminant_test = if i == 1 {
-                        test
-                    } else {
-                        cx.expr_binary(span, BinOpKind::And, discriminant_test, test)
-                    };
-                }
-            }
-
-            let arm_expr = self
-                .call_substructure_method(
-                    cx,
-                    trait_,
-                    type_ident,
-                    nonselflike_args,
-                    &catch_all_substructure,
-                )
-                .into_expr(cx, span);
-
-            let match_arg = cx.expr(span, ast::ExprKind::Tup(selflike_args));
-
-            // Lastly we create an expression which branches on all discriminants being equal, e.g.
-            // if __self_tag == _arg1_tag {
-            //     match (self, other) {
-            //         (Variant1, Variant1, ...) => Body1
-            //         (Variant2, Variant2, ...) => Body2,
-            //         ...
-            //         _ => ::core::intrinsics::unreachable()
-            //     }
-            // }
-            // else {
-            //     <delegated expression referring to __self_tag, et al.>
-            // }
-            let all_match = cx.expr_match(span, match_arg, match_arms);
-            let arm_expr = cx.expr_if(span, discriminant_test, all_match, Some(arm_expr));
-            BlockOrExpr(index_let_stmts, Some(arm_expr))
-        } else {
+        // Create a match expression with one arm per discriminant plus
+        // possibly a default arm, e.g.:
+        //     match (self, other) {
+        //         (Variant1, Variant1, ...) => Body1
+        //         (Variant2, Variant2, ...) => Body2,
+        //         ...
+        //         _ => ::core::intrinsics::unreachable()
+        //     }
+        let get_match_expr = |mut selflike_args: Vec<P<Expr>>| {
             let match_arg = if selflike_args.len() == 1 {
                 selflike_args.pop().unwrap()
             } else {
                 cx.expr(span, ast::ExprKind::Tup(selflike_args))
             };
-            BlockOrExpr(vec![], Some(cx.expr_match(span, match_arg, match_arms)))
+            cx.expr_match(span, match_arg, match_arms)
+        };
+
+        // If the trait uses the tag and there are multiple variants, we need
+        // to add a tag check operation before the match. Otherwise, the match
+        // is enough.
+        if uses_tags && variants.len() > 1 {
+            let (tag_field, mut tag_let_stmts) = get_tag_pieces(cx);
+
+            // Combine a tag check with the match.
+            let mut tag_check_plus_match = self.call_substructure_method(
+                cx,
+                trait_,
+                type_ident,
+                nonselflike_args,
+                &EnumTag(tag_field, Some(get_match_expr(selflike_args))),
+            );
+            tag_let_stmts.append(&mut tag_check_plus_match.0);
+            BlockOrExpr(tag_let_stmts, tag_check_plus_match.1)
+        } else {
+            BlockOrExpr(vec![], Some(get_match_expr(selflike_args)))
         }
     }
@@ -1591,11 +1577,6 @@ pub enum CsFold<'a> {
     // The fallback case for a struct or enum variant with no fields.
     Fieldless,
-
-    /// The fallback case for non-matching enum variants. The slice is the
-    /// identifiers holding the variant index value for each of the `Self`
-    /// arguments.
-    EnumNonMatching(Span, &'a [Ident]),
 }
 
 /// Folds over fields, combining the expressions for each field in a sequence.
@@ -1610,8 +1591,8 @@ pub fn cs_fold(
 where
     F: FnMut(&mut ExtCtxt<'_>, CsFold<'_>) -> P<Expr>,
 {
-    match *substructure.fields {
-        EnumMatching(.., ref all_fields) | Struct(_, ref all_fields) => {
+    match substructure.fields {
+        EnumMatching(.., all_fields) | Struct(_, all_fields) => {
             if all_fields.is_empty() {
                 return f(cx, CsFold::Fieldless);
             }
@@ -1635,7 +1616,18 @@ where
                 rest.iter().rfold(base_expr, op)
             }
         }
-        EnumNonMatchingCollapsed(tuple) => f(cx, CsFold::EnumNonMatching(trait_span, tuple)),
+        EnumTag(tag_field, match_expr) => {
+            let tag_check_expr = f(cx, CsFold::Single(tag_field));
+            if let Some(match_expr) = match_expr {
+                if use_foldl {
+                    f(cx, CsFold::Combine(trait_span, tag_check_expr, match_expr.clone()))
+                } else {
+                    f(cx, CsFold::Combine(trait_span, match_expr.clone(), tag_check_expr))
+                }
+            } else {
+                tag_check_expr
+            }
+        }
         StaticEnum(..) | StaticStruct(..) => cx.span_bug(trait_span, "static function in `derive`"),
     }
 }
diff --git a/compiler/rustc_builtin_macros/src/deriving/hash.rs b/compiler/rustc_builtin_macros/src/deriving/hash.rs
index 52239eff5da57..32ae3d3447896 100644
--- a/compiler/rustc_builtin_macros/src/deriving/hash.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/hash.rs
@@ -1,6 +1,6 @@
 use crate::deriving::generic::ty::*;
 use crate::deriving::generic::*;
-use crate::deriving::{self, path_std, pathvec_std};
+use crate::deriving::{path_std, pathvec_std};
 
 use rustc_ast::{MetaItem, Mutability};
 use rustc_expand::base::{Annotatable, ExtCtxt};
@@ -61,32 +61,20 @@ fn hash_substructure(
         let expr = cx.expr_call(span, hash_path, vec![expr, state_expr.clone()]);
         cx.stmt_expr(expr)
     };
-    let mut stmts = Vec::new();
-
-    let fields = match substr.fields {
-        Struct(_, fs) | EnumMatching(_, 1, .., fs) => fs,
-        EnumMatching(.., fs) => {
-            let variant_value = cx.expr_addr_of(
-                trait_span,
-                deriving::call_intrinsic(
-                    cx,
-                    trait_span,
-                    sym::discriminant_value,
-                    vec![cx.expr_self(trait_span)],
-                ),
-            );
-
-            stmts.push(call_hash(trait_span, variant_value));
-
-            fs
+    let (stmts, match_expr) = match substr.fields {
+        Struct(_, fields) | EnumMatching(.., fields) => {
+            let stmts =
+                fields.iter().map(|field| call_hash(field.span, field.self_expr.clone())).collect();
+            (stmts, None)
+        }
+        EnumTag(tag_field, match_expr) => {
+            assert!(tag_field.other_selflike_exprs.is_empty());
+            let stmts = vec![call_hash(tag_field.span, tag_field.self_expr.clone())];
+            (stmts, match_expr.clone())
         }
         _ => cx.span_bug(trait_span, "impossible substructure in `derive(Hash)`"),
     };
-    stmts.extend(
-        fields
-            .iter()
-            .map(|FieldInfo { ref self_expr, span, ..
}| call_hash(*span, self_expr.clone())), - ); - BlockOrExpr::new_stmts(stmts) + BlockOrExpr::new_mixed(stmts, match_expr) } diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index 0e222d131d741..e129f25b0dd0b 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -766,17 +766,13 @@ enum Fieldless1 { #[allow(unused_qualifications)] impl ::core::clone::Clone for Fieldless1 { #[inline] - fn clone(&self) -> Fieldless1 { - match self { Fieldless1::A => Fieldless1::A, } - } + fn clone(&self) -> Fieldless1 { Fieldless1::A } } #[automatically_derived] #[allow(unused_qualifications)] impl ::core::fmt::Debug for Fieldless1 { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - match self { - Fieldless1::A => ::core::fmt::Formatter::write_str(f, "A"), - } + ::core::fmt::Formatter::write_str(f, "A") } } #[automatically_derived] @@ -788,18 +784,14 @@ impl ::core::default::Default for Fieldless1 { #[automatically_derived] #[allow(unused_qualifications)] impl ::core::hash::Hash for Fieldless1 { - fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match self { _ => {} } - } + fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {} } impl ::core::marker::StructuralPartialEq for Fieldless1 {} #[automatically_derived] #[allow(unused_qualifications)] impl ::core::cmp::PartialEq for Fieldless1 { #[inline] - fn eq(&self, other: &Fieldless1) -> bool { - match (self, other) { _ => true, } - } + fn eq(&self, other: &Fieldless1) -> bool { true } } impl ::core::marker::StructuralEq for Fieldless1 {} #[automatically_derived] @@ -816,9 +808,7 @@ impl ::core::cmp::PartialOrd for Fieldless1 { #[inline] fn partial_cmp(&self, other: &Fieldless1) -> ::core::option::Option<::core::cmp::Ordering> { - match (self, other) { - _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), - } + ::core::option::Option::Some(::core::cmp::Ordering::Equal) } } #[automatically_derived] @@ -826,7 +816,7 @@ impl ::core::cmp::PartialOrd for Fieldless1 { impl ::core::cmp::Ord for Fieldless1 { #[inline] fn cmp(&self, other: &Fieldless1) -> ::core::cmp::Ordering { - match (self, other) { _ => ::core::cmp::Ordering::Equal, } + ::core::cmp::Ordering::Equal } } @@ -868,11 +858,8 @@ impl ::core::default::Default for Fieldless { #[allow(unused_qualifications)] impl ::core::hash::Hash for Fieldless { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - match self { - _ => - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state), - } + let __self_tag = ::core::intrinsics::discriminant_value(self); + ::core::hash::Hash::hash(&__self_tag, state) } } impl ::core::marker::StructuralPartialEq for Fieldless {} @@ -883,9 +870,7 @@ impl ::core::cmp::PartialEq for Fieldless { fn eq(&self, other: &Fieldless) -> bool { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { _ => true, } - } else { false } + __self_tag == __arg1_tag } } impl ::core::marker::StructuralEq for Fieldless {} @@ -905,14 +890,7 @@ impl ::core::cmp::PartialOrd for Fieldless { -> ::core::option::Option<::core::cmp::Ordering> { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { - _ => - 
::core::option::Option::Some(::core::cmp::Ordering::Equal), - } - } else { - ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) - } + ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) } } #[automatically_derived] @@ -922,9 +900,7 @@ impl ::core::cmp::Ord for Fieldless { fn cmp(&self, other: &Fieldless) -> ::core::cmp::Ordering { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { _ => ::core::cmp::Ordering::Equal, } - } else { ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) } + ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) } } @@ -978,21 +954,15 @@ impl ::core::default::Default for Mixed { #[allow(unused_qualifications)] impl ::core::hash::Hash for Mixed { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { + let __self_tag = ::core::intrinsics::discriminant_value(self); + ::core::hash::Hash::hash(&__self_tag, state); match self { - Mixed::R(__self_0) => { - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state); - ::core::hash::Hash::hash(__self_0, state) - } + Mixed::R(__self_0) => ::core::hash::Hash::hash(__self_0, state), Mixed::S { d1: __self_0, d2: __self_1 } => { - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state); ::core::hash::Hash::hash(__self_0, state); ::core::hash::Hash::hash(__self_1, state) } - _ => - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state), + _ => {} } } } @@ -1004,31 +974,29 @@ impl ::core::cmp::PartialEq for Mixed { fn eq(&self, other: &Mixed) -> bool { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { - (Mixed::R(__self_0), Mixed::R(__arg1_0)) => - *__self_0 == *__arg1_0, - (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { - d1: __arg1_0, d2: __arg1_1 }) => - *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1, - _ => true, - } - } else { false } + __self_tag == __arg1_tag && + match (self, other) { + (Mixed::R(__self_0), Mixed::R(__arg1_0)) => + *__self_0 == *__arg1_0, + (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { + d1: __arg1_0, d2: __arg1_1 }) => + *__self_0 == *__arg1_0 && *__self_1 == *__arg1_1, + _ => true, + } } #[inline] fn ne(&self, other: &Mixed) -> bool { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { - (Mixed::R(__self_0), Mixed::R(__arg1_0)) => - *__self_0 != *__arg1_0, - (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { - d1: __arg1_0, d2: __arg1_1 }) => - *__self_0 != *__arg1_0 || *__self_1 != *__arg1_1, - _ => false, - } - } else { true } + __self_tag != __arg1_tag || + match (self, other) { + (Mixed::R(__self_0), Mixed::R(__arg1_0)) => + *__self_0 != *__arg1_0, + (Mixed::S { d1: __self_0, d2: __self_1 }, Mixed::S { + d1: __arg1_0, d2: __arg1_1 }) => + *__self_0 != *__arg1_0 || *__self_1 != *__arg1_1, + _ => false, + } } } impl ::core::marker::StructuralEq for Mixed {} @@ -1050,7 +1018,8 @@ impl ::core::cmp::PartialOrd for Mixed { -> ::core::option::Option<::core::cmp::Ordering> { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { + match ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) { + 
::core::option::Option::Some(::core::cmp::Ordering::Equal) => match (self, other) { (Mixed::R(__self_0), Mixed::R(__arg1_0)) => ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0), @@ -1064,10 +1033,9 @@ impl ::core::cmp::PartialOrd for Mixed { }, _ => ::core::option::Option::Some(::core::cmp::Ordering::Equal), - } - } else { - ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) - } + }, + cmp => cmp, + } } } #[automatically_derived] @@ -1077,7 +1045,8 @@ impl ::core::cmp::Ord for Mixed { fn cmp(&self, other: &Mixed) -> ::core::cmp::Ordering { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { + match ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) { + ::core::cmp::Ordering::Equal => match (self, other) { (Mixed::R(__self_0), Mixed::R(__arg1_0)) => ::core::cmp::Ord::cmp(__self_0, __arg1_0), @@ -1089,8 +1058,9 @@ impl ::core::cmp::Ord for Mixed { cmp => cmp, }, _ => ::core::cmp::Ordering::Equal, - } - } else { ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) } + }, + cmp => cmp, + } } } @@ -1133,22 +1103,12 @@ impl ::core::fmt::Debug for Fielded { #[allow(unused_qualifications)] impl ::core::hash::Hash for Fielded { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { + let __self_tag = ::core::intrinsics::discriminant_value(self); + ::core::hash::Hash::hash(&__self_tag, state); match self { - Fielded::X(__self_0) => { - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state); - ::core::hash::Hash::hash(__self_0, state) - } - Fielded::Y(__self_0) => { - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state); - ::core::hash::Hash::hash(__self_0, state) - } - Fielded::Z(__self_0) => { - ::core::hash::Hash::hash(&::core::intrinsics::discriminant_value(self), - state); - ::core::hash::Hash::hash(__self_0, state) - } + Fielded::X(__self_0) => ::core::hash::Hash::hash(__self_0, state), + Fielded::Y(__self_0) => ::core::hash::Hash::hash(__self_0, state), + Fielded::Z(__self_0) => ::core::hash::Hash::hash(__self_0, state), } } } @@ -1160,33 +1120,31 @@ impl ::core::cmp::PartialEq for Fielded { fn eq(&self, other: &Fielded) -> bool { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { - (Fielded::X(__self_0), Fielded::X(__arg1_0)) => - *__self_0 == *__arg1_0, - (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => - *__self_0 == *__arg1_0, - (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => - *__self_0 == *__arg1_0, - _ => unsafe { ::core::intrinsics::unreachable() } - } - } else { false } + __self_tag == __arg1_tag && + match (self, other) { + (Fielded::X(__self_0), Fielded::X(__arg1_0)) => + *__self_0 == *__arg1_0, + (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => + *__self_0 == *__arg1_0, + (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => + *__self_0 == *__arg1_0, + _ => unsafe { ::core::intrinsics::unreachable() } + } } #[inline] fn ne(&self, other: &Fielded) -> bool { let __self_tag = ::core::intrinsics::discriminant_value(self); let __arg1_tag = ::core::intrinsics::discriminant_value(other); - if __self_tag == __arg1_tag { - match (self, other) { - (Fielded::X(__self_0), Fielded::X(__arg1_0)) => - *__self_0 != *__arg1_0, - (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) => - *__self_0 != *__arg1_0, - (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) => - *__self_0 != *__arg1_0, - _ => unsafe { 
::core::intrinsics::unreachable() }
-            }
-        } else { true }
+        __self_tag != __arg1_tag ||
+            match (self, other) {
+                (Fielded::X(__self_0), Fielded::X(__arg1_0)) =>
+                    *__self_0 != *__arg1_0,
+                (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) =>
+                    *__self_0 != *__arg1_0,
+                (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) =>
+                    *__self_0 != *__arg1_0,
+                _ => unsafe { ::core::intrinsics::unreachable() }
+            }
    }
 }
 impl ::core::marker::StructuralEq for Fielded {}
@@ -1210,7 +1168,8 @@ impl ::core::cmp::PartialOrd for Fielded {
        -> ::core::option::Option<::core::cmp::Ordering> {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
-        if __self_tag == __arg1_tag {
+        match ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag) {
+            ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
                match (self, other) {
                    (Fielded::X(__self_0), Fielded::X(__arg1_0)) =>
                        ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0),
                    (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) =>
                        ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0),
                    (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) =>
                        ::core::cmp::PartialOrd::partial_cmp(__self_0, __arg1_0),
                    _ => unsafe { ::core::intrinsics::unreachable() }
-            }
-        } else {
-            ::core::cmp::PartialOrd::partial_cmp(&__self_tag, &__arg1_tag)
-        }
+                },
+            cmp => cmp,
+        }
    }
 }
 #[automatically_derived]
@@ -1232,7 +1190,8 @@ impl ::core::cmp::Ord for Fielded {
    fn cmp(&self, other: &Fielded) -> ::core::cmp::Ordering {
        let __self_tag = ::core::intrinsics::discriminant_value(self);
        let __arg1_tag = ::core::intrinsics::discriminant_value(other);
-        if __self_tag == __arg1_tag {
+        match ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) {
+            ::core::cmp::Ordering::Equal =>
                match (self, other) {
                    (Fielded::X(__self_0), Fielded::X(__arg1_0)) =>
                        ::core::cmp::Ord::cmp(__self_0, __arg1_0),
                    (Fielded::Y(__self_0), Fielded::Y(__arg1_0)) =>
                        ::core::cmp::Ord::cmp(__self_0, __arg1_0),
                    (Fielded::Z(__self_0), Fielded::Z(__arg1_0)) =>
                        ::core::cmp::Ord::cmp(__self_0, __arg1_0),
                    _ => unsafe { ::core::intrinsics::unreachable() }
-            }
-        } else { ::core::cmp::Ord::cmp(&__self_tag, &__arg1_tag) }
+                },
+            cmp => cmp,
+        }
    }
 }

From 1cb1d63bd2d11ba1403c76f5c22802dd62ed2387 Mon Sep 17 00:00:00 2001
From: Nicholas Nethercote
Date: Mon, 11 Jul 2022 12:54:52 +1000
Subject: [PATCH 11/11] Use `&{self.x}` for packed `Copy` structs.

Because it's more concise than the `let` form.

---
 .../src/deriving/generic/mod.rs               | 52 ++++++++++++-------
 .../ui/deriving/deriving-all-codegen.stdout   | 26 +++-------
 2 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
index 7ff75592a52fb..076b627ca79f6 100644
--- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -1013,20 +1013,25 @@ impl<'a> MethodDef<'a> {
    /// }
    /// ```
    /// But if the struct is `repr(packed)`, we can't use something like
-    /// `&self.x` on a packed type (as required for e.g. `Debug` and `Hash`)
-    /// because that might cause an unaligned ref. So we use let-destructuring
-    /// instead. If the struct impls `Copy`:
+    /// `&self.x` because that might cause an unaligned ref. So for any trait
+    /// method that takes a reference, if the struct impls `Copy` then we use a
+    /// local block to force a copy:
    /// ```
    /// # struct A { x: u8, y: u8 }
    /// impl PartialEq for A {
    ///     fn eq(&self, other: &A) -> bool {
-    ///         let Self { x: __self_0_0, y: __self_0_1 } = *self;
-    ///         let Self { x: __self_1_0, y: __self_1_1 } = *other;
-    ///         __self_0_0 == __self_1_0 && __self_0_1 == __self_1_1
+    ///         // Desugars to `{ self.x }.eq(&{ other.x }) && ...`
+    ///         { self.x } == { other.x } && { self.y } == { other.y }
+    ///     }
+    /// }
+    /// impl Hash for A {
+    ///     fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
+    ///         ::core::hash::Hash::hash(&{ self.x }, state);
+    ///         ::core::hash::Hash::hash(&{ self.y }, state)
    ///     }
    /// }
    /// ```
-    /// If it doesn't impl `Copy`:
+    /// If the struct doesn't impl `Copy`, we use let-destructuring with `ref`:
    /// ```
    /// # struct A { x: u8, y: u8 }
    /// impl PartialEq for A {
    ///     fn eq(&self, other: &A) -> bool {
@@ -1038,7 +1043,7 @@ impl<'a> MethodDef<'a> {
    ///     }
    /// }
    /// ```
    /// This latter case only works if the fields match the alignment required
-    /// by the `packed(N)` attribute.
+    /// by the `packed(N)` attribute. (We'll get errors later on if not.)
    fn expand_struct_method_body<'b>(
        &self,
        cx: &mut ExtCtxt<'_>,
@@ -1065,9 +1070,14 @@ impl<'a> MethodDef<'a> {
        if !is_packed {
            let selflike_fields =
-                trait_.create_struct_field_access_fields(cx, selflike_args, struct_def);
+                trait_.create_struct_field_access_fields(cx, selflike_args, struct_def, false);
+            mk_body(cx, selflike_fields)
+        } else if always_copy {
+            let selflike_fields =
+                trait_.create_struct_field_access_fields(cx, selflike_args, struct_def, true);
            mk_body(cx, selflike_fields)
        } else {
+            // Packed and not `Copy`. Need to use ref patterns.
            let prefixes: Vec<_> =
                (0..selflike_args.len()).map(|i| format!("__self_{}", i)).collect();
            let addr_of = always_copy;
@@ -1536,6 +1546,7 @@ impl<'a> TraitDef<'a> {
        cx: &mut ExtCtxt<'_>,
        selflike_args: &[P<Expr>],
        struct_def: &'a VariantData,
+        copy: bool,
    ) -> Vec<FieldInfo> {
        self.create_fields(struct_def, |i, struct_field, sp| {
            selflike_args
@@ -1545,18 +1556,21 @@ impl<'a> TraitDef<'a> {
                // `unwrap_or_else` case otherwise the hygiene is wrong and we get
                // "field `0` of struct `Point` is private" errors on tuple
                // structs.
- cx.expr_addr_of( + let mut field_expr = cx.expr( sp, - cx.expr( - sp, - ast::ExprKind::Field( - selflike_arg.clone(), - struct_field.ident.unwrap_or_else(|| { - Ident::from_str_and_span(&i.to_string(), struct_field.span) - }), - ), + ast::ExprKind::Field( + selflike_arg.clone(), + struct_field.ident.unwrap_or_else(|| { + Ident::from_str_and_span(&i.to_string(), struct_field.span) + }), ), - ) + ); + if copy { + field_expr = cx.expr_block( + cx.block(struct_field.span, vec![cx.stmt_expr(field_expr)]), + ); + } + cx.expr_addr_of(sp, field_expr) }) .collect() }) diff --git a/src/test/ui/deriving/deriving-all-codegen.stdout b/src/test/ui/deriving/deriving-all-codegen.stdout index e129f25b0dd0b..542911537be7e 100644 --- a/src/test/ui/deriving/deriving-all-codegen.stdout +++ b/src/test/ui/deriving/deriving-all-codegen.stdout @@ -441,9 +441,8 @@ impl ::core::marker::Copy for PackedCopy { } #[allow(unused_qualifications)] impl ::core::fmt::Debug for PackedCopy { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { - let Self(__self_0_0) = *self; ::core::fmt::Formatter::debug_tuple_field1_finish(f, "PackedCopy", - &&__self_0_0) + &&{ self.0 }) } } #[automatically_derived] @@ -458,8 +457,7 @@ impl ::core::default::Default for PackedCopy { #[allow(unused_qualifications)] impl ::core::hash::Hash for PackedCopy { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () { - let Self(__self_0_0) = *self; - ::core::hash::Hash::hash(&__self_0_0, state) + ::core::hash::Hash::hash(&{ self.0 }, state) } } impl ::core::marker::StructuralPartialEq for PackedCopy {} @@ -467,17 +465,9 @@ impl ::core::marker::StructuralPartialEq for PackedCopy {} #[allow(unused_qualifications)] impl ::core::cmp::PartialEq for PackedCopy { #[inline] - fn eq(&self, other: &PackedCopy) -> bool { - let Self(__self_0_0) = *self; - let Self(__self_1_0) = *other; - __self_0_0 == __self_1_0 - } + fn eq(&self, other: &PackedCopy) -> bool { { self.0 } == { other.0 } } #[inline] - fn ne(&self, other: &PackedCopy) -> bool { - let Self(__self_0_0) = *self; - let Self(__self_1_0) = *other; - __self_0_0 != __self_1_0 - } + fn ne(&self, other: &PackedCopy) -> bool { { self.0 } != { other.0 } } } impl ::core::marker::StructuralEq for PackedCopy {} #[automatically_derived] @@ -496,9 +486,7 @@ impl ::core::cmp::PartialOrd for PackedCopy { #[inline] fn partial_cmp(&self, other: &PackedCopy) -> ::core::option::Option<::core::cmp::Ordering> { - let Self(__self_0_0) = *self; - let Self(__self_1_0) = *other; - ::core::cmp::PartialOrd::partial_cmp(&__self_0_0, &__self_1_0) + ::core::cmp::PartialOrd::partial_cmp(&{ self.0 }, &{ other.0 }) } } #[automatically_derived] @@ -506,9 +494,7 @@ impl ::core::cmp::PartialOrd for PackedCopy { impl ::core::cmp::Ord for PackedCopy { #[inline] fn cmp(&self, other: &PackedCopy) -> ::core::cmp::Ordering { - let Self(__self_0_0) = *self; - let Self(__self_1_0) = *other; - ::core::cmp::Ord::cmp(&__self_0_0, &__self_1_0) + ::core::cmp::Ord::cmp(&{ self.0 }, &{ other.0 }) } }
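
The `&{ self.0 }` expressions in the updated stdout can be exercised outside the compiler. A minimal standalone sketch of why the block is needed (the struct name and values here are illustrative, not taken from the test file): taking a plain reference into a `repr(packed)` struct is rejected because the field may be unaligned, whereas `{ p.0 }` copies the field by value into an aligned temporary, so `&{ p.0 }` is a safe reference to pass to reference-taking trait methods like `Hash::hash`.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    #[derive(Clone, Copy)]
    #[repr(packed)]
    struct PackedCopy(u32);

    fn main() {
        let p = PackedCopy(42);
        // let r = &p.0; // rejected: reference into a packed struct may be unaligned
        // `{ p.0 }` is a by-value copy of the field (fine for a `Copy` type),
        // so `&{ p.0 }` borrows an aligned temporary instead of the raw field.
        let mut h = DefaultHasher::new();
        Hash::hash(&{ p.0 }, &mut h);
        // Even `assert_eq!` borrows its operands internally, so the block is
        // needed here too.
        assert_eq!({ p.0 }, 42);
        println!("hash = {:x}", h.finish());
    }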
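The tag-then-match shape from the earlier `EnumTag` changes can likewise be reproduced on stable Rust. A hand-written sketch of the derived `PartialEq::eq` seen in the stdout diff, using `std::mem::discriminant` as a stable stand-in for the unstable `::core::intrinsics::discriminant_value` call the generated code makes (the enum and its variants are illustrative):

    use std::mem::discriminant;

    enum Mixed {
        P,
        Q,
        R(u32),
    }

    impl PartialEq for Mixed {
        fn eq(&self, other: &Self) -> bool {
            // One tag comparison short-circuits every cross-variant case, so
            // the match only needs arms for same-variant pairs.
            discriminant(self) == discriminant(other)
                && match (self, other) {
                    (Mixed::R(a), Mixed::R(b)) => a == b,
                    // Tags are already equal here, and the remaining variants
                    // carry no fields to compare.
                    _ => true,
                }
        }
    }

    fn main() {
        assert!(Mixed::P == Mixed::P);
        assert!(Mixed::P != Mixed::Q);
        assert!(Mixed::R(1) == Mixed::R(1));
        assert!(Mixed::R(1) != Mixed::R(2));
    }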