From 373789b059324adef491bcabfff316c734522e78 Mon Sep 17 00:00:00 2001
From: Nikita Popov
Date: Mon, 21 Feb 2022 11:11:48 +0100
Subject: [PATCH 1/5] Don't use global initializer if type does not match

This was relying on the presence of a bitcast to avoid using the
constant global initializer for a load using a different type.
With opaque pointers, we need to check this explicitly.
---
 compiler/rustc_codegen_llvm/src/builder.rs | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 839018e2a759f..c41a41980eb0b 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -509,15 +509,20 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             OperandValue::Ref(place.llval, Some(llextra), place.align)
         } else if place.layout.is_llvm_immediate() {
             let mut const_llval = None;
+            let llty = place.layout.llvm_type(self);
             unsafe {
                 if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                     if llvm::LLVMIsGlobalConstant(global) == llvm::True {
-                        const_llval = llvm::LLVMGetInitializer(global);
+                        if let Some(init) = llvm::LLVMGetInitializer(global) {
+                            if self.val_ty(init) == llty {
+                                const_llval = Some(init);
+                            }
+                        }
                     }
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
+                let load = self.load(llty, place.llval, place.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }

From ebc8ab1e4e678202977e17a34313a98e7e899ed3 Mon Sep 17 00:00:00 2001
From: Nikita Popov
Date: Mon, 21 Feb 2022 12:25:54 +0100
Subject: [PATCH 2/5] Fix stack protector basic test

This is a >= condition, so we need a maximum size of 7 to not create a
stack protector in basic mode. The reason this still worked is that the
alloca type was converted into an integer (rather than an array). The
way these heuristics are implemented in LLVM is rather questionable and
not resilient to optimization.
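To make the heuristic concrete, here is a minimal Rust sketch of the kind of
locals the test below distinguishes (function names are illustrative, and it
assumes LLVM's default ssp-buffer-size of 8 together with
-Z stack-protector=basic):

// Illustrative only: the basic heuristic flags byte arrays whose size is >= 8.
#[no_mangle]
pub fn array_below_threshold(f: fn(*const u8)) {
    // 7 bytes: below the >= 8 threshold, so no stack protector is expected.
    let small = [0u8; 7];
    f(&small as *const _);
}

#[no_mangle]
pub fn array_at_threshold(f: fn(*const u8)) {
    // 8 bytes: meets the >= 8 threshold, so stack-protector instrumentation
    // (e.g. a __stack_chk_fail check on typical Linux targets) is expected.
    let big = [0u8; 8];
    f(&big as *const _);
}

Inspecting the generated assembly of each function, as the assembly test does,
shows the difference.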
---
 .../stack-protector/stack-protector-heuristics-effect.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/test/assembly/stack-protector/stack-protector-heuristics-effect.rs b/src/test/assembly/stack-protector/stack-protector-heuristics-effect.rs
index 530326ab7438a..7c2b605509053 100644
--- a/src/test/assembly/stack-protector/stack-protector-heuristics-effect.rs
+++ b/src/test/assembly/stack-protector/stack-protector-heuristics-effect.rs
@@ -78,7 +78,7 @@ pub fn array_u8_1(f: fn(*const u8)) {
 #[no_mangle]
 pub fn array_u8_small(f: fn(*const u8)) {
     let a = [0u8; 2];
-    let b = [0u8; 8];
+    let b = [0u8; 7];
     f(&a as *const _);
     f(&b as *const _);
 

From 4d7ff4e5096625b56f154fa485a1af9351c41b5c Mon Sep 17 00:00:00 2001
From: Nikita Popov
Date: Mon, 21 Feb 2022 11:21:23 +0100
Subject: [PATCH 3/5] Update some codegen tests for opaque pointers

---
 .../codegen/abi-main-signature-32bit-c-int.rs | 2 +-
 src/test/codegen/adjustments.rs | 6 +--
 src/test/codegen/align-enum.rs | 2 +-
 src/test/codegen/align-struct.rs | 5 +--
 src/test/codegen/array-equality.rs | 19 +++-----
 src/test/codegen/atomic-operations.rs | 36 +++++++--------
 src/test/codegen/c-variadic.rs | 8 ++--
 src/test/codegen/consts.rs | 4 +-
 src/test/codegen/fastcall-inreg.rs | 2 +-
 src/test/codegen/ffi-out-of-bounds-loads.rs | 2 +-
 src/test/codegen/function-arguments-noopt.rs | 8 ++--
 src/test/codegen/function-arguments.rs | 44 +++++++++----------
 src/test/codegen/gdb_debug_script_load.rs | 2 +-
 src/test/codegen/intrinsics/nontemporal.rs | 2 +-
 src/test/codegen/intrinsics/prefetch.rs | 32 +++++++-------
 src/test/codegen/intrinsics/volatile.rs | 6 +--
 src/test/codegen/intrinsics/volatile_order.rs | 2 +-
 src/test/codegen/issue-37945.rs | 8 ++--
 src/test/codegen/issue-56267-2.rs | 2 +-
 src/test/codegen/issue-56267.rs | 2 +-
 src/test/codegen/issue-56927.rs | 14 +++---
 src/test/codegen/issue-58881.rs | 2 +-
 src/test/codegen/lifetime_start_end.rs | 28 +++++-------
 src/test/codegen/loads.rs | 36 +++++++--------
 src/test/codegen/match.rs | 4 +-
 src/test/codegen/mem-replace-direct-memcpy.rs | 4 +-
 src/test/codegen/packed.rs | 40 ++++++++---------
 src/test/codegen/personality_lifetimes.rs | 8 ++--
 src/test/codegen/refs.rs | 8 ++--
 src/test/codegen/repeat-trusted-len.rs | 2 +-
 .../codegen/repr-transparent-aggregates-1.rs | 16 +++----
 src/test/codegen/repr-transparent.rs | 6 +--
 .../riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs | 10 ++---
 .../simd-intrinsic-generic-gather.rs | 4 +-
 .../simd-intrinsic-generic-scatter.rs | 4 +-
 .../simd-intrinsic-transmute-array.rs | 12 ++---
 src/test/codegen/simd_arith_offset.rs | 2 +-
 src/test/codegen/slice-init.rs | 12 ++---
 src/test/codegen/slice-iter-len-eq-zero.rs | 2 +-
 src/test/codegen/slice-ref-equality.rs | 12 ++---
 src/test/codegen/stores.rs | 12 ++---
 src/test/codegen/swap-large-types.rs | 6 +--
 src/test/codegen/thread-local.rs | 8 ++--
 src/test/codegen/transmute-scalar.rs | 42 ++++++++----------
 src/test/codegen/uninit-consts.rs | 8 ++--
 src/test/codegen/union-abi.rs | 10 ++---
 src/test/codegen/used_with_arg.rs | 4 +-
 src/test/codegen/zst-offset.rs | 9 ++--
 .../coverage-llvmir/filecheck.testprog.txt | 12 ++---
 .../filecheck-patterns.txt | 4 +-
 50 files changed, 251 insertions(+), 284 deletions(-)

diff --git a/src/test/codegen/abi-main-signature-32bit-c-int.rs b/src/test/codegen/abi-main-signature-32bit-c-int.rs
index a7a4520ff9545..31b19a54276e7 100644
--- a/src/test/codegen/abi-main-signature-32bit-c-int.rs
+++ b/src/test/codegen/abi-main-signature-32bit-c-int.rs
@@ -7,4 +7,4 @@
 fn main() {
 }
 
-// CHECK: define i32 @main(i32{{( %0)?}}, i8**{{( %1)?}}) +// CHECK: define i32 @main(i32{{( %0)?}}, {{i8\*\*|ptr}}{{( %1)?}}) diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index ded310d0aebb1..39880c9341f4f 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -13,9 +13,9 @@ pub fn helper(_: usize) { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0 -// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1 -// CHECK: ret { [0 x i8]*, [[USIZE]] } %1 +// CHECK: %0 = insertvalue { {{\[0 x i8\]\*|ptr}}, [[USIZE]] } undef, {{\[0 x i8\]\*|ptr}} %x.0, 0 +// CHECK: %1 = insertvalue { {{\[0 x i8\]\*|ptr}}, [[USIZE]] } %0, [[USIZE]] %x.1, 1 +// CHECK: ret { {{\[0 x i8\]\*|ptr}}, [[USIZE]] } %1 { x } } diff --git a/src/test/codegen/align-enum.rs b/src/test/codegen/align-enum.rs index 441cd04690e70..70f09ace0062c 100644 --- a/src/test/codegen/align-enum.rs +++ b/src/test/codegen/align-enum.rs @@ -20,7 +20,7 @@ pub struct Nested64 { #[no_mangle] pub fn align64(a: u32) -> Align64 { // CHECK: %a64 = alloca %Align64, align 64 -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 64 %{{.*}}, i8* align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 64 %{{.*}}, {{i8\*|ptr}} align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false) let a64 = Align64::A(a); a64 } diff --git a/src/test/codegen/align-struct.rs b/src/test/codegen/align-struct.rs index f129f073e98de..a2f47354b2b37 100644 --- a/src/test/codegen/align-struct.rs +++ b/src/test/codegen/align-struct.rs @@ -32,7 +32,7 @@ pub enum Enum64 { #[no_mangle] pub fn align64(i : i32) -> Align64 { // CHECK: %a64 = alloca %Align64, align 64 -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 64 %{{.*}}, i8* align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 64 %{{.*}}, {{i8\*|ptr}} align 64 %{{.*}}, i{{[0-9]+}} 64, i1 false) let a64 = Align64(i); a64 } @@ -42,8 +42,7 @@ pub fn align64(i : i32) -> Align64 { // CHECK-LABEL: @align64_load #[no_mangle] pub fn align64_load(a: Align64) -> i32 { -// CHECK: [[FIELD:%.*]] = bitcast %Align64* %{{.*}} to i32* -// CHECK: {{%.*}} = load i32, i32* [[FIELD]], align 64 +// CHECK: {{%.*}} = load i32, {{i32\*|ptr}} {{%.*}}, align 64 a.0 } diff --git a/src/test/codegen/array-equality.rs b/src/test/codegen/array-equality.rs index 8dce004b54a1b..cd5e82a9205c1 100644 --- a/src/test/codegen/array-equality.rs +++ b/src/test/codegen/array-equality.rs @@ -16,8 +16,8 @@ pub fn array_eq_value(a: [u16; 3], b: [u16; 3]) -> bool { #[no_mangle] pub fn array_eq_ref(a: &[u16; 3], b: &[u16; 3]) -> bool { // CHECK: start: - // CHECK: load i48, i48* %{{.+}}, align 2 - // CHECK: load i48, i48* %{{.+}}, align 2 + // CHECK: load i48, {{i48\*|ptr}} %{{.+}}, align 2 + // CHECK: load i48, {{i48\*|ptr}} %{{.+}}, align 2 // CHECK: icmp eq i48 // CHECK-NEXT: ret a == b @@ -27,9 +27,7 @@ pub fn array_eq_ref(a: &[u16; 3], b: &[u16; 3]) -> bool { #[no_mangle] pub fn array_eq_value_still_passed_by_pointer(a: [u16; 9], b: [u16; 9]) -> bool { // CHECK-NEXT: start: - // CHECK-NEXT: bitcast - // CHECK-NEXT: bitcast - // CHECK-NEXT: %[[CMP:.+]] = tail call i32 @{{bcmp|memcmp}}(i8* {{.*}} dereferenceable(18) %{{.+}}, i8* {{.*}} dereferenceable(18) %{{.+}}, i64 18) + // CHECK: 
%[[CMP:.+]] = tail call i32 @{{bcmp|memcmp}}({{i8\*|ptr}} {{.*}} dereferenceable(18) %{{.+}}, {{i8\*|ptr}} {{.*}} dereferenceable(18) %{{.+}}, i64 18) // CHECK-NEXT: %[[EQ:.+]] = icmp eq i32 %[[CMP]], 0 // CHECK-NEXT: ret i1 %[[EQ]] a == b @@ -39,9 +37,7 @@ pub fn array_eq_value_still_passed_by_pointer(a: [u16; 9], b: [u16; 9]) -> bool #[no_mangle] pub fn array_eq_long(a: &[u16; 1234], b: &[u16; 1234]) -> bool { // CHECK-NEXT: start: - // CHECK-NEXT: bitcast - // CHECK-NEXT: bitcast - // CHECK-NEXT: %[[CMP:.+]] = tail call i32 @{{bcmp|memcmp}}(i8* {{.*}} dereferenceable(2468) %{{.+}}, i8* {{.*}} dereferenceable(2468) %{{.+}}, i64 2468) + // CHECK: %[[CMP:.+]] = tail call i32 @{{bcmp|memcmp}}({{i8\*|ptr}} {{.*}} dereferenceable(2468) %{{.+}}, {{i8\*|ptr}} {{.*}} dereferenceable(2468) %{{.+}}, i64 2468) // CHECK-NEXT: %[[EQ:.+]] = icmp eq i32 %[[CMP]], 0 // CHECK-NEXT: ret i1 %[[EQ]] a == b @@ -56,18 +52,17 @@ pub fn array_eq_zero_short(x: [u16; 3]) -> bool { x == [0; 3] } -// CHECK-LABEL: @array_eq_zero_mid([8 x i16]* +// CHECK-LABEL: @array_eq_zero_mid( #[no_mangle] pub fn array_eq_zero_mid(x: [u16; 8]) -> bool { // CHECK-NEXT: start: - // CHECK-NEXT: bitcast - // CHECK-NEXT: %[[LOAD:.+]] = load i128, + // CHECK: %[[LOAD:.+]] = load i128, // CHECK-NEXT: %[[EQ:.+]] = icmp eq i128 %[[LOAD]], 0 // CHECK-NEXT: ret i1 %[[EQ]] x == [0; 8] } -// CHECK-LABEL: @array_eq_zero_long([1234 x i16]* +// CHECK-LABEL: @array_eq_zero_long( #[no_mangle] pub fn array_eq_zero_long(x: [u16; 1234]) -> bool { // CHECK-NEXT: start: diff --git a/src/test/codegen/atomic-operations.rs b/src/test/codegen/atomic-operations.rs index ff94ac8543f8b..a14f63726bb9a 100644 --- a/src/test/codegen/atomic-operations.rs +++ b/src/test/codegen/atomic-operations.rs @@ -8,25 +8,25 @@ use std::sync::atomic::{AtomicI32, Ordering::*}; // CHECK-LABEL: @compare_exchange #[no_mangle] pub fn compare_exchange(a: &AtomicI32) { - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 10 monotonic monotonic + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 10 monotonic monotonic let _ = a.compare_exchange(0, 10, Relaxed, Relaxed); - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 20 release monotonic + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 20 release monotonic let _ = a.compare_exchange(0, 20, Release, Relaxed); - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 30 acquire monotonic - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 31 acquire acquire + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 30 acquire monotonic + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 31 acquire acquire let _ = a.compare_exchange(0, 30, Acquire, Relaxed); let _ = a.compare_exchange(0, 31, Acquire, Acquire); - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 40 acq_rel monotonic - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 41 acq_rel acquire + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 40 acq_rel monotonic + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 41 acq_rel acquire let _ = a.compare_exchange(0, 40, AcqRel, Relaxed); let _ = a.compare_exchange(0, 41, AcqRel, Acquire); - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 50 seq_cst monotonic - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 51 seq_cst acquire - // CHECK: cmpxchg i32* %{{.*}}, i32 0, i32 52 seq_cst seq_cst + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 50 seq_cst monotonic + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 51 seq_cst acquire + // CHECK: cmpxchg {{i32\*|ptr}} %{{.*}}, i32 0, i32 52 seq_cst seq_cst let _ = a.compare_exchange(0, 50, SeqCst, Relaxed); let _ = 
a.compare_exchange(0, 51, SeqCst, Acquire); let _ = a.compare_exchange(0, 52, SeqCst, SeqCst); @@ -35,25 +35,25 @@ pub fn compare_exchange(a: &AtomicI32) { // CHECK-LABEL: @compare_exchange_weak #[no_mangle] pub fn compare_exchange_weak(w: &AtomicI32) { - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 10 monotonic monotonic + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 10 monotonic monotonic let _ = w.compare_exchange_weak(1, 10, Relaxed, Relaxed); - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 20 release monotonic + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 20 release monotonic let _ = w.compare_exchange_weak(1, 20, Release, Relaxed); - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 30 acquire monotonic - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 31 acquire acquire + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 30 acquire monotonic + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 31 acquire acquire let _ = w.compare_exchange_weak(1, 30, Acquire, Relaxed); let _ = w.compare_exchange_weak(1, 31, Acquire, Acquire); - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 40 acq_rel monotonic - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 41 acq_rel acquire + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 40 acq_rel monotonic + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 41 acq_rel acquire let _ = w.compare_exchange_weak(1, 40, AcqRel, Relaxed); let _ = w.compare_exchange_weak(1, 41, AcqRel, Acquire); - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 50 seq_cst monotonic - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 51 seq_cst acquire - // CHECK: cmpxchg weak i32* %{{.*}}, i32 1, i32 52 seq_cst seq_cst + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 50 seq_cst monotonic + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 51 seq_cst acquire + // CHECK: cmpxchg weak {{i32\*|ptr}} %{{.*}}, i32 1, i32 52 seq_cst seq_cst let _ = w.compare_exchange_weak(1, 50, SeqCst, Relaxed); let _ = w.compare_exchange_weak(1, 51, SeqCst, Acquire); let _ = w.compare_exchange_weak(1, 52, SeqCst, SeqCst); diff --git a/src/test/codegen/c-variadic.rs b/src/test/codegen/c-variadic.rs index 668d023af96a5..a5be56c47be81 100644 --- a/src/test/codegen/c-variadic.rs +++ b/src/test/codegen/c-variadic.rs @@ -28,21 +28,21 @@ pub unsafe extern "C" fn use_foreign_c_variadic_0() { // Ensure that we do not remove the `va_list` passed to the foreign function when // removing the "spoofed" `VaListImpl` that is used by Rust defined C-variadics. pub unsafe extern "C" fn use_foreign_c_variadic_1_0(ap: VaList) { - // CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap) + // CHECK: call void ({{.*}}, ...) @foreign_c_variadic_1({{.*}} %ap) foreign_c_variadic_1(ap); } pub unsafe extern "C" fn use_foreign_c_variadic_1_1(ap: VaList) { - // CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 42) + // CHECK: call void ({{.*}}, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 42) foreign_c_variadic_1(ap, 42i32); } pub unsafe extern "C" fn use_foreign_c_variadic_1_2(ap: VaList) { - // CHECK: call void ({{.*}}*, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42) + // CHECK: call void ({{.*}}, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42) foreign_c_variadic_1(ap, 2i32, 42i32); } pub unsafe extern "C" fn use_foreign_c_variadic_1_3(ap: VaList) { - // CHECK: call void ({{.*}}*, ...) 
@foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42, [[PARAM]] 0) + // CHECK: call void ({{.*}}, ...) @foreign_c_variadic_1({{.*}} %ap, [[PARAM]] 2, [[PARAM]] 42, [[PARAM]] 0) foreign_c_variadic_1(ap, 2i32, 42i32, 0i32); } diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index f2c2f47741378..c97223879ca3a 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -43,7 +43,7 @@ pub fn inline_enum_const() -> E { #[no_mangle] pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant - // CHECK: memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 2 %1, i8* align 2 getelementptr inbounds (<{ [4 x i8], [4 x i8] }>, <{ [4 x i8], [4 x i8] }>* [[LOW_HIGH]], i32 0, i32 0, i32 0), i{{(32|64)}} 8, i1 false) + // CHECK: memcpy.{{.+}}({{i8\*|ptr}} align 2 %{{[0-9]+}}, {{i8\*|ptr}} align 2 {{.*}}[[LOW_HIGH]]{{.*}}, i{{(32|64)}} 8, i1 false) *&E::A(0) } @@ -51,6 +51,6 @@ pub fn low_align_const() -> E { #[no_mangle] pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant - // CHECK: memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [4 x i8], [4 x i8] }>, <{ [4 x i8], [4 x i8] }>* [[LOW_HIGH]], i32 0, i32 0, i32 0), i{{(32|64)}} 8, i1 false) + // CHECK: memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[LOW_HIGH]]{{.*}}, i{{(32|64)}} 8, i1 false) *&E::A(0) } diff --git a/src/test/codegen/fastcall-inreg.rs b/src/test/codegen/fastcall-inreg.rs index f550ac11f64ae..d426ade28dd12 100644 --- a/src/test/codegen/fastcall-inreg.rs +++ b/src/test/codegen/fastcall-inreg.rs @@ -19,7 +19,7 @@ pub mod tests { #[no_mangle] pub extern "fastcall" fn f1(_: i32, _: i32, _: i32) {} - // CHECK: @f2(i32* inreg %_1, i32* inreg %_2, i32* %_3) + // CHECK: @f2({{i32\*|ptr}} inreg %_1, {{i32\*|ptr}} inreg %_2, {{i32\*|ptr}} %_3) #[no_mangle] pub extern "fastcall" fn f2(_: *const i32, _: *const i32, _: *const i32) {} diff --git a/src/test/codegen/ffi-out-of-bounds-loads.rs b/src/test/codegen/ffi-out-of-bounds-loads.rs index dc16306eb96be..099726b2f086e 100644 --- a/src/test/codegen/ffi-out-of-bounds-loads.rs +++ b/src/test/codegen/ffi-out-of-bounds-loads.rs @@ -18,7 +18,7 @@ extern "C" { fn main() { let s = S { f1: 1, f2: 2, f3: 3 }; unsafe { - // CHECK: load { i64, i32 }, { i64, i32 }* {{.*}}, align 4 + // CHECK: load { i64, i32 }, {{.*}}, align 4 // CHECK: call void @foo({ i64, i32 } {{.*}}) foo(s); } diff --git a/src/test/codegen/function-arguments-noopt.rs b/src/test/codegen/function-arguments-noopt.rs index 94561288dc5b9..ff76405a4ea32 100644 --- a/src/test/codegen/function-arguments-noopt.rs +++ b/src/test/codegen/function-arguments-noopt.rs @@ -23,7 +23,7 @@ pub fn boolean_call(x: bool, f: fn(bool) -> bool) -> bool { f(x) } -// CHECK: align 4 i32* @borrow(i32* align 4 %x) +// CHECK: align 4 {{i32\*|ptr}} @borrow({{i32\*|ptr}} align 4 %x) #[no_mangle] pub fn borrow(x: &i32) -> &i32 { x @@ -32,11 +32,11 @@ pub fn borrow(x: &i32) -> &i32 { // CHECK-LABEL: @borrow_call #[no_mangle] pub fn borrow_call(x: &i32, f: fn(&i32) -> &i32) -> &i32 { - // CHECK: call align 4 i32* %f(i32* align 4 %x) + // CHECK: call align 4 {{i32\*|ptr}} %f({{i32\*|ptr}} align 4 %x) f(x) } -// CHECK: void @struct_(%S* sret(%S){{( %0)?}}, %S* %x) +// CHECK: void @struct_({{%S\*|ptr}} sret(%S){{( %0)?}}, {{%S\*|ptr}} %x) #[no_mangle] pub fn struct_(x: S) -> S { x @@ -45,7 +45,7 @@ pub fn struct_(x: S) -> S { // CHECK-LABEL: @struct_call #[no_mangle] pub fn struct_call(x: 
S, f: fn(S) -> S) -> S { - // CHECK: call void %f(%S* sret(%S){{( %0)?}}, %S* %{{.+}}) + // CHECK: call void %f({{%S\*|ptr}} sret(%S){{( %0)?}}, {{%S\*|ptr}} %{{.+}}) f(x) } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index b1ccbdd934aa9..ae6abe7a184c6 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -73,67 +73,67 @@ pub fn option_nonzero_int(x: Option) -> Option { x } -// CHECK: @readonly_borrow(i32* noalias noundef readonly align 4 dereferenceable(4) %_1) +// CHECK: @readonly_borrow({{i32\*|ptr}} noalias noundef readonly align 4 dereferenceable(4) %_1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn readonly_borrow(_: &i32) { } -// CHECK: @static_borrow(i32* noalias noundef readonly align 4 dereferenceable(4) %_1) +// CHECK: @static_borrow({{i32\*|ptr}} noalias noundef readonly align 4 dereferenceable(4) %_1) // static borrow may be captured #[no_mangle] pub fn static_borrow(_: &'static i32) { } -// CHECK: @named_borrow(i32* noalias noundef readonly align 4 dereferenceable(4) %_1) +// CHECK: @named_borrow({{i32\*|ptr}} noalias noundef readonly align 4 dereferenceable(4) %_1) // borrow with named lifetime may be captured #[no_mangle] pub fn named_borrow<'r>(_: &'r i32) { } -// CHECK: @unsafe_borrow(i16* noundef align 2 dereferenceable(2) %_1) +// CHECK: @unsafe_borrow({{i16\*|ptr}} noundef align 2 dereferenceable(2) %_1) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_borrow(_: &UnsafeInner) { } -// CHECK: @mutable_unsafe_borrow(i16* noalias noundef align 2 dereferenceable(2) %_1) +// CHECK: @mutable_unsafe_borrow({{i16\*|ptr}} noalias noundef align 2 dereferenceable(2) %_1) // ... unless this is a mutable borrow, those never alias #[no_mangle] pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) { } -// CHECK: @mutable_borrow(i32* noalias noundef align 4 dereferenceable(4) %_1) +// CHECK: @mutable_borrow({{i32\*|ptr}} noalias noundef align 4 dereferenceable(4) %_1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn mutable_borrow(_: &mut i32) { } -// CHECK: @indirect_struct(%S* noalias nocapture noundef dereferenceable(32) %_1) +// CHECK: @indirect_struct({{%S\*|ptr}} noalias nocapture noundef dereferenceable(32) %_1) #[no_mangle] pub fn indirect_struct(_: S) { } -// CHECK: @borrowed_struct(%S* noalias noundef readonly align 4 dereferenceable(32) %_1) +// CHECK: @borrowed_struct({{%S\*|ptr}} noalias noundef readonly align 4 dereferenceable(32) %_1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn borrowed_struct(_: &S) { } -// CHECK: @raw_struct(%S* %_1) +// CHECK: @raw_struct({{%S\*|ptr}} %_1) #[no_mangle] pub fn raw_struct(_: *const S) { } // `Box` can get deallocated during execution of the function, so it should // not get `dereferenceable`. 
-// CHECK: noalias noundef nonnull align 4 i32* @_box(i32* noalias noundef nonnull align 4 %x) +// CHECK: noalias noundef nonnull align 4 {{i32\*|ptr}} @_box({{i32\*|ptr}} noalias noundef nonnull align 4 %x) #[no_mangle] pub fn _box(x: Box) -> Box { x } -// CHECK: @struct_return(%S* noalias nocapture noundef sret(%S) dereferenceable(32){{( %0)?}}) +// CHECK: @struct_return({{%S\*|ptr}} noalias nocapture noundef sret(%S) dereferenceable(32){{( %0)?}}) #[no_mangle] pub fn struct_return() -> S { S { @@ -147,58 +147,58 @@ pub fn struct_return() -> S { pub fn helper(_: usize) { } -// CHECK: @slice([0 x i8]* noalias noundef nonnull readonly align 1 %_1.0, [[USIZE]] %_1.1) +// CHECK: @slice({{\[0 x i8\]\*|ptr}} noalias noundef nonnull readonly align 1 %_1.0, [[USIZE]] %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn slice(_: &[u8]) { } -// CHECK: @mutable_slice([0 x i8]* noalias noundef nonnull align 1 %_1.0, [[USIZE]] %_1.1) +// CHECK: @mutable_slice({{\[0 x i8\]\*|ptr}} noalias noundef nonnull align 1 %_1.0, [[USIZE]] %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn mutable_slice(_: &mut [u8]) { } -// CHECK: @unsafe_slice([0 x i16]* noundef nonnull align 2 %_1.0, [[USIZE]] %_1.1) +// CHECK: @unsafe_slice({{\[0 x i16\]\*|ptr}} noundef nonnull align 2 %_1.0, [[USIZE]] %_1.1) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_slice(_: &[UnsafeInner]) { } -// CHECK: @raw_slice([0 x i8]* %_1.0, [[USIZE]] %_1.1) +// CHECK: @raw_slice({{\[0 x i8\]\*|ptr}} %_1.0, [[USIZE]] %_1.1) #[no_mangle] pub fn raw_slice(_: *const [u8]) { } -// CHECK: @str([0 x i8]* noalias noundef nonnull readonly align 1 %_1.0, [[USIZE]] %_1.1) +// CHECK: @str({{\[0 x i8\]\*|ptr}} noalias noundef nonnull readonly align 1 %_1.0, [[USIZE]] %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn str(_: &[u8]) { } -// CHECK: @trait_borrow({}* noundef nonnull align 1 %_1.0, [3 x [[USIZE]]]* noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %_1.1) +// CHECK: @trait_borrow({{\{\}\*|ptr}} noundef nonnull align 1 %_1.0, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn trait_borrow(_: &Drop) { } -// CHECK: @trait_raw({}* %_1.0, [3 x [[USIZE]]]* noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %_1.1) +// CHECK: @trait_raw({{\{\}\*|ptr}} %_1.0, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %_1.1) #[no_mangle] pub fn trait_raw(_: *const Drop) { } -// CHECK: @trait_box({}* noalias noundef nonnull align 1{{( %0)?}}, [3 x [[USIZE]]]* noalias noundef readonly align {{.*}} dereferenceable({{.*}}){{( %1)?}}) +// CHECK: @trait_box({{\{\}\*|ptr}} noalias noundef nonnull align 1{{( %0)?}}, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}){{( %1)?}}) #[no_mangle] pub fn trait_box(_: Box) { } -// CHECK: { i8*, i8* } @trait_option(i8* noalias noundef align 1 %x.0, i8* %x.1) +// CHECK: { {{i8\*|ptr}}, {{i8\*|ptr}} } @trait_option({{i8\*|ptr}} noalias noundef align 1 %x.0, {{i8\*|ptr}} %x.1) #[no_mangle] pub fn trait_option(x: Option>) -> Option> { x } -// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias noundef nonnull readonly align 2 %x.0, [[USIZE]] %x.1) +// CHECK: { {{\[0 x i16\]\*|ptr}}, [[USIZE]] } @return_slice({{\[0 x i16\]\*|ptr}} noalias noundef nonnull readonly align 2 %x.0, [[USIZE]] %x.1) #[no_mangle] pub fn return_slice(x: 
&[u16]) -> &[u16] { x @@ -216,7 +216,7 @@ pub fn enum_id_2(x: Option) -> Option { x } -// CHECK: noalias i8* @allocator() +// CHECK: noalias {{i8\*|ptr}} @allocator() #[no_mangle] #[rustc_allocator] pub fn allocator() -> *const i8 { diff --git a/src/test/codegen/gdb_debug_script_load.rs b/src/test/codegen/gdb_debug_script_load.rs index 856b67bf9df93..002be8d1b4192 100644 --- a/src/test/codegen/gdb_debug_script_load.rs +++ b/src/test/codegen/gdb_debug_script_load.rs @@ -9,7 +9,7 @@ #![feature(start)] // CHECK-LABEL: @main -// CHECK: load volatile i8, i8* getelementptr inbounds ([[B:\[[0-9]* x i8\]]], [[B]]* @__rustc_debug_gdb_scripts_section__, i32 0, i32 0), align 1 +// CHECK: load volatile i8, {{.+}} @__rustc_debug_gdb_scripts_section__ #[start] fn start(_: isize, _: *const *const u8) -> isize { diff --git a/src/test/codegen/intrinsics/nontemporal.rs b/src/test/codegen/intrinsics/nontemporal.rs index d13f3e51ba4c7..d8ee294526620 100644 --- a/src/test/codegen/intrinsics/nontemporal.rs +++ b/src/test/codegen/intrinsics/nontemporal.rs @@ -6,7 +6,7 @@ #[no_mangle] pub fn a(a: &mut u32, b: u32) { // CHECK-LABEL: define{{.*}}void @a - // CHECK: store i32 %b, i32* %a, align 4, !nontemporal + // CHECK: store i32 %b, {{i32\*|ptr}} %a, align 4, !nontemporal unsafe { std::intrinsics::nontemporal_store(a, b); } diff --git a/src/test/codegen/intrinsics/prefetch.rs b/src/test/codegen/intrinsics/prefetch.rs index 2386fc43007a2..59d7fa6381b8f 100644 --- a/src/test/codegen/intrinsics/prefetch.rs +++ b/src/test/codegen/intrinsics/prefetch.rs @@ -9,13 +9,13 @@ use std::intrinsics::{prefetch_read_data, prefetch_write_data, #[no_mangle] pub fn check_prefetch_read_data(data: &[i8]) { unsafe { - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 0, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 0, i32 1) prefetch_read_data(data.as_ptr(), 0); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 1, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 1, i32 1) prefetch_read_data(data.as_ptr(), 1); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 2, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 2, i32 1) prefetch_read_data(data.as_ptr(), 2); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 3, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 3, i32 1) prefetch_read_data(data.as_ptr(), 3); } } @@ -23,13 +23,13 @@ pub fn check_prefetch_read_data(data: &[i8]) { #[no_mangle] pub fn check_prefetch_write_data(data: &[i8]) { unsafe { - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 0, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 0, i32 1) prefetch_write_data(data.as_ptr(), 0); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 1, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 1, i32 1) prefetch_write_data(data.as_ptr(), 1); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 2, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 2, i32 1) prefetch_write_data(data.as_ptr(), 2); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 3, i32 1) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 3, i32 1) prefetch_write_data(data.as_ptr(), 3); } } @@ -37,13 +37,13 @@ pub fn check_prefetch_write_data(data: &[i8]) { #[no_mangle] pub fn check_prefetch_read_instruction(data: &[i8]) { unsafe { - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, 
i32 0, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 0, i32 0) prefetch_read_instruction(data.as_ptr(), 0); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 1, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 1, i32 0) prefetch_read_instruction(data.as_ptr(), 1); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 2, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 2, i32 0) prefetch_read_instruction(data.as_ptr(), 2); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 0, i32 3, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 0, i32 3, i32 0) prefetch_read_instruction(data.as_ptr(), 3); } } @@ -51,13 +51,13 @@ pub fn check_prefetch_read_instruction(data: &[i8]) { #[no_mangle] pub fn check_prefetch_write_instruction(data: &[i8]) { unsafe { - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 0, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 0, i32 0) prefetch_write_instruction(data.as_ptr(), 0); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 1, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 1, i32 0) prefetch_write_instruction(data.as_ptr(), 1); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 2, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 2, i32 0) prefetch_write_instruction(data.as_ptr(), 2); - // CHECK: call void @llvm.prefetch{{.*}}(i8* %{{.*}}, i32 1, i32 3, i32 0) + // CHECK: call void @llvm.prefetch{{.*}}({{.*}}, i32 1, i32 3, i32 0) prefetch_write_instruction(data.as_ptr(), 3); } } diff --git a/src/test/codegen/intrinsics/volatile.rs b/src/test/codegen/intrinsics/volatile.rs index 1970517e73262..7980c00e7e729 100644 --- a/src/test/codegen/intrinsics/volatile.rs +++ b/src/test/codegen/intrinsics/volatile.rs @@ -8,21 +8,21 @@ use std::intrinsics; // CHECK-LABEL: @volatile_copy_memory #[no_mangle] pub unsafe fn volatile_copy_memory(a: *mut u8, b: *const u8) { - // CHECK: llvm.memmove.p0i8.p0i8.{{\w*(.*true)}} + // CHECK: llvm.memmove.{{\w*(.*true)}} intrinsics::volatile_copy_memory(a, b, 1) } // CHECK-LABEL: @volatile_copy_nonoverlapping_memory #[no_mangle] pub unsafe fn volatile_copy_nonoverlapping_memory(a: *mut u8, b: *const u8) { - // CHECK: llvm.memcpy.p0i8.p0i8.{{\w*(.*true)}} + // CHECK: llvm.memcpy.{{\w*(.*true)}} intrinsics::volatile_copy_nonoverlapping_memory(a, b, 1) } // CHECK-LABEL: @volatile_set_memory #[no_mangle] pub unsafe fn volatile_set_memory(a: *mut u8, b: u8) { - // CHECK: llvm.memset.p0i8.{{\w*(.*true)}} + // CHECK: llvm.memset.{{\w*(.*true)}} intrinsics::volatile_set_memory(a, b, 1) } diff --git a/src/test/codegen/intrinsics/volatile_order.rs b/src/test/codegen/intrinsics/volatile_order.rs index 29331219ba6ee..99469831a6c40 100644 --- a/src/test/codegen/intrinsics/volatile_order.rs +++ b/src/test/codegen/intrinsics/volatile_order.rs @@ -13,6 +13,6 @@ pub unsafe fn test_volatile_order() { volatile_store(&mut *a, 12); // CHECK: store volatile unaligned_volatile_store(&mut *a, 12); - // CHECK: llvm.memset.p0i8 + // CHECK: llvm.memset.p0 volatile_set_memory(&mut *a, 12, 1) } diff --git a/src/test/codegen/issue-37945.rs b/src/test/codegen/issue-37945.rs index a91e8e817e403..ee63a783f52db 100644 --- a/src/test/codegen/issue-37945.rs +++ b/src/test/codegen/issue-37945.rs @@ -15,9 +15,9 @@ use std::slice::Iter; pub fn is_empty_1(xs: Iter) -> bool { // CHECK-LABEL: @is_empty_1( // CHECK-NEXT: start: -// CHECK-NEXT: [[A:%.*]] = icmp ne 
i32* %xs.1, null +// CHECK-NEXT: [[A:%.*]] = icmp ne {{i32\*|ptr}} %xs.1, null // CHECK-NEXT: tail call void @llvm.assume(i1 [[A]]) -// CHECK-NEXT: [[B:%.*]] = icmp eq i32* %xs.0, %xs.1 +// CHECK-NEXT: [[B:%.*]] = icmp eq {{i32\*|ptr}} %xs.0, %xs.1 // CHECK-NEXT: ret i1 [[B:%.*]] {xs}.next().is_none() } @@ -26,9 +26,9 @@ pub fn is_empty_1(xs: Iter) -> bool { pub fn is_empty_2(xs: Iter) -> bool { // CHECK-LABEL: @is_empty_2 // CHECK-NEXT: start: -// CHECK-NEXT: [[C:%.*]] = icmp ne i32* %xs.1, null +// CHECK-NEXT: [[C:%.*]] = icmp ne {{i32\*|ptr}} %xs.1, null // CHECK-NEXT: tail call void @llvm.assume(i1 [[C]]) -// CHECK-NEXT: [[D:%.*]] = icmp eq i32* %xs.0, %xs.1 +// CHECK-NEXT: [[D:%.*]] = icmp eq {{i32\*|ptr}} %xs.0, %xs.1 // CHECK-NEXT: ret i1 [[D:%.*]] xs.map(|&x| x).next().is_none() } diff --git a/src/test/codegen/issue-56267-2.rs b/src/test/codegen/issue-56267-2.rs index 53b83f4a5305b..4dc9ebfebbcdf 100644 --- a/src/test/codegen/issue-56267-2.rs +++ b/src/test/codegen/issue-56267-2.rs @@ -11,7 +11,7 @@ pub struct Foo { // The load from bar.1 should have alignment 4. Not checking // other loads here, as the alignment will be platform-dependent. -// CHECK: %{{.+}} = load i32, i32* %{{.+}}, align 4 +// CHECK: %{{.+}} = load i32, {{i32\*|ptr}} %{{.+}}, align 4 #[no_mangle] pub fn test(x: Foo<(i32, i32)>) -> (i32, i32) { x.bar diff --git a/src/test/codegen/issue-56267.rs b/src/test/codegen/issue-56267.rs index 2c33f558931ed..7bdd257799832 100644 --- a/src/test/codegen/issue-56267.rs +++ b/src/test/codegen/issue-56267.rs @@ -11,7 +11,7 @@ pub struct Foo { // The store writing to bar.1 should have alignment 4. Not checking // other stores here, as the alignment will be platform-dependent. -// CHECK: store i32 [[TMP1:%.+]], i32* [[TMP2:%.+]], align 4 +// CHECK: store i32 [[TMP1:%.+]], {{i32\*|ptr}} [[TMP2:%.+]], align 4 #[no_mangle] pub fn test(x: (i32, i32)) -> Foo<(i32, i32)> { Foo { foo: 0, bar: x } diff --git a/src/test/codegen/issue-56927.rs b/src/test/codegen/issue-56927.rs index 2c84015d5e29a..044d721814bd3 100644 --- a/src/test/codegen/issue-56927.rs +++ b/src/test/codegen/issue-56927.rs @@ -8,10 +8,10 @@ pub struct S { } // CHECK-LABEL: @test1 -// CHECK: store i32 0, i32* %{{.+}}, align 16 -// CHECK: store i32 1, i32* %{{.+}}, align 4 -// CHECK: store i32 2, i32* %{{.+}}, align 8 -// CHECK: store i32 3, i32* %{{.+}}, align 4 +// CHECK: store i32 0, {{i32\*|ptr}} %{{.+}}, align 16 +// CHECK: store i32 1, {{i32\*|ptr}} %{{.+}}, align 4 +// CHECK: store i32 2, {{i32\*|ptr}} %{{.+}}, align 8 +// CHECK: store i32 3, {{i32\*|ptr}} %{{.+}}, align 4 #[no_mangle] pub fn test1(s: &mut S) { s.arr[0] = 0; @@ -21,7 +21,7 @@ pub fn test1(s: &mut S) { } // CHECK-LABEL: @test2 -// CHECK: store i32 4, i32* %{{.+}}, align 4 +// CHECK: store i32 4, {{i32\*|ptr}} %{{.+}}, align 4 #[allow(unconditional_panic)] #[no_mangle] pub fn test2(s: &mut S) { @@ -29,14 +29,14 @@ pub fn test2(s: &mut S) { } // CHECK-LABEL: @test3 -// CHECK: store i32 5, i32* %{{.+}}, align 4 +// CHECK: store i32 5, {{i32\*|ptr}} %{{.+}}, align 4 #[no_mangle] pub fn test3(s: &mut S, i: usize) { s.arr[i] = 5; } // CHECK-LABEL: @test4 -// CHECK: store i32 6, i32* %{{.+}}, align 4 +// CHECK: store i32 6, {{i32\*|ptr}} %{{.+}}, align 4 #[no_mangle] pub fn test4(s: &mut S) { s.arr = [6; 4]; diff --git a/src/test/codegen/issue-58881.rs b/src/test/codegen/issue-58881.rs index de451324f03c4..0900a33377bcd 100644 --- a/src/test/codegen/issue-58881.rs +++ b/src/test/codegen/issue-58881.rs @@ -16,6 +16,6 @@ struct Bar(u64, u64, u64); // Ensure 
that emit arguments of the correct type. pub unsafe fn test_call_variadic() { - // CHECK: call void (i32, ...) @variadic_fn(i32 0, i8 {{.*}}, %Bar* {{.*}}) + // CHECK: call void (i32, ...) @variadic_fn(i32 0, i8 {{.*}}, {{%Bar\*|ptr}} {{.*}}) variadic_fn(0, Foo(0), Bar(0, 0, 0)) } diff --git a/src/test/codegen/lifetime_start_end.rs b/src/test/codegen/lifetime_start_end.rs index da35789ce8de7..471a0b8cedd78 100644 --- a/src/test/codegen/lifetime_start_end.rs +++ b/src/test/codegen/lifetime_start_end.rs @@ -5,38 +5,30 @@ // CHECK-LABEL: @test #[no_mangle] pub fn test() { - let a = 0; + let a = 0u8; &a; // keep variable in an alloca -// CHECK: [[S_a:%[0-9]+]] = bitcast i32* %a to i8* -// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, i8* [[S_a]]) +// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, {{i8\*|ptr}} %a) { let b = &Some(a); &b; // keep variable in an alloca -// CHECK: [[S_b:%[0-9]+]] = bitcast { i32, i32 }** %b to i8* -// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, i8* [[S_b]]) +// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, {{.*}}) -// CHECK: [[S__4:%[0-9]+]] = bitcast { i32, i32 }* %_5 to i8* -// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, i8* [[S__4]]) +// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, {{.*}}) -// CHECK: [[E__4:%[0-9]+]] = bitcast { i32, i32 }* %_5 to i8* -// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, i8* [[E__4]]) +// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, {{.*}}) -// CHECK: [[E_b:%[0-9]+]] = bitcast { i32, i32 }** %b to i8* -// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, i8* [[E_b]]) +// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, {{.*}}) } - let c = 1; + let c = 1u8; &c; // keep variable in an alloca -// CHECK: [[S_c:%[0-9]+]] = bitcast i32* %c to i8* -// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, i8* [[S_c]]) +// CHECK: call void @llvm.lifetime.start{{.*}}(i{{[0-9 ]+}}, {{i8\*|ptr}} %c) -// CHECK: [[E_c:%[0-9]+]] = bitcast i32* %c to i8* -// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, i8* [[E_c]]) +// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, {{i8\*|ptr}} %c) -// CHECK: [[E_a:%[0-9]+]] = bitcast i32* %a to i8* -// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, i8* [[E_a]]) +// CHECK: call void @llvm.lifetime.end{{.*}}(i{{[0-9 ]+}}, {{i8\*|ptr}} %a) } diff --git a/src/test/codegen/loads.rs b/src/test/codegen/loads.rs index c7444ce02fa25..07de385193f74 100644 --- a/src/test/codegen/loads.rs +++ b/src/test/codegen/loads.rs @@ -21,29 +21,29 @@ pub enum MyBool { #[repr(align(16))] pub struct Align16(u128); -// CHECK: @ptr_alignment_helper({}** {{.*}}align [[PTR_ALIGNMENT:[0-9]+]] +// CHECK: @ptr_alignment_helper({{.*}}align [[PTR_ALIGNMENT:[0-9]+]] #[no_mangle] pub fn ptr_alignment_helper(x: &&()) {} // CHECK-LABEL: @load_ref #[no_mangle] pub fn load_ref<'a>(x: &&'a i32) -> &'a i32 { -// CHECK: load i32*, i32** %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META:[0-9]+]], !noundef !{{[0-9]+}} +// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META:[0-9]+]], !noundef !{{[0-9]+}} *x } // CHECK-LABEL: @load_ref_higher_alignment #[no_mangle] pub fn load_ref_higher_alignment<'a>(x: &&'a Align16) -> &'a Align16 { -// CHECK: load {{%Align16|i128}}*, {{%Align16|i128}}** %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_16_META:[0-9]+]], !noundef !{{[0-9]+}} +// CHECK: load 
{{%Align16\*|i128\*|ptr}}, {{%Align16\*\*|i128\*\*|ptr}} %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_16_META:[0-9]+]], !noundef !{{[0-9]+}} *x } // CHECK-LABEL: @load_scalar_pair #[no_mangle] pub fn load_scalar_pair<'a>(x: &(&'a i32, &'a Align16)) -> (&'a i32, &'a Align16) { -// CHECK: load i32*, i32** %{{.+}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}} -// CHECK: load i64*, i64** %{{.+}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_16_META]], !noundef !{{[0-9]+}} +// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %{{.+}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}} +// CHECK: load {{i64\*|ptr}}, {{i64\*\*|ptr}} %{{.+}}, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_16_META]], !noundef !{{[0-9]+}} *x } @@ -51,70 +51,70 @@ pub fn load_scalar_pair<'a>(x: &(&'a i32, &'a Align16)) -> (&'a i32, &'a Align16 #[no_mangle] pub fn load_raw_pointer<'a>(x: &*const i32) -> *const i32 { // loaded raw pointer should not have !nonnull, !align, or !noundef metadata -// CHECK: load i32*, i32** %x, align [[PTR_ALIGNMENT]]{{$}} +// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x, align [[PTR_ALIGNMENT]]{{$}} *x } // CHECK-LABEL: @load_box #[no_mangle] pub fn load_box<'a>(x: Box>) -> Box { -// CHECK: load i32*, i32** %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}} +// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x, align [[PTR_ALIGNMENT]], !nonnull !{{[0-9]+}}, !align ![[ALIGN_4_META]], !noundef !{{[0-9]+}} *x } // CHECK-LABEL: @load_bool #[no_mangle] pub fn load_bool(x: &bool) -> bool { -// CHECK: load i8, i8* %x, align 1, !range ![[BOOL_RANGE:[0-9]+]], !noundef !{{[0-9]+}} +// CHECK: load i8, {{i8\*|ptr}} %x, align 1, !range ![[BOOL_RANGE:[0-9]+]], !noundef !{{[0-9]+}} *x } // CHECK-LABEL: @load_maybeuninit_bool #[no_mangle] pub fn load_maybeuninit_bool(x: &MaybeUninit) -> MaybeUninit { -// CHECK: load i8, i8* %x, align 1{{$}} +// CHECK: load i8, {{i8\*|ptr}} %x, align 1{{$}} *x } // CHECK-LABEL: @load_enum_bool #[no_mangle] pub fn load_enum_bool(x: &MyBool) -> MyBool { -// CHECK: load i8, i8* %x, align 1, !range ![[BOOL_RANGE]], !noundef !{{[0-9]+}} +// CHECK: load i8, {{i8\*|ptr}} %x, align 1, !range ![[BOOL_RANGE]], !noundef !{{[0-9]+}} *x } // CHECK-LABEL: @load_maybeuninit_enum_bool #[no_mangle] pub fn load_maybeuninit_enum_bool(x: &MaybeUninit) -> MaybeUninit { -// CHECK: load i8, i8* %x, align 1{{$}} +// CHECK: load i8, {{i8\*|ptr}} %x, align 1{{$}} *x } // CHECK-LABEL: @load_int #[no_mangle] pub fn load_int(x: &u16) -> u16 { -// CHECK: load i16, i16* %x, align 2{{$}} +// CHECK: load i16, {{i16\*|ptr}} %x, align 2{{$}} *x } // CHECK-LABEL: @load_nonzero_int #[no_mangle] pub fn load_nonzero_int(x: &NonZeroU16) -> NonZeroU16 { -// CHECK: load i16, i16* %x, align 2, !range ![[NONZEROU16_RANGE:[0-9]+]], !noundef !{{[0-9]+}} +// CHECK: load i16, {{i16\*|ptr}} %x, align 2, !range ![[NONZEROU16_RANGE:[0-9]+]], !noundef !{{[0-9]+}} *x } // CHECK-LABEL: @load_option_nonzero_int #[no_mangle] pub fn load_option_nonzero_int(x: &Option) -> Option { -// CHECK: load i16, i16* %x, align 2{{$}} +// CHECK: load i16, {{i16\*|ptr}} %x, align 2{{$}} *x } // CHECK-LABEL: @borrow #[no_mangle] pub fn borrow(x: &i32) -> &i32 { -// CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull +// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x{{.*}}, !nonnull &x; // keep variable in an alloca x } @@ -122,7 +122,7 @@ pub fn borrow(x: &i32) 
-> &i32 { // CHECK-LABEL: @_box #[no_mangle] pub fn _box(x: Box) -> i32 { -// CHECK: load {{(i32\*, )?}}i32** %x{{.*}}, !nonnull +// CHECK: load {{i32\*|ptr}}, {{i32\*\*|ptr}} %x{{.*}}, !nonnull *x } @@ -131,7 +131,7 @@ pub fn _box(x: Box) -> i32 { // dependent alignment #[no_mangle] pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4] { -// CHECK: [[VAR:%[0-9]+]] = load {{(i32, )?}}i32* %{{.*}}, align 1 +// CHECK: [[VAR:%[0-9]+]] = load i32, {{i32\*|ptr}} %{{.*}}, align 1 // CHECK: ret i32 [[VAR]] x } @@ -141,7 +141,7 @@ pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4] { // dependent alignment #[no_mangle] pub fn small_struct_alignment(x: Bytes) -> Bytes { -// CHECK: [[VAR:%[0-9]+]] = load {{(i32, )?}}i32* %{{.*}}, align 1 +// CHECK: [[VAR:%[0-9]+]] = load i32, {{i32\*|ptr}} %{{.*}}, align 1 // CHECK: ret i32 [[VAR]] x } diff --git a/src/test/codegen/match.rs b/src/test/codegen/match.rs index d0c0202052ed6..b203641fddbd0 100644 --- a/src/test/codegen/match.rs +++ b/src/test/codegen/match.rs @@ -17,10 +17,10 @@ pub fn exhaustive_match(e: E) -> u8 { // CHECK: [[OTHERWISE]]: // CHECK-NEXT: unreachable // CHECK: [[A]]: -// CHECK-NEXT: store i8 0, i8* %1, align 1 +// CHECK-NEXT: store i8 0, {{i8\*|ptr}} %1, align 1 // CHECK-NEXT: br label %[[EXIT:[a-zA-Z0-9_]+]] // CHECK: [[B]]: -// CHECK-NEXT: store i8 1, i8* %1, align 1 +// CHECK-NEXT: store i8 1, {{i8\*|ptr}} %1, align 1 // CHECK-NEXT: br label %[[EXIT:[a-zA-Z0-9_]+]] match e { E::A => 0, diff --git a/src/test/codegen/mem-replace-direct-memcpy.rs b/src/test/codegen/mem-replace-direct-memcpy.rs index 47f4fc27fd85a..d1c4c56dbe468 100644 --- a/src/test/codegen/mem-replace-direct-memcpy.rs +++ b/src/test/codegen/mem-replace-direct-memcpy.rs @@ -17,9 +17,9 @@ pub fn replace_byte(dst: &mut u8, src: u8) -> u8 { // CHECK-NOT: call void @llvm.memcpy // CHECK: ; core::ptr::read // CHECK-NOT: call void @llvm.memcpy -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{.*}}(i8* align 1 %{{.*}}, i8* align 1 %src, i{{.*}} 1, i1 false) +// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 1 %{{.*}}, {{i8\*|ptr}} align 1 %src, i{{.*}} 1, i1 false) // CHECK-NOT: call void @llvm.memcpy // CHECK: ; core::ptr::write // CHECK-NOT: call void @llvm.memcpy -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{.*}}(i8* align 1 %dst, i8* align 1 %src, i{{.*}} 1, i1 false) +// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 1 %dst, {{i8\*|ptr}} align 1 %src, i{{.*}} 1, i1 false) // CHECK-NOT: call void @llvm.memcpy diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs index 5d1fb80ec0051..fd63b4f0acd5c 100644 --- a/src/test/codegen/packed.rs +++ b/src/test/codegen/packed.rs @@ -18,8 +18,8 @@ pub struct Packed2 { // CHECK-LABEL: @write_pkd1 #[no_mangle] pub fn write_pkd1(pkd: &mut Packed1) -> u32 { -// CHECK: %{{.*}} = load i32, i32* %{{.*}}, align 1 -// CHECK: store i32 42, i32* %{{.*}}, align 1 +// CHECK: %{{.*}} = load i32, {{i32\*|ptr}} %{{.*}}, align 1 +// CHECK: store i32 42, {{i32\*|ptr}} %{{.*}}, align 1 let result = pkd.data; pkd.data = 42; result @@ -28,8 +28,8 @@ pub fn write_pkd1(pkd: &mut Packed1) -> u32 { // CHECK-LABEL: @write_pkd2 #[no_mangle] pub fn write_pkd2(pkd: &mut Packed2) -> u32 { -// CHECK: %{{.*}} = load i32, i32* %{{.*}}, align 2 -// CHECK: store i32 42, i32* %{{.*}}, align 2 +// CHECK: %{{.*}} = load i32, {{i32\*|ptr}} %{{.*}}, align 2 +// CHECK: store i32 42, {{i32\*|ptr}} %{{.*}}, align 2 let result = pkd.data; pkd.data = 42; result @@ -52,8 +52,8 @@ pub struct BigPacked2 { #[no_mangle] pub fn call_pkd1(f: fn() -> Array) 
-> BigPacked1 { // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array -// CHECK: call void %{{.*}}(%Array* noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]]) -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 1 %{{.*}}, i8* align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false) +// CHECK: call void %{{.*}}({{%Array\*|ptr}} noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]]) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 1 %{{.*}}, {{i8\*|ptr}} align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false) // check that calls whose destination is a field of a packed struct // go through an alloca rather than calling the function with an // unaligned destination. @@ -64,8 +64,8 @@ pub fn call_pkd1(f: fn() -> Array) -> BigPacked1 { #[no_mangle] pub fn call_pkd2(f: fn() -> Array) -> BigPacked2 { // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array -// CHECK: call void %{{.*}}(%Array* noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]]) -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 2 %{{.*}}, i8* align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false) +// CHECK: call void %{{.*}}({{%Array\*|ptr}} noalias nocapture noundef sret{{.*}} dereferenceable(32) [[ALLOCA]]) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 2 %{{.*}}, {{i8\*|ptr}} align 4 %{{.*}}, i{{[0-9]+}} 32, i1 false) // check that calls whose destination is a field of a packed struct // go through an alloca rather than calling the function with an // unaligned destination. @@ -73,9 +73,9 @@ pub fn call_pkd2(f: fn() -> Array) -> BigPacked2 { } // CHECK-LABEL: @write_packed_array1 -// CHECK: store i32 0, i32* %{{.+}}, align 1 -// CHECK: store i32 1, i32* %{{.+}}, align 1 -// CHECK: store i32 2, i32* %{{.+}}, align 1 +// CHECK: store i32 0, {{i32\*|ptr}} %{{.+}}, align 1 +// CHECK: store i32 1, {{i32\*|ptr}} %{{.+}}, align 1 +// CHECK: store i32 2, {{i32\*|ptr}} %{{.+}}, align 1 #[no_mangle] pub fn write_packed_array1(p: &mut BigPacked1) { p.data.0[0] = 0; @@ -84,9 +84,9 @@ pub fn write_packed_array1(p: &mut BigPacked1) { } // CHECK-LABEL: @write_packed_array2 -// CHECK: store i32 0, i32* %{{.+}}, align 2 -// CHECK: store i32 1, i32* %{{.+}}, align 2 -// CHECK: store i32 2, i32* %{{.+}}, align 2 +// CHECK: store i32 0, {{i32\*|ptr}} %{{.+}}, align 2 +// CHECK: store i32 1, {{i32\*|ptr}} %{{.+}}, align 2 +// CHECK: store i32 2, {{i32\*|ptr}} %{{.+}}, align 2 #[no_mangle] pub fn write_packed_array2(p: &mut BigPacked2) { p.data.0[0] = 0; @@ -95,14 +95,14 @@ pub fn write_packed_array2(p: &mut BigPacked2) { } // CHECK-LABEL: @repeat_packed_array1 -// CHECK: store i32 42, i32* %{{.+}}, align 1 +// CHECK: store i32 42, {{i32\*|ptr}} %{{.+}}, align 1 #[no_mangle] pub fn repeat_packed_array1(p: &mut BigPacked1) { p.data.0 = [42; 8]; } // CHECK-LABEL: @repeat_packed_array2 -// CHECK: store i32 42, i32* %{{.+}}, align 2 +// CHECK: store i32 42, {{i32\*|ptr}} %{{.+}}, align 2 #[no_mangle] pub fn repeat_packed_array2(p: &mut BigPacked2) { p.data.0 = [42; 8]; @@ -119,14 +119,14 @@ pub struct Packed2Pair(u8, u32); // CHECK-LABEL: @pkd1_pair #[no_mangle] pub fn pkd1_pair(pair1: &mut Packed1Pair, pair2: &mut Packed1Pair) { -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 1 %{{.*}}, i8* align 1 %{{.*}}, i{{[0-9]+}} 5, i1 false) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 1 %{{.*}}, {{i8\*|ptr}} align 1 %{{.*}}, i{{[0-9]+}} 5, i1 false) *pair2 = *pair1; } // CHECK-LABEL: @pkd2_pair #[no_mangle] pub fn pkd2_pair(pair1: &mut Packed2Pair, pair2: &mut Packed2Pair) { -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 
2 %{{.*}}, i8* align 2 %{{.*}}, i{{[0-9]+}} 6, i1 false) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 2 %{{.*}}, {{i8\*|ptr}} align 2 %{{.*}}, i{{[0-9]+}} 6, i1 false) *pair2 = *pair1; } @@ -141,13 +141,13 @@ pub struct Packed2NestedPair((u32, u32)); // CHECK-LABEL: @pkd1_nested_pair #[no_mangle] pub fn pkd1_nested_pair(pair1: &mut Packed1NestedPair, pair2: &mut Packed1NestedPair) { -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 1 %{{.*}}, i8* align 1 %{{.*}}, i{{[0-9]+}} 8, i1 false) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 1 %{{.*}}, {{i8\*|ptr}} align 1 %{{.*}}, i{{[0-9]+}} 8, i1 false) *pair2 = *pair1; } // CHECK-LABEL: @pkd2_nested_pair #[no_mangle] pub fn pkd2_nested_pair(pair1: &mut Packed2NestedPair, pair2: &mut Packed2NestedPair) { -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 2 %{{.*}}, i8* align 2 %{{.*}}, i{{[0-9]+}} 8, i1 false) +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 2 %{{.*}}, {{i8\*|ptr}} align 2 %{{.*}}, i{{[0-9]+}} 8, i1 false) *pair2 = *pair1; } diff --git a/src/test/codegen/personality_lifetimes.rs b/src/test/codegen/personality_lifetimes.rs index 9f07488a9a8a6..2104022f57874 100644 --- a/src/test/codegen/personality_lifetimes.rs +++ b/src/test/codegen/personality_lifetimes.rs @@ -21,13 +21,11 @@ pub fn test() { let _s = S; // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just // in the first one. - // CHECK: [[SLOT:%[0-9]+]] = alloca { i8*, i32 } + // CHECK: [[SLOT:%[0-9]+]] = alloca { {{i8\*|ptr}}, i32 } // CHECK-LABEL: cleanup: - // CHECK: [[BITCAST:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8* - // CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST]]) + // CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}}) // CHECK-LABEL: cleanup1: - // CHECK: [[BITCAST1:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8* - // CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST1]]) + // CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}}) might_unwind(); let _t = S; might_unwind(); diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index b4cc26f3f9d79..0b796754d1d86 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -13,10 +13,10 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0 -// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]] -// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1 -// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]] +// CHECK: [[X0:%[0-9]+]] = getelementptr inbounds { {{\[0 x i8\]\*|ptr}}, [[USIZE]] }, {{.*}} %x, i32 0, i32 0 +// CHECK: store {{\[0 x i8\]\*|ptr}} %s.0, {{.*}} [[X0]] +// CHECK: [[X1:%[0-9]+]] = getelementptr inbounds { {{\[0 x i8\]\*|ptr}}, [[USIZE]] }, {{.*}} %x, i32 0, i32 1 +// CHECK: store [[USIZE]] %s.1, {{.*}} [[X1]] let x = &*s; &x; // keep variable in an alloca diff --git a/src/test/codegen/repeat-trusted-len.rs b/src/test/codegen/repeat-trusted-len.rs index cb2d0ef809af4..7aebd3ec7df0a 100644 --- a/src/test/codegen/repeat-trusted-len.rs +++ b/src/test/codegen/repeat-trusted-len.rs @@ -8,6 +8,6 @@ use std::iter; // CHECK-LABEL: @repeat_take_collect #[no_mangle] pub fn repeat_take_collect() -> Vec { -// CHECK: call void @llvm.memset.p0i8.i{{[0-9]+}}(i8* {{.*}}align 1{{.*}} %{{[0-9]+}}, i8 42, i{{[0-9]+}} 
100000, i1 false) +// CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}align 1{{.*}} %{{[0-9]+}}, i8 42, i{{[0-9]+}} 100000, i1 false) iter::repeat(42).take(100000).collect() } diff --git a/src/test/codegen/repr-transparent-aggregates-1.rs b/src/test/codegen/repr-transparent-aggregates-1.rs index 4ad3642c03d68..9d18c5f03c665 100644 --- a/src/test/codegen/repr-transparent-aggregates-1.rs +++ b/src/test/codegen/repr-transparent-aggregates-1.rs @@ -33,19 +33,19 @@ pub enum TeBigS { Variant(BigS), } -// CHECK: define{{.*}}void @test_BigS(%BigS* [[BIGS_RET_ATTRS1:.*]] sret(%BigS) [[BIGS_RET_ATTRS2:.*]], %BigS* [[BIGS_ARG_ATTRS1:.*]] byval(%BigS) [[BIGS_ARG_ATTRS2:.*]]) +// CHECK: define{{.*}}void @test_BigS({{%BigS\*|ptr}} [[BIGS_RET_ATTRS1:.*]] sret(%BigS) [[BIGS_RET_ATTRS2:.*]], {{%BigS\*|ptr}} [[BIGS_ARG_ATTRS1:.*]] byval(%BigS) [[BIGS_ARG_ATTRS2:.*]]) #[no_mangle] pub extern "C" fn test_BigS(_: BigS) -> BigS { loop {} } -// CHECK: define{{.*}}void @test_TsBigS(%TsBigS* [[BIGS_RET_ATTRS1]] sret(%TsBigS) [[BIGS_RET_ATTRS2]], %TsBigS* [[BIGS_ARG_ATTRS1]] byval(%TsBigS) [[BIGS_ARG_ATTRS2:.*]]) +// CHECK: define{{.*}}void @test_TsBigS({{%TsBigS\*|ptr}} [[BIGS_RET_ATTRS1]] sret(%TsBigS) [[BIGS_RET_ATTRS2]], {{%TsBigS\*|ptr}} [[BIGS_ARG_ATTRS1]] byval(%TsBigS) [[BIGS_ARG_ATTRS2:.*]]) #[no_mangle] pub extern "C" fn test_TsBigS(_: TsBigS) -> TsBigS { loop {} } -// CHECK: define{{.*}}void @test_TuBigS(%TuBigS* [[BIGS_RET_ATTRS1]] sret(%TuBigS) [[BIGS_RET_ATTRS2]], %TuBigS* [[BIGS_ARG_ATTRS1]] byval(%TuBigS) [[BIGS_ARG_ATTRS2:.*]]) +// CHECK: define{{.*}}void @test_TuBigS({{%TuBigS\*|ptr}} [[BIGS_RET_ATTRS1]] sret(%TuBigS) [[BIGS_RET_ATTRS2]], {{%TuBigS\*|ptr}} [[BIGS_ARG_ATTRS1]] byval(%TuBigS) [[BIGS_ARG_ATTRS2:.*]]) #[no_mangle] pub extern "C" fn test_TuBigS(_: TuBigS) -> TuBigS { loop {} } -// CHECK: define{{.*}}void @test_TeBigS(%"TeBigS::Variant"* [[BIGS_RET_ATTRS1]] sret(%"TeBigS::Variant") [[BIGS_RET_ATTRS2]], %"TeBigS::Variant"* [[BIGS_ARG_ATTRS1]] byval(%"TeBigS::Variant") [[BIGS_ARG_ATTRS2]]) +// CHECK: define{{.*}}void @test_TeBigS({{%"TeBigS::Variant"\*|ptr}} [[BIGS_RET_ATTRS1]] sret(%"TeBigS::Variant") [[BIGS_RET_ATTRS2]], {{%"TeBigS::Variant"\*|ptr}} [[BIGS_ARG_ATTRS1]] byval(%"TeBigS::Variant") [[BIGS_ARG_ATTRS2]]) #[no_mangle] pub extern "C" fn test_TeBigS(_: TeBigS) -> TeBigS { loop {} } @@ -69,18 +69,18 @@ pub enum TeBigU { Variant(BigU), } -// CHECK: define{{.*}}void @test_BigU(%BigU* [[BIGU_RET_ATTRS1:.*]] sret(%BigU) [[BIGU_RET_ATTRS2:.*]], %BigU* [[BIGU_ARG_ATTRS1:.*]] byval(%BigU) [[BIGU_ARG_ATTRS2:.*]]) +// CHECK: define{{.*}}void @test_BigU({{%BigU\*|ptr}} [[BIGU_RET_ATTRS1:.*]] sret(%BigU) [[BIGU_RET_ATTRS2:.*]], {{%BigU\*|ptr}} [[BIGU_ARG_ATTRS1:.*]] byval(%BigU) [[BIGU_ARG_ATTRS2:.*]]) #[no_mangle] pub extern "C" fn test_BigU(_: BigU) -> BigU { loop {} } -// CHECK: define{{.*}}void @test_TsBigU(%TsBigU* [[BIGU_RET_ATTRS1:.*]] sret(%TsBigU) [[BIGU_RET_ATTRS2:.*]], %TsBigU* [[BIGU_ARG_ATTRS1]] byval(%TsBigU) [[BIGU_ARG_ATTRS2]]) +// CHECK: define{{.*}}void @test_TsBigU({{%TsBigU\*|ptr}} [[BIGU_RET_ATTRS1:.*]] sret(%TsBigU) [[BIGU_RET_ATTRS2:.*]], {{%TsBigU\*|ptr}} [[BIGU_ARG_ATTRS1]] byval(%TsBigU) [[BIGU_ARG_ATTRS2]]) #[no_mangle] pub extern "C" fn test_TsBigU(_: TsBigU) -> TsBigU { loop {} } -// CHECK: define{{.*}}void @test_TuBigU(%TuBigU* [[BIGU_RET_ATTRS1]] sret(%TuBigU) [[BIGU_RET_ATTRS2:.*]], %TuBigU* [[BIGU_ARG_ATTRS1]] byval(%TuBigU) [[BIGU_ARG_ATTRS2]]) +// CHECK: define{{.*}}void @test_TuBigU({{%TuBigU\*|ptr}} [[BIGU_RET_ATTRS1]] sret(%TuBigU) 
[[BIGU_RET_ATTRS2:.*]], {{%TuBigU\*|ptr}} [[BIGU_ARG_ATTRS1]] byval(%TuBigU) [[BIGU_ARG_ATTRS2]]) #[no_mangle] pub extern "C" fn test_TuBigU(_: TuBigU) -> TuBigU { loop {} } -// CHECK: define{{.*}}void @test_TeBigU(%"TeBigU::Variant"* [[BIGU_RET_ATTRS1]] sret(%"TeBigU::Variant") [[BIGU_RET_ATTRS2:.*]], %"TeBigU::Variant"* [[BIGU_ARG_ATTRS1]] byval(%"TeBigU::Variant") [[BIGU_ARG_ATTRS2]]) +// CHECK: define{{.*}}void @test_TeBigU({{%"TeBigU::Variant"\*|ptr}} [[BIGU_RET_ATTRS1]] sret(%"TeBigU::Variant") [[BIGU_RET_ATTRS2:.*]], {{%"TeBigU::Variant"\*|ptr}} [[BIGU_ARG_ATTRS1]] byval(%"TeBigU::Variant") [[BIGU_ARG_ATTRS2]]) #[no_mangle] pub extern "C" fn test_TeBigU(_: TeBigU) -> TeBigU { loop {} } diff --git a/src/test/codegen/repr-transparent.rs b/src/test/codegen/repr-transparent.rs index 53da573ae935d..c68ba8460cb14 100644 --- a/src/test/codegen/repr-transparent.rs +++ b/src/test/codegen/repr-transparent.rs @@ -24,7 +24,7 @@ pub extern "C" fn test_F32(_: F32) -> F32 { loop {} } #[repr(transparent)] pub struct Ptr(*mut u8); -// CHECK: define{{.*}}i8* @test_Ptr(i8* %_1) +// CHECK: define{{.*}}{{i8\*|ptr}} @test_Ptr({{i8\*|ptr}} %_1) #[no_mangle] pub extern "C" fn test_Ptr(_: Ptr) -> Ptr { loop {} } @@ -39,7 +39,7 @@ pub extern "C" fn test_WithZst(_: WithZst) -> WithZst { loop {} } pub struct WithZeroSizedArray(*const f32, [i8; 0]); // Apparently we use i32* when newtype-unwrapping f32 pointers. Whatever. -// CHECK: define{{.*}}i32* @test_WithZeroSizedArray(i32* %_1) +// CHECK: define{{.*}}{{i32\*|ptr}} @test_WithZeroSizedArray({{i32\*|ptr}} %_1) #[no_mangle] pub extern "C" fn test_WithZeroSizedArray(_: WithZeroSizedArray) -> WithZeroSizedArray { loop {} } @@ -63,7 +63,7 @@ pub extern "C" fn test_Gpz(_: GenericPlusZst) -> GenericPlusZst { lo #[repr(transparent)] pub struct LifetimePhantom<'a, T: 'a>(*const T, PhantomData<&'a T>); -// CHECK: define{{.*}}i16* @test_LifetimePhantom(i16* %_1) +// CHECK: define{{.*}}{{i16\*|ptr}} @test_LifetimePhantom({{i16\*|ptr}} %_1) #[no_mangle] pub extern "C" fn test_LifetimePhantom(_: LifetimePhantom) -> LifetimePhantom { loop {} } diff --git a/src/test/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs b/src/test/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs index 7f0f678062a64..61c4b7b51af7b 100644 --- a/src/test/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs +++ b/src/test/codegen/riscv-abi/riscv64-lp64-lp64f-lp64d-abi.rs @@ -127,18 +127,18 @@ pub struct Large { d: i64, } -// CHECK: define void @f_agg_large(%Large* {{.*}}%x) +// CHECK: define void @f_agg_large({{%Large\*|ptr}} {{.*}}%x) #[no_mangle] pub extern "C" fn f_agg_large(mut x: Large) { } -// CHECK: define void @f_agg_large_ret(%Large* {{.*}}sret{{.*}}, i32 signext %i, i8 signext %j) +// CHECK: define void @f_agg_large_ret({{%Large\*|ptr}} {{.*}}sret{{.*}}, i32 signext %i, i8 signext %j) #[no_mangle] pub extern "C" fn f_agg_large_ret(i: i32, j: i8) -> Large { Large { a: 1, b: 2, c: 3, d: 4 } } -// CHECK: define void @f_scalar_stack_1(i64 %0, [2 x i64] %1, i128 %2, %Large* {{.*}}%d, i8 zeroext %e, i8 signext %f, i8 %g, i8 %h) +// CHECK: define void @f_scalar_stack_1(i64 %0, [2 x i64] %1, i128 %2, {{%Large\*|ptr}} {{.*}}%d, i8 zeroext %e, i8 signext %f, i8 %g, i8 %h) #[no_mangle] pub extern "C" fn f_scalar_stack_1( a: Tiny, @@ -152,7 +152,7 @@ pub extern "C" fn f_scalar_stack_1( ) { } -// CHECK: define void @f_scalar_stack_2(%Large* {{.*}}sret{{.*}} %0, i64 %a, i128 %1, i128 %2, i64 %d, i8 zeroext %e, i8 %f, i8 %g) +// CHECK: define void @f_scalar_stack_2({{%Large\*|ptr}} {{.*}}sret{{.*}} %0, 
i64 %a, i128 %1, i128 %2, i64 %d, i8 zeroext %e, i8 %f, i8 %g) #[no_mangle] pub extern "C" fn f_scalar_stack_2( a: u64, @@ -172,7 +172,7 @@ extern "C" { #[no_mangle] pub unsafe extern "C" fn f_va_caller() { - // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i64 3, double {{.*}}, double {{.*}}, i64 {{.*}}, [2 x i64] {{.*}}, i128 {{.*}}, %Large* {{.*}}) + // CHECK: call signext i32 (i32, ...) @f_va_callee(i32 signext 1, i32 signext 2, i64 3, double {{.*}}, double {{.*}}, i64 {{.*}}, [2 x i64] {{.*}}, i128 {{.*}}, {{%Large\*|ptr}} {{.*}}) f_va_callee( 1, 2i32, diff --git a/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs b/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs index e2e0fc16dfa94..cacc32f2f1b66 100644 --- a/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs +++ b/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs @@ -23,7 +23,7 @@ extern "platform-intrinsic" { #[no_mangle] pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2, values: Vec2) -> Vec2 { - // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}}) + // CHECK: call <2 x float> @llvm.masked.gather.v2f32.{{.+}}(<2 x {{float\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}}) simd_gather(values, pointers, mask) } @@ -31,6 +31,6 @@ pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2, #[no_mangle] pub unsafe fn gather_pf32x2(pointers: Vec2<*const *const f32>, mask: Vec2, values: Vec2<*const f32>) -> Vec2<*const f32> { - // CHECK: call <2 x float*> @llvm.masked.gather.v2p0f32.v2p0p0f32(<2 x float**> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float*> {{.*}}) + // CHECK: call <2 x {{float\*|ptr}}> @llvm.masked.gather.{{.+}}(<2 x {{float\*\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x {{float\*|ptr}}> {{.*}}) simd_gather(values, pointers, mask) } diff --git a/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs b/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs index 050a0e5b42620..94ecaf6096d5c 100644 --- a/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs +++ b/src/test/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs @@ -23,7 +23,7 @@ extern "platform-intrinsic" { #[no_mangle] pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2, values: Vec2) { - // CHECK: call void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> {{.*}}, <2 x float*> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}) + // CHECK: call void @llvm.masked.scatter.v2f32.v2p0{{.*}}(<2 x float> {{.*}}, <2 x {{float\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}) simd_scatter(values, pointers, mask) } @@ -32,6 +32,6 @@ pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2, #[no_mangle] pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2, values: Vec2<*const f32>) { - // CHECK: call void @llvm.masked.scatter.v2p0f32.v2p0p0f32(<2 x float*> {{.*}}, <2 x float**> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}) + // CHECK: call void @llvm.masked.scatter.v2p0{{.*}}.v2p0{{.*}}(<2 x {{float\*|ptr}}> {{.*}}, <2 x {{float\*\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}) simd_scatter(values, pointers, mask) } diff --git a/src/test/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs b/src/test/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs index 7d9b0d2a77bc1..db5b60567faa4 100644 --- a/src/test/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs +++ 
b/src/test/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs @@ -21,23 +21,23 @@ pub struct U(f32, f32, f32, f32); // CHECK-LABEL: @build_array_s #[no_mangle] pub fn build_array_s(x: [f32; 4]) -> S<4> { - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]+}}(i8* {{.*}} %{{[0-9]+}}, i8* {{.*}} %{{[0-9]+}}, i{{[0-9]+}} 16, i1 false) - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]+}}(i8* {{.*}} %{{[0-9]+}}, i8* {{.*}} %{{[0-9]+}}, i{{[0-9]+}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) S::<4>(x) } // CHECK-LABEL: @build_array_t #[no_mangle] pub fn build_array_t(x: [f32; 4]) -> T { - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]+}}(i8* {{.*}} %{{[0-9]+}}, i8* {{.*}} %{{[0-9]+}}, i{{[0-9]+}} 16, i1 false) - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]+}}(i8* {{.*}} %{{[0-9]+}}, i8* {{.*}} %{{[0-9]+}}, i{{[0-9]+}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) T(x) } // CHECK-LABEL: @build_array_u #[no_mangle] pub fn build_array_u(x: [f32; 4]) -> U { - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]+}}(i8* {{.*}} %{{[0-9]+}}, i8* {{.*}} %{{[0-9]+}}, i{{[0-9]+}} 16, i1 false) - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{[0-9]+}}(i8* {{.*}} %{{[0-9]+}}, i8* {{.*}} %{{[0-9]+}}, i{{[0-9]+}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) unsafe { std::mem::transmute(x) } } diff --git a/src/test/codegen/simd_arith_offset.rs b/src/test/codegen/simd_arith_offset.rs index a858270d4e76d..7b623a22a0b21 100644 --- a/src/test/codegen/simd_arith_offset.rs +++ b/src/test/codegen/simd_arith_offset.rs @@ -21,6 +21,6 @@ pub struct Simd([T; LANES]); // CHECK-LABEL: smoke #[no_mangle] pub fn smoke(ptrs: SimdConstPtr, offsets: Simd) -> SimdConstPtr { - // CHECK: getelementptr i8, <8 x i8*> %_3, <8 x i64> %_4 + // CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %_3, <8 x i64> %_4 unsafe { simd_arith_offset(ptrs, offsets) } } diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs index 6c79ddb0a7abd..794b773a7763c 100644 --- a/src/test/codegen/slice-init.rs +++ b/src/test/codegen/slice-init.rs @@ -6,7 +6,7 @@ #[no_mangle] pub fn zero_sized_elem() { // CHECK-NOT: br label %repeat_loop_header{{.*}} - // CHECK-NOT: call void @llvm.memset.p0i8 + // CHECK-NOT: call void @llvm.memset.p0 let x = [(); 4]; drop(&x); } @@ -15,7 +15,7 @@ pub fn zero_sized_elem() { #[no_mangle] pub fn zero_len_array() { // CHECK-NOT: br label %repeat_loop_header{{.*}} - // CHECK-NOT: call void @llvm.memset.p0i8 + // CHECK-NOT: call void @llvm.memset.p0 let x = [4; 0]; drop(&x); } @@ -23,7 +23,7 @@ pub fn zero_len_array() { // CHECK-LABEL: @byte_array #[no_mangle] pub fn byte_array() { - // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4 + // CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}, i8 7, i{{[0-9]+}} 4 // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [7u8; 4]; drop(&x); @@ -39,7 +39,7 @@ enum Init { // CHECK-LABEL: @byte_enum_array #[no_mangle] pub fn byte_enum_array() { - // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4 + // CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}, i8 {{.*}}, i{{[0-9]+}} 4 // CHECK-NOT: br label 
%repeat_loop_header{{.*}} let x = [Init::Memset; 4]; drop(&x); @@ -48,7 +48,7 @@ pub fn byte_enum_array() { // CHECK-LABEL: @zeroed_integer_array #[no_mangle] pub fn zeroed_integer_array() { - // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16 + // CHECK: call void @llvm.memset.{{.+}}({{i8\*|ptr}} {{.*}}, i8 0, i{{[0-9]+}} 16 // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [0u32; 4]; drop(&x); @@ -58,7 +58,7 @@ pub fn zeroed_integer_array() { #[no_mangle] pub fn nonzero_integer_array() { // CHECK: br label %repeat_loop_header{{.*}} - // CHECK-NOT: call void @llvm.memset.p0i8 + // CHECK-NOT: call void @llvm.memset.p0 let x = [0x1a_2b_3c_4d_u32; 4]; drop(&x); } diff --git a/src/test/codegen/slice-iter-len-eq-zero.rs b/src/test/codegen/slice-iter-len-eq-zero.rs index a5516833900a8..fd19e624cdddb 100644 --- a/src/test/codegen/slice-iter-len-eq-zero.rs +++ b/src/test/codegen/slice-iter-len-eq-zero.rs @@ -8,7 +8,7 @@ type Demo = [u8; 3]; #[no_mangle] pub fn slice_iter_len_eq_zero(y: std::slice::Iter<'_, Demo>) -> bool { // CHECK-NOT: sub - // CHECK: %2 = icmp eq i8* %1, %0 + // CHECK: %2 = icmp eq {{i8\*|ptr}} %1, %0 // CHECK: ret i1 %2 y.len() == 0 } diff --git a/src/test/codegen/slice-ref-equality.rs b/src/test/codegen/slice-ref-equality.rs index c06554ecdec22..e5cde5e9e7460 100644 --- a/src/test/codegen/slice-ref-equality.rs +++ b/src/test/codegen/slice-ref-equality.rs @@ -13,9 +13,7 @@ // CHECK-LABEL: @is_zero_slice_long #[no_mangle] pub fn is_zero_slice_long(data: &[u8; 456]) -> bool { - // CHECK: : - // CHECK-NEXT: %{{.+}} = getelementptr {{.+}} - // CHECK-NEXT: %[[BCMP:.+]] = tail call i32 @{{bcmp|memcmp}}({{.+}}) + // CHECK: %[[BCMP:.+]] = tail call i32 @{{bcmp|memcmp}}({{.+}}) // CHECK-NEXT: %[[EQ:.+]] = icmp eq i32 %[[BCMP]], 0 // CHECK-NEXT: ret i1 %[[EQ]] &data[..] == [0; 456] @@ -24,9 +22,7 @@ pub fn is_zero_slice_long(data: &[u8; 456]) -> bool { // CHECK-LABEL: @is_zero_slice_short #[no_mangle] pub fn is_zero_slice_short(data: &[u8; 4]) -> bool { - // CHECK: : - // CHECK-NEXT: %[[PTR:.+]] = bitcast [4 x i8]* {{.+}} to i32* - // CHECK-NEXT: %[[LOAD:.+]] = load i32, i32* %[[PTR]], align 1 + // CHECK: %[[LOAD:.+]] = load i32, {{i32\*|ptr}} %{{.+}}, align 1 // CHECK-NEXT: %[[EQ:.+]] = icmp eq i32 %[[LOAD]], 0 // CHECK-NEXT: ret i1 %[[EQ]] &data[..] 
== [0; 4] @@ -35,9 +31,7 @@ pub fn is_zero_slice_short(data: &[u8; 4]) -> bool { // CHECK-LABEL: @is_zero_array #[no_mangle] pub fn is_zero_array(data: &[u8; 4]) -> bool { - // CHECK: start: - // CHECK-NEXT: %[[PTR:.+]] = bitcast [4 x i8]* {{.+}} to i32* - // CHECK-NEXT: %[[LOAD:.+]] = load i32, i32* %[[PTR]], align 1 + // CHECK: %[[LOAD:.+]] = load i32, {{i32\*|ptr}} %{{.+}}, align 1 // CHECK-NEXT: %[[EQ:.+]] = icmp eq i32 %[[LOAD]], 0 // CHECK-NEXT: ret i1 %[[EQ]] *data == [0; 4] diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 17f051a5bce0a..837256e536942 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -17,10 +17,8 @@ pub struct Bytes { pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { // CHECK: [[TMP:%.+]] = alloca i32 // CHECK: %y = alloca [4 x i8] -// CHECK: store i32 %0, i32* [[TMP]] -// CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %y to i8* -// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 1 [[Y8]], i8* align 4 [[TMP8]], i{{[0-9]+}} 4, i1 false) +// CHECK: store i32 %0, {{i32\*|ptr}} [[TMP]] +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 1 {{.+}}, {{i8\*|ptr}} align 4 {{.+}}, i{{[0-9]+}} 4, i1 false) *x = y; } @@ -31,9 +29,7 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { // CHECK: [[TMP:%.+]] = alloca i32 // CHECK: %y = alloca %Bytes -// CHECK: store i32 %0, i32* [[TMP]] -// CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %y to i8* -// CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* -// CHECK: call void @llvm.memcpy.{{.*}}(i8* align 1 [[Y8]], i8* align 4 [[TMP8]], i{{[0-9]+}} 4, i1 false) +// CHECK: store i32 %0, {{i32\*|ptr}} [[TMP]] +// CHECK: call void @llvm.memcpy.{{.*}}({{i8\*|ptr}} align 1 {{.+}}, {{i8\*|ptr}} align 4 {{.+}}, i{{[0-9]+}} 4, i1 false) *x = y; } diff --git a/src/test/codegen/swap-large-types.rs b/src/test/codegen/swap-large-types.rs index 91a1ab7144fd4..4a68403578d1e 100644 --- a/src/test/codegen/swap-large-types.rs +++ b/src/test/codegen/swap-large-types.rs @@ -83,9 +83,9 @@ pub struct BigButHighlyAligned([u8; 64 * 3]); #[no_mangle] pub fn swap_big_aligned(x: &mut BigButHighlyAligned, y: &mut BigButHighlyAligned) { // CHECK-NOT: call void @llvm.memcpy -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(192) -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(192) -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(192) +// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} noundef nonnull align 64 dereferenceable(192) +// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} noundef nonnull align 64 dereferenceable(192) +// CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} noundef nonnull align 64 dereferenceable(192) // CHECK-NOT: call void @llvm.memcpy swap(x, y) } diff --git a/src/test/codegen/thread-local.rs b/src/test/codegen/thread-local.rs index c59b088f7a6d6..0f1b29ca79bc6 100644 --- a/src/test/codegen/thread-local.rs +++ b/src/test/codegen/thread-local.rs @@ -19,7 +19,7 @@ thread_local!(static A: Cell = const { Cell::new(1) }); // CHECK-LABEL: @get #[no_mangle] fn get() -> u32 { - // CHECK: %0 = load i32, i32* {{.*}}[[TLS]]{{.*}} + // CHECK: %0 = load i32, {{.*}}[[TLS]]{{.*}} // CHECK-NEXT: ret i32 %0 A.with(|a| a.get()) } @@ -27,7 +27,7 @@ fn get() -> u32 { // CHECK-LABEL: @set #[no_mangle] fn set(v: u32) { - // CHECK: store i32 %0, 
i32* {{.*}}[[TLS]]{{.*}} + // CHECK: store i32 %0, {{.*}}[[TLS]]{{.*}} // CHECK-NEXT: ret void A.with(|a| a.set(v)) } @@ -35,7 +35,7 @@ fn set(v: u32) { // CHECK-LABEL: @get_aux #[no_mangle] fn get_aux() -> u64 { - // CHECK: %0 = load i64, i64* [[TLS_AUX]] + // CHECK: %0 = load i64, {{.*}}[[TLS_AUX]] // CHECK-NEXT: ret i64 %0 aux::A.with(|a| a.get()) } @@ -43,7 +43,7 @@ fn get_aux() -> u64 { // CHECK-LABEL: @set_aux #[no_mangle] fn set_aux(v: u64) { - // CHECK: store i64 %0, i64* [[TLS_AUX]] + // CHECK: store i64 %0, {{.*}}[[TLS_AUX]] // CHECK-NEXT: ret void aux::A.with(|a| a.set(v)) } diff --git a/src/test/codegen/transmute-scalar.rs b/src/test/codegen/transmute-scalar.rs index a5aeb80de85c3..a7e5deeffd8e2 100644 --- a/src/test/codegen/transmute-scalar.rs +++ b/src/test/codegen/transmute-scalar.rs @@ -6,10 +6,9 @@ // scalar `bitcast`, more special-casing is required to remove `alloca` usage. // CHECK-LABEL: define{{.*}}i32 @f32_to_bits(float %x) -// CHECK: %2 = bitcast float %x to i32 -// CHECK-NEXT: store i32 %2, i32* %0 -// CHECK-NEXT: %3 = load i32, i32* %0 -// CHECK: ret i32 %3 +// CHECK: store i32 %{{.*}}, {{.*}} %0 +// CHECK-NEXT: %[[RES:.*]] = load i32, {{.*}} %0 +// CHECK: ret i32 %[[RES]] #[no_mangle] pub fn f32_to_bits(x: f32) -> u32 { unsafe { std::mem::transmute(x) } @@ -17,8 +16,8 @@ pub fn f32_to_bits(x: f32) -> u32 { // CHECK-LABEL: define{{.*}}i8 @bool_to_byte(i1 noundef zeroext %b) // CHECK: %1 = zext i1 %b to i8 -// CHECK-NEXT: store i8 %1, i8* %0 -// CHECK-NEXT: %2 = load i8, i8* %0 +// CHECK-NEXT: store i8 %1, {{.*}} %0 +// CHECK-NEXT: %2 = load i8, {{.*}} %0 // CHECK: ret i8 %2 #[no_mangle] pub fn bool_to_byte(b: bool) -> u8 { @@ -28,8 +27,8 @@ pub fn bool_to_byte(b: bool) -> u8 { // CHECK-LABEL: define{{.*}}noundef zeroext i1 @byte_to_bool(i8 %byte) // CHECK: %1 = trunc i8 %byte to i1 // CHECK-NEXT: %2 = zext i1 %1 to i8 -// CHECK-NEXT: store i8 %2, i8* %0 -// CHECK-NEXT: %3 = load i8, i8* %0 +// CHECK-NEXT: store i8 %2, {{.*}} %0 +// CHECK-NEXT: %3 = load i8, {{.*}} %0 // CHECK-NEXT: %4 = trunc i8 %3 to i1 // CHECK: ret i1 %4 #[no_mangle] @@ -37,11 +36,10 @@ pub unsafe fn byte_to_bool(byte: u8) -> bool { std::mem::transmute(byte) } -// CHECK-LABEL: define{{.*}}i8* @ptr_to_ptr(i16* %p) -// CHECK: %2 = bitcast i16* %p to i8* -// CHECK-NEXT: store i8* %2, i8** %0 -// CHECK-NEXT: %3 = load i8*, i8** %0 -// CHECK: ret i8* %3 +// CHECK-LABEL: define{{.*}}{{i8\*|ptr}} @ptr_to_ptr({{i16\*|ptr}} %p) +// CHECK: store {{i8\*|ptr}} %{{.*}}, {{.*}} %0 +// CHECK-NEXT: %[[RES:.*]] = load {{i8\*|ptr}}, {{.*}} %0 +// CHECK: ret {{i8\*|ptr}} %[[RES]] #[no_mangle] pub fn ptr_to_ptr(p: *mut u16) -> *mut u8 { unsafe { std::mem::transmute(p) } @@ -54,31 +52,29 @@ pub fn ptr_to_ptr(p: *mut u16) -> *mut u8 { // Tests below show the non-special-cased behavior (with the possible // future special-cased instructions in the "NOTE(eddyb)" comments). 
-// CHECK: define{{.*}}[[USIZE:i[0-9]+]] @ptr_to_int(i16* %p) +// CHECK: define{{.*}}[[USIZE:i[0-9]+]] @ptr_to_int({{i16\*|ptr}} %p) // NOTE(eddyb) see above, the following two CHECK lines should ideally be this: // %2 = ptrtoint i16* %p to [[USIZE]] // store [[USIZE]] %2, [[USIZE]]* %0 -// CHECK: %2 = bitcast [[USIZE]]* %0 to i16** -// CHECK-NEXT: store i16* %p, i16** %2 +// CHECK: store {{i16\*|ptr}} %p, {{.*}} -// CHECK-NEXT: %3 = load [[USIZE]], [[USIZE]]* %0 -// CHECK: ret [[USIZE]] %3 +// CHECK-NEXT: %[[RES:.*]] = load [[USIZE]], {{.*}} %0 +// CHECK: ret [[USIZE]] %[[RES]] #[no_mangle] pub fn ptr_to_int(p: *mut u16) -> usize { unsafe { std::mem::transmute(p) } } -// CHECK: define{{.*}}i16* @int_to_ptr([[USIZE]] %i) +// CHECK: define{{.*}}{{i16\*|ptr}} @int_to_ptr([[USIZE]] %i) // NOTE(eddyb) see above, the following two CHECK lines should ideally be this: // %2 = inttoptr [[USIZE]] %i to i16* // store i16* %2, i16** %0 -// CHECK: %2 = bitcast i16** %0 to [[USIZE]]* -// CHECK-NEXT: store [[USIZE]] %i, [[USIZE]]* %2 +// CHECK: store [[USIZE]] %i, {{.*}} -// CHECK-NEXT: %3 = load i16*, i16** %0 -// CHECK: ret i16* %3 +// CHECK-NEXT: %[[RES:.*]] = load {{i16\*|ptr}}, {{.*}} %0 +// CHECK: ret {{i16\*|ptr}} %[[RES]] #[no_mangle] pub fn int_to_ptr(i: usize) -> *mut u16 { unsafe { std::mem::transmute(i) } diff --git a/src/test/codegen/uninit-consts.rs b/src/test/codegen/uninit-consts.rs index 3e370c7ba64f8..4c07740b35604 100644 --- a/src/test/codegen/uninit-consts.rs +++ b/src/test/codegen/uninit-consts.rs @@ -26,7 +26,7 @@ pub struct PartiallyUninit { #[no_mangle] pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> { const M: MaybeUninit<[u8; 10]> = MaybeUninit::uninit(); - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 1 %1, i8* align 1 getelementptr inbounds (<{ [10 x i8] }>, <{ [10 x i8] }>* [[FULLY_UNINIT]], i32 0, i32 0, i32 0), i{{(32|64)}} 10, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 1 %{{[0-9]+}}, {{i8\*|ptr}} align 1 {{.*}}[[FULLY_UNINIT]]{{.*}}, i{{(32|64)}} 10, i1 false) M } @@ -34,7 +34,7 @@ pub const fn fully_uninit() -> MaybeUninit<[u8; 10]> { #[no_mangle] pub const fn partially_uninit() -> PartiallyUninit { const X: PartiallyUninit = PartiallyUninit { x: 0xdeadbeef, y: MaybeUninit::uninit() }; - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [4 x i8], [12 x i8] }>, <{ [4 x i8], [12 x i8] }>* [[PARTIALLY_UNINIT]], i32 0, i32 0, i32 0), i{{(32|64)}} 16, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[PARTIALLY_UNINIT]]{{.*}}, i{{(32|64)}} 16, i1 false) X } @@ -42,7 +42,7 @@ pub const fn partially_uninit() -> PartiallyUninit { #[no_mangle] pub const fn uninit_padding_huge() -> [(u32, u8); 4096] { const X: [(u32, u8); 4096] = [(123, 45); 4096]; - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* align 4 %1, i8* align 4 getelementptr inbounds (<{ [32768 x i8] }>, <{ [32768 x i8] }>* [[UNINIT_PADDING_HUGE]], i32 0, i32 0, i32 0), i{{(32|64)}} 32768, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[UNINIT_PADDING_HUGE]]{{.*}}, i{{(32|64)}} 32768, i1 false) X } @@ -50,6 +50,6 @@ pub const fn uninit_padding_huge() -> [(u32, u8); 4096] { #[no_mangle] pub const fn fully_uninit_huge() -> MaybeUninit<[u32; 4096]> { const F: MaybeUninit<[u32; 4096]> = MaybeUninit::uninit(); - // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{(32|64)}}(i8* 
align 4 %1, i8* align 4 getelementptr inbounds (<{ [16384 x i8] }>, <{ [16384 x i8] }>* [[FULLY_UNINIT_HUGE]], i32 0, i32 0, i32 0), i{{(32|64)}} 16384, i1 false) + // CHECK: call void @llvm.memcpy.{{.+}}({{i8\*|ptr}} align 4 %{{[0-9]+}}, {{i8\*|ptr}} align 4 {{.*}}[[FULLY_UNINIT_HUGE]]{{.*}}, i{{(32|64)}} 16384, i1 false) F } diff --git a/src/test/codegen/union-abi.rs b/src/test/codegen/union-abi.rs index 01f917b291094..c18f2a49fc369 100644 --- a/src/test/codegen/union-abi.rs +++ b/src/test/codegen/union-abi.rs @@ -17,25 +17,25 @@ pub struct i64x4(i64, i64, i64, i64); #[derive(Copy, Clone)] pub union UnionI64x4{ a:(), b: i64x4 } -// CHECK: define void @test_UnionI64x4(<4 x i64>* {{.*}} %_1) +// CHECK: define void @test_UnionI64x4({{<4 x i64>\*|ptr}} {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4(_: UnionI64x4) { loop {} } pub union UnionI64x4_{ a: i64x4, b: (), c:i64x4, d: Unhab, e: ((),()), f: UnionI64x4 } -// CHECK: define void @test_UnionI64x4_(<4 x i64>* {{.*}} %_1) +// CHECK: define void @test_UnionI64x4_({{<4 x i64>\*|ptr}} {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4_(_: UnionI64x4_) { loop {} } pub union UnionI64x4I64{ a: i64x4, b: i64 } -// CHECK: define void @test_UnionI64x4I64(%UnionI64x4I64* {{.*}} %_1) +// CHECK: define void @test_UnionI64x4I64({{%UnionI64x4I64\*|ptr}} {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4I64(_: UnionI64x4I64) { loop {} } pub union UnionI64x4Tuple{ a: i64x4, b: (i64, i64, i64, i64) } -// CHECK: define void @test_UnionI64x4Tuple(%UnionI64x4Tuple* {{.*}} %_1) +// CHECK: define void @test_UnionI64x4Tuple({{%UnionI64x4Tuple\*|ptr}} {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4Tuple(_: UnionI64x4Tuple) { loop {} } @@ -65,7 +65,7 @@ pub fn test_UnionU128(_: UnionU128) -> UnionU128 { loop {} } #[repr(C)] pub union CUnionU128{a:u128} -// CHECK: define void @test_CUnionU128(%CUnionU128* {{.*}} %_1) +// CHECK: define void @test_CUnionU128({{%CUnionU128\*|ptr}} {{.*}} %_1) #[no_mangle] pub fn test_CUnionU128(_: CUnionU128) { loop {} } diff --git a/src/test/codegen/used_with_arg.rs b/src/test/codegen/used_with_arg.rs index 5bff50a40d4e8..4515cb2aed0c1 100644 --- a/src/test/codegen/used_with_arg.rs +++ b/src/test/codegen/used_with_arg.rs @@ -1,10 +1,10 @@ #![crate_type = "lib"] #![feature(used_with_arg)] -// CHECK: @llvm.used = appending global [1 x i8*]{{.*}}USED_LINKER +// CHECK: @llvm.used = appending global {{.*}}USED_LINKER #[used(linker)] static mut USED_LINKER: [usize; 1] = [0]; -// CHECK-NEXT: @llvm.compiler.used = appending global [1 x i8*]{{.*}}USED_COMPILER +// CHECK-NEXT: @llvm.compiler.used = appending global {{.*}}USED_COMPILER #[used(compiler)] static mut USED_COMPILER: [usize; 1] = [0]; diff --git a/src/test/codegen/zst-offset.rs b/src/test/codegen/zst-offset.rs index 0c015fca3253a..29d2a1754a3af 100644 --- a/src/test/codegen/zst-offset.rs +++ b/src/test/codegen/zst-offset.rs @@ -13,8 +13,7 @@ pub fn helper(_: usize) { // CHECK-LABEL: @scalar_layout #[no_mangle] pub fn scalar_layout(s: &(u64, ())) { -// CHECK: [[X0:%[0-9]+]] = bitcast i64* %s to i8* -// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 8 +// CHECK: getelementptr i8, {{.+}}, [[USIZE]] 8 let x = &s.1; &x; // keep variable in an alloca } @@ -23,8 +22,7 @@ pub fn scalar_layout(s: &(u64, ())) { // CHECK-LABEL: @scalarpair_layout #[no_mangle] pub fn scalarpair_layout(s: &(u64, u32, ())) { -// CHECK: [[X0:%[0-9]+]] = bitcast { i64, i32 }* %s to i8* -// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 12 +// CHECK: getelementptr i8, {{.+}}, 
[[USIZE]] 12 let x = &s.2; &x; // keep variable in an alloca } @@ -36,8 +34,7 @@ pub struct U64x4(u64, u64, u64, u64); // CHECK-LABEL: @vector_layout #[no_mangle] pub fn vector_layout(s: &(U64x4, ())) { -// CHECK: [[X0:%[0-9]+]] = bitcast <4 x i64>* %s to i8* -// CHECK-NEXT: [[X1:%[0-9]+]] = getelementptr i8, i8* [[X0]], [[USIZE]] 32 +// CHECK: getelementptr i8, {{.+}}, [[USIZE]] 32 let x = &s.1; &x; // keep variable in an alloca } diff --git a/src/test/run-make-fulldeps/coverage-llvmir/filecheck.testprog.txt b/src/test/run-make-fulldeps/coverage-llvmir/filecheck.testprog.txt index 84e67e53ea4c6..7a5f219227701 100644 --- a/src/test/run-make-fulldeps/coverage-llvmir/filecheck.testprog.txt +++ b/src/test/run-make-fulldeps/coverage-llvmir/filecheck.testprog.txt @@ -29,20 +29,20 @@ CHECK: @__llvm_prf_nm = private constant CHECK-SAME: section "[[INSTR_PROF_NAME]]", align 1 CHECK: @llvm.used = appending global -CHECK-SAME: i8* bitcast ({ {{.*}} }* @__llvm_coverage_mapping to i8*) -CHECK-SAME: i8* getelementptr inbounds ({{.*}}* @__llvm_prf_nm, i32 0, i32 0) +CHECK-SAME: @__llvm_coverage_mapping +CHECK-SAME: @__llvm_prf_nm CHECK-SAME: section "llvm.metadata" CHECK: [[DEFINE_INTERNAL]] { {{.*}} } @_R{{[a-zA-Z0-9_]+}}testprog14will_be_called() unnamed_addr #{{[0-9]+}} { CHECK-NEXT: start: CHECK-NOT: [[DEFINE_INTERNAL]] -CHECK: %pgocount = load i64, i64* getelementptr inbounds -CHECK-SAME: * @__profc__R{{[a-zA-Z0-9_]+}}testprog14will_be_called, +CHECK: %pgocount = load i64, {{i64\*|ptr}} +CHECK-SAME: @__profc__R{{[a-zA-Z0-9_]+}}testprog14will_be_called, -CHECK: declare void @llvm.instrprof.increment(i8*, i64, i32, i32) #[[LLVM_INSTRPROF_INCREMENT_ATTR:[0-9]+]] +CHECK: declare void @llvm.instrprof.increment({{i8\*|ptr}}, i64, i32, i32) #[[LLVM_INSTRPROF_INCREMENT_ATTR:[0-9]+]] WINDOWS: define linkonce_odr hidden i32 @__llvm_profile_runtime_user() #[[LLVM_PROFILE_RUNTIME_USER_ATTR:[0-9]+]] comdat { -WINDOWS-NEXT: %1 = load i32, i32* @__llvm_profile_runtime +WINDOWS-NEXT: %1 = load i32, {{i32\*|ptr}} @__llvm_profile_runtime WINDOWS-NEXT: ret i32 %1 WINDOWS-NEXT: } diff --git a/src/test/run-make-fulldeps/pgo-indirect-call-promotion/filecheck-patterns.txt b/src/test/run-make-fulldeps/pgo-indirect-call-promotion/filecheck-patterns.txt index 934159207e2d1..e19c78350e9d9 100644 --- a/src/test/run-make-fulldeps/pgo-indirect-call-promotion/filecheck-patterns.txt +++ b/src/test/run-make-fulldeps/pgo-indirect-call-promotion/filecheck-patterns.txt @@ -2,7 +2,7 @@ CHECK: define void @call_a_bunch_of_functions({{.*}} { # Make sure that indirect call promotion inserted a check against the most # frequently called function. -CHECK: %{{.*}} = icmp eq void ()* %{{.*}}, @function_called_always +CHECK: %{{.*}} = icmp eq {{void \(\)\*|ptr}} %{{.*}}, @function_called_always # Check that the call to `function_called_always` was inlined, so that we # directly call `opaque_f1` from the upstream crate. 
@@ -12,5 +12,5 @@ CHECK: call void @opaque_f1() # Same checks as above, repeated for the trait object case CHECK: define void @call_a_bunch_of_trait_methods({{.*}} -CHECK: %{{.*}} = icmp eq void ({}*)* %{{.*}}, {{.*}} @foo +CHECK: %{{.*}} = icmp eq {{void \(\{\}\*\)\*|ptr}} %{{.*}}, {{.*}}@foo CHECK: tail call void @opaque_f2() From 6dc4fe5fe8815b7a64cd58ae3f5a006b946e3330 Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Thu, 28 Apr 2022 11:52:04 +0200 Subject: [PATCH 4/5] Set LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN for lld build as well --- src/bootstrap/native.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index 09b8cbe9014d4..2f3236e720af6 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -949,6 +949,10 @@ impl Step for Lld { .define("LLVM_CONFIG_PATH", llvm_config_shim) .define("LLVM_INCLUDE_TESTS", "OFF"); + if builder.config.llvm_allow_old_toolchain { + cfg.define("LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN", "YES"); + } + // While we're using this horrible workaround to shim the execution of // llvm-config, let's just pile on more. I can't seem to figure out how // to build LLD as a standalone project and also cross-compile it at the From 1ff051a9c53511473a12a37fe772e8c5f3e3e32b Mon Sep 17 00:00:00 2001 From: Nikita Popov Date: Sat, 28 May 2022 10:41:28 +0200 Subject: [PATCH 5/5] Fix documentation of basic stack protector A stack protector is used for N >= 8, not N > 8. --- compiler/rustc_codegen_llvm/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 2b5154a2cf977..913cf4eea13a3 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -304,8 +304,8 @@ impl CodegenBackend for LlvmCodegenBackend { local stack variable in the ABI.) basic - Generate stack canaries in functions with: - - local variables of `[T; N]` type, where `T` is byte-sized and `N` > 8. + Generate stack canaries in functions with local variables of `[T; N]` + type, where `T` is byte-sized and `N` >= 8. none Do not generate stack canaries.