From e9214a147b09f8020f82b450e7c9e16290649909 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Thu, 12 Sep 2019 19:04:30 +0300 Subject: [PATCH] codegen: be more explicit about setting giving names to allocas. --- src/librustc_codegen_llvm/abi.rs | 2 +- src/librustc_codegen_llvm/builder.rs | 23 +++++-------------- src/librustc_codegen_llvm/intrinsic.rs | 2 +- src/librustc_codegen_ssa/mir/block.rs | 18 +++++++-------- src/librustc_codegen_ssa/mir/mod.rs | 26 +++++++++++++--------- src/librustc_codegen_ssa/mir/operand.rs | 2 +- src/librustc_codegen_ssa/mir/place.rs | 8 ++----- src/librustc_codegen_ssa/mir/rvalue.rs | 2 +- src/librustc_codegen_ssa/traits/builder.rs | 5 ++--- src/test/codegen/personality_lifetimes.rs | 9 ++++---- 10 files changed, 42 insertions(+), 55 deletions(-) diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index ff87afe0c444b..2ca517dc3b1a7 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -229,7 +229,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // We instead thus allocate some scratch space... let scratch_size = cast.size(bx); let scratch_align = cast.align(bx); - let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align); + let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index e13a5ecc2ebfd..423a01ad1f937 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -387,23 +387,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) } - fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); - bx.dynamic_alloca(ty, name, align) + bx.dynamic_alloca(ty, align) } - fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildAlloca(self.llbuilder, ty, - name.as_ptr()) - }; + let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } @@ -412,16 +406,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, - name: &str, align: Align) -> &'ll Value { unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, - name.as_ptr()) - }; + let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index fc0b9ffd11d83..5fbfe9138f2a4 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -871,7 +871,7 @@ fn codegen_msvc_try( // More information can be found in libstd's seh.rs implementation. 
let i64p = bx.type_ptr_to(bx.type_i64()); let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(i64p, "slot", ptr_align); + let slot = bx.alloca(i64p, ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); normal.ret(bx.const_i32(0)); diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index c41e46398467a..d3c59c526c8ad 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -276,7 +276,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llslot = match op.val { Immediate(_) | Pair(..) => { let scratch = - PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout); op.val.store(&mut bx, scratch); scratch.llval } @@ -767,7 +767,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + let tmp = PlaceRef::alloca(&mut bx, op.layout); op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } @@ -925,7 +925,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Immediate(_) | Pair(..) => { match arg.mode { PassMode::Indirect(..) | PassMode::Cast(_) => { - let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + let scratch = PlaceRef::alloca(bx, arg.layout); op.val.store(bx, scratch); (scratch.llval, scratch.align, true) } @@ -940,7 +940,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. - let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + let scratch = PlaceRef::alloca(bx, arg.layout); base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align, op.layout, MemFlags::empty()); (scratch.llval, scratch.align, true) @@ -1017,7 +1017,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32 ])); - let slot = PlaceRef::alloca(bx, layout, "personalityslot"); + let slot = PlaceRef::alloca(bx, layout); self.personality_slot = Some(slot); slot } @@ -1116,7 +1116,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return if fn_ret.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. - let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + let tmp = PlaceRef::alloca(bx, fn_ret.layout); tmp.storage_live(bx); llargs.push(tmp.llval); ReturnDest::IndirectOperand(tmp, index) @@ -1124,7 +1124,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Currently, intrinsics always need a location to store // the result, so we create a temporary `alloca` for the // result. 
- let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + let tmp = PlaceRef::alloca(bx, fn_ret.layout); tmp.storage_live(bx); ReturnDest::IndirectOperand(tmp, index) } else { @@ -1174,7 +1174,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { LocalRef::Operand(None) => { let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref())); assert!(!dst_layout.ty.has_erasable_regions()); - let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); + let place = PlaceRef::alloca(bx, dst_layout); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); let op = bx.load_operand(place); @@ -1227,7 +1227,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if let PassMode::Cast(_) = ret_ty.mode { - let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); + let tmp = PlaceRef::alloca(bx, ret_ty.layout); tmp.storage_live(bx); bx.store_arg_ty(&ret_ty, llval, tmp); let op = bx.load_operand(tmp); diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 00e9ca01f4dd2..df72980383e29 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -268,11 +268,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( debug!("alloc: {:?} ({}) -> place", local, name); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str()); + PlaceRef::alloca_unsized_indirect(&mut bx, layout); + bx.set_var_name(indirect_place.llval, name); // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); + let place = PlaceRef::alloca(&mut bx, layout); + bx.set_var_name(place.llval, name); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, @@ -293,14 +295,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { - let indirect_place = PlaceRef::alloca_unsized_indirect( - &mut bx, - layout, - &format!("{:?}", local), - ); + let indirect_place = PlaceRef::alloca_unsized_indirect(&mut bx, layout); + bx.set_var_name(indirect_place.llval, format_args!("{:?}", local)); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) + let place = PlaceRef::alloca(&mut bx, layout); + bx.set_var_name(place.llval, format_args!("{:?}", local)); + LocalRef::Place(place) } } else { // If this is an immediate local, we do not create an @@ -470,7 +471,8 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); + bx.set_var_name(place.llval, name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; @@ -558,11 +560,13 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); - let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); + let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout); + bx.set_var_name(tmp.llval, name); indirect_operand.store(bx, tmp); tmp } else { - let tmp = PlaceRef::alloca(bx, arg.layout, &name); + let tmp = PlaceRef::alloca(bx, 
arg.layout); + bx.set_var_name(tmp.llval, name); if fx.fn_ty.c_variadic && last_arg_idx.map(|idx| arg_index == idx).unwrap_or(false) { let va_list_did = match tcx.lang_items().va_list() { Some(did) => did, diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index a8ab3ea10ed16..aa85d0284f7f9 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -367,7 +367,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align); bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index b8e10d3430292..d3e6cef8195d7 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -71,11 +71,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { pub fn alloca>( bx: &mut Bx, layout: TyLayout<'tcx>, - name: &str ) -> Self { - debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi); Self::new_sized(tmp, layout) } @@ -83,13 +81,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { pub fn alloca_unsized_indirect>( bx: &mut Bx, layout: TyLayout<'tcx>, - name: &str, ) -> Self { - debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); let ptr_layout = bx.cx().layout_of(ptr_ty); - Self::alloca(bx, ptr_layout, name) + Self::alloca(bx, ptr_layout) } pub fn len>( diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index e0ad2527229ba..da8a7971107be 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -64,7 +64,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // index into the struct, and this case isn't // important enough for it. 
debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + let scratch = PlaceRef::alloca(&mut bx, operand.layout); scratch.storage_live(&mut bx); operand.val.store(&mut bx, scratch); base::coerce_unsized_into(&mut bx, scratch, dest); diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 3a144f0b0e0aa..1886701fb3a88 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -109,13 +109,12 @@ pub trait BuilderMethods<'a, 'tcx>: rhs: Self::Value, ) -> (Self::Value, Self::Value); - fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; - fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; + fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn array_alloca( &mut self, ty: Self::Type, len: Self::Value, - name: &str, align: Align, ) -> Self::Value; diff --git a/src/test/codegen/personality_lifetimes.rs b/src/test/codegen/personality_lifetimes.rs index 0d3d537a2723d..05888c0e733ad 100644 --- a/src/test/codegen/personality_lifetimes.rs +++ b/src/test/codegen/personality_lifetimes.rs @@ -20,12 +20,13 @@ pub fn test() { let _s = S; // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just // in the first one. + // CHECK: [[SLOT:%[0-9]+]] = alloca { i8*, i32 } // CHECK-LABEL: cleanup: - // CHECK: bitcast{{.*}}personalityslot - // CHECK-NEXT: call void @llvm.lifetime.start + // CHECK: [[BITCAST:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8* + // CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST]]) // CHECK-LABEL: cleanup1: - // CHECK: bitcast{{.*}}personalityslot - // CHECK-NEXT: call void @llvm.lifetime.start + // CHECK: [[BITCAST1:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8* + // CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST1]]) might_unwind(); let _t = S; might_unwind();