Rollup merge of rust-lang#133102 - RalfJung:aarch64-softfloat, r=davidtwco,wesleywiser

aarch64 softfloat target: always pass floats in int registers

This is part of rust-lang#131058: on softfloat aarch64 targets, the float registers may be unavailable. And yet, LLVM will happily use them to pass float types if the corresponding target features are enabled. That's a problem, as it means enabling or disabling `neon` instructions can change the ABI.

Other targets have a `soft-float` target feature that forces the use of the soft-float ABI no matter whether float registers are enabled or not; aarch64 has nothing like that.

So we follow the aarch64 [softfloat ABI](rust-lang#131058 (comment)) and treat floats like integers for `extern "C"` functions. For the "Rust" ABI, we do the same for scalars, and then just do something reasonable for ScalarPair that avoids the pointer indirection.
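
As a rough illustration (a hypothetical source-level sketch, not code from this PR; the function names are made up), on a softfloat target such as `aarch64-unknown-none-softfloat` functions like the ones below should now get a float-register-free lowering: the `f64` is passed and returned as `i64`, and the small `(f32, f32)` pair is cast to a single integer register rather than going through memory.

```rust
// Hypothetical example, assuming compilation for an aarch64 softfloat target
// (e.g. aarch64-unknown-none-softfloat).
// The f64 argument and return value are lowered as i64 and passed in integer
// registers, so toggling the `neon` target feature no longer changes the ABI.
#[no_mangle]
pub extern "C" fn scale_by_two(x: f64) -> f64 {
    x * 2.0
}

// Under the "Rust" ABI, a (f32, f32) pair fits within the pointer width and is
// cast to one integer register; a (f64, f64) pair would instead be passed
// indirectly (behind a pointer).
pub fn swap(x: (f32, f32)) -> (f32, f32) {
    (x.1, x.0)
}
```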

Cc `@workingjubilee`
jieyouxu authored Nov 22, 2024
2 parents db521ab + 666bcbd commit 9975b65
Showing 3 changed files with 106 additions and 5 deletions.
62 changes: 57 additions & 5 deletions compiler/rustc_target/src/callconv/aarch64.rs
@@ -1,5 +1,10 @@
use std::iter;

use rustc_abi::{BackendRepr, Primitive};

use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, TyAbiInterface};
use crate::spec::{HasTargetSpec, Target};

/// Indicates the variant of the AArch64 ABI we are compiling for.
/// Used to accommodate Apple and Microsoft's deviations from the usual AAPCS ABI.
@@ -15,7 +20,7 @@ pub(crate) enum AbiKind {
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
C: HasDataLayout + HasTargetSpec,
{
arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
let size = arg.layout.size;
@@ -27,18 +32,52 @@ where

let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
// The softfloat ABI treats floats like integers, so they
// do not get homogeneous aggregate treatment.
RegKind::Float => cx.target_spec().abi != "softfloat",
RegKind::Vector => size.bits() == 64 || size.bits() == 128,
};

valid_unit.then_some(Uniform::consecutive(unit, size))
})
}

fn softfloat_float_abi<Ty>(target: &Target, arg: &mut ArgAbi<'_, Ty>) {
if target.abi != "softfloat" {
return;
}
// Do *not* use the float registers for passing arguments, as that would make LLVM pick the ABI
// and its choice depends on whether `neon` instructions are enabled. Instead, we follow the
// AAPCS "softfloat" ABI, which specifies that floats should be passed as equivalently-sized
// integers. Nominally this only exists for "R" profile chips, but sometimes people don't want
// to use hardfloats even if the hardware supports them, so we do this for all softfloat
// targets.
if let BackendRepr::Scalar(s) = arg.layout.backend_repr
&& let Primitive::Float(f) = s.primitive()
{
arg.cast_to(Reg { kind: RegKind::Integer, size: f.size() });
} else if let BackendRepr::ScalarPair(s1, s2) = arg.layout.backend_repr
&& (matches!(s1.primitive(), Primitive::Float(_))
|| matches!(s2.primitive(), Primitive::Float(_)))
{
// This case can only be reached for the Rust ABI, so we can do whatever we want here as
// long as it does not depend on target features (i.e., as long as we do not use float
// registers). So we pass small things in integer registers and large things via pointer
// indirection. This means we lose the nice "pass it as two arguments" optimization, but we
// currently just have no way to combine a `PassMode::Cast` with that optimization (and we
// need a cast since we want to pass the float as an int).
if arg.layout.size.bits() <= target.pointer_width.into() {
arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
} else {
arg.make_indirect();
}
}
}

fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
C: HasDataLayout + HasTargetSpec,
{
if !ret.layout.is_sized() {
// Not touching this...
@@ -51,6 +90,7 @@ where
// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
ret.extend_integer_width_to(32)
}
softfloat_float_abi(cx.target_spec(), ret);
return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
@@ -69,7 +109,7 @@ where
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
C: HasDataLayout + HasTargetSpec,
{
if !arg.layout.is_sized() {
// Not touching this...
@@ -82,6 +122,8 @@ where
// See also: <https://developer.apple.com/documentation/xcode/writing-arm64-code-for-apple-platforms#Pass-Arguments-to-Functions-Correctly>
arg.extend_integer_width_to(32);
}
softfloat_float_abi(cx.target_spec(), arg);

return;
}
if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
@@ -112,7 +154,7 @@ where
pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, kind: AbiKind)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout,
C: HasDataLayout + HasTargetSpec,
{
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, kind);
@@ -125,3 +167,13 @@ where
classify_arg(cx, arg, kind);
}
}

pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
Ty: TyAbiInterface<'a, C> + Copy,
C: HasDataLayout + HasTargetSpec,
{
for arg in fn_abi.args.iter_mut().chain(iter::once(&mut fn_abi.ret)) {
softfloat_float_abi(cx.target_spec(), arg);
}
}
1 change: 1 addition & 0 deletions compiler/rustc_target/src/callconv/mod.rs
@@ -738,6 +738,7 @@ impl<'a, Ty> FnAbi<'a, Ty> {
"x86" => x86::compute_rust_abi_info(cx, self, abi),
"riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
"loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
"aarch64" => aarch64::compute_rust_abi_info(cx, self),
_ => {}
};

48 changes: 48 additions & 0 deletions tests/codegen/aarch64-softfloat.rs
@@ -0,0 +1,48 @@
//@ compile-flags: --target aarch64-unknown-none-softfloat -Zmerge-functions=disabled
//@ needs-llvm-components: aarch64
#![crate_type = "lib"]
#![feature(no_core, lang_items)]
#![no_core]

#[lang = "sized"]
trait Sized {}
#[lang = "copy"]
trait Copy {}
impl Copy for f32 {}
impl Copy for f64 {}

// CHECK: i64 @pass_f64_C(i64 {{[^,]*}})
#[no_mangle]
extern "C" fn pass_f64_C(x: f64) -> f64 {
x
}

// CHECK: i64 @pass_f32_pair_C(i64 {{[^,]*}})
#[no_mangle]
extern "C" fn pass_f32_pair_C(x: (f32, f32)) -> (f32, f32) {
x
}

// CHECK: [2 x i64] @pass_f64_pair_C([2 x i64] {{[^,]*}})
#[no_mangle]
extern "C" fn pass_f64_pair_C(x: (f64, f64)) -> (f64, f64) {
x
}

// CHECK: i64 @pass_f64_Rust(i64 {{[^,]*}})
#[no_mangle]
fn pass_f64_Rust(x: f64) -> f64 {
x
}

// CHECK: i64 @pass_f32_pair_Rust(i64 {{[^,]*}})
#[no_mangle]
fn pass_f32_pair_Rust(x: (f32, f32)) -> (f32, f32) {
x
}

// CHECK: void @pass_f64_pair_Rust(ptr {{[^,]*}}, ptr {{[^,]*}})
#[no_mangle]
fn pass_f64_pair_Rust(x: (f64, f64)) -> (f64, f64) {
x
}
