Add missing functions for f16 and f128 #587

Closed
wants to merge 13 commits
10 changes: 5 additions & 5 deletions README.md
@@ -232,9 +232,9 @@ These builtins are needed to support 128-bit integers.

These builtins are needed to support `f16` and `f128`, which are in the process of being added to Rust.

- [ ] addtf3.c
- [ ] comparetf2.c
- [ ] divtf3.c
- [x] addtf3.c
- [x] comparetf2.c
- [x] divtf3.c
- [x] extenddftf2.c
- [x] extendhfsf2.c
- [x] extendhftf2.c
@@ -249,13 +249,13 @@ These builtins are needed to support `f16` and `f128`, which are in the process
- [ ] floatsitf.c
- [ ] floatunditf.c
- [ ] floatunsitf.c
- [ ] multf3.c
- [x] multf3.c
- [ ] powitf2.c
- [ ] ppc/fixtfdi.c
- [ ] ppc/fixunstfdi.c
- [ ] ppc/floatditf.c
- [ ] ppc/floatunditf.c
- [ ] subtf3.c
- [x] subtf3.c
- [x] truncdfhf2.c
- [x] truncsfhf2.c
- [x] trunctfdf2.c
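For context, the checklist above names the libcalls that `f16`/`f128` arithmetic lowers to when a target has no native support. The sketch below is illustrative only (it is not part of this PR, and the exact lowering is target- and backend-dependent); it shows roughly which checklist symbols ordinary `f128` code ends up calling:

```rust
// Rough sketch of which checklist symbols plain f128 code lowers to on
// targets without native binary128 support. Target-dependent; for
// illustration only, not part of this diff.
#![feature(f128)]

fn demo(a: f128, b: f128) -> f128 {
    let sum = a + b;   // typically lowers to a call to __addtf3
    let prod = a * b;  // typically lowers to a call to __multf3
    let _lt = a < b;   // typically goes through __lttf2 (comparetf2.c / cmp.rs)
    sum / prod         // typically lowers to a call to __divtf3
}
```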
12 changes: 0 additions & 12 deletions build.rs
@@ -479,10 +479,6 @@ mod c {
("__floatsitf", "floatsitf.c"),
("__floatunditf", "floatunditf.c"),
("__floatunsitf", "floatunsitf.c"),
("__addtf3", "addtf3.c"),
("__multf3", "multf3.c"),
("__subtf3", "subtf3.c"),
("__divtf3", "divtf3.c"),
("__powitf2", "powitf2.c"),
("__fe_getround", "fp_mode.c"),
("__fe_raise_inexact", "fp_mode.c"),
@@ -500,30 +496,22 @@ mod c {
if target_arch == "mips64" {
sources.extend(&[
("__netf2", "comparetf2.c"),
("__addtf3", "addtf3.c"),
("__multf3", "multf3.c"),
("__subtf3", "subtf3.c"),
("__fixtfsi", "fixtfsi.c"),
("__floatsitf", "floatsitf.c"),
("__fixunstfsi", "fixunstfsi.c"),
("__floatunsitf", "floatunsitf.c"),
("__fe_getround", "fp_mode.c"),
("__divtf3", "divtf3.c"),
]);
}

if target_arch == "loongarch64" {
sources.extend(&[
("__netf2", "comparetf2.c"),
("__addtf3", "addtf3.c"),
("__multf3", "multf3.c"),
("__subtf3", "subtf3.c"),
("__fixtfsi", "fixtfsi.c"),
("__floatsitf", "floatsitf.c"),
("__fixunstfsi", "fixunstfsi.c"),
("__floatunsitf", "floatunsitf.c"),
("__fe_getround", "fp_mode.c"),
("__divtf3", "divtf3.c"),
]);
}

27 changes: 16 additions & 11 deletions src/float/add.rs
@@ -1,5 +1,5 @@
use crate::float::Float;
use crate::int::{CastInto, Int};
use crate::int::{CastInto, Int, MinInt};

/// Returns `a + b`
fn add<F: Float>(a: F, b: F) -> F
@@ -57,17 +57,17 @@ where
}

// zero + anything = anything
if a_abs == Int::ZERO {
if a_abs == MinInt::ZERO {
// but we need to get the sign right for zero + zero
if b_abs == Int::ZERO {
if b_abs == MinInt::ZERO {
return F::from_repr(a.repr() & b.repr());
} else {
return b;
}
}

// anything + zero = anything
if b_abs == Int::ZERO {
if b_abs == MinInt::ZERO {
return a;
}
}
@@ -113,10 +113,10 @@ where
// Shift the significand of b by the difference in exponents, with a sticky
// bottom bit to get rounding correct.
let align = a_exponent.wrapping_sub(b_exponent).cast();
if align != Int::ZERO {
if align != MinInt::ZERO {
if align < bits {
let sticky =
F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != MinInt::ZERO);
b_significand = (b_significand >> align.cast()) | sticky;
} else {
b_significand = one; // sticky; b is known to be non-zero.
@@ -125,8 +125,8 @@
if subtraction {
a_significand = a_significand.wrapping_sub(b_significand);
// If a == -b, return +zero.
if a_significand == Int::ZERO {
return F::from_repr(Int::ZERO);
if a_significand == MinInt::ZERO {
return F::from_repr(MinInt::ZERO);
}

// If partial cancellation occurred, we need to left-shift the result
@@ -143,8 +143,8 @@

// If the addition carried up, we need to right-shift the result and
// adjust the exponent:
if a_significand & implicit_bit << 4 != Int::ZERO {
let sticky = F::Int::from_bool(a_significand & one != Int::ZERO);
if a_significand & implicit_bit << 4 != MinInt::ZERO {
let sticky = F::Int::from_bool(a_significand & one != MinInt::ZERO);
a_significand = a_significand >> 1 | sticky;
a_exponent += 1;
}
@@ -160,7 +160,7 @@
// need to shift the significand.
let shift = (1 - a_exponent).cast();
let sticky =
F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != MinInt::ZERO);
a_significand = a_significand >> shift.cast() | sticky;
a_exponent = 0;
}
@@ -203,6 +203,11 @@ intrinsics! {
add(a, b)
}

#[cfg(not(feature = "no-f16-f128"))]
pub extern "C" fn __addtf3(a: f128, b: f128) -> f128 {
add(a, b)
}
Review comment (Member):

This will unconditionally codegen f16 and f128 usage, breaking cg_clif and cg_gcc. Maybe add a cargo feature to disable these new intrinsics?
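One possible shape for that suggestion, sketched here only as an illustration, would reuse the `no-f16-f128` feature name this diff already checks; the module names below are hypothetical and not part of the PR:

```rust
// Illustrative sketch only: compile the new f128 intrinsics solely when the
// `no-f16-f128` cargo feature is NOT enabled, so backends that cannot yet
// codegen these types (e.g. cg_clif, cg_gcc) can opt out.
#[cfg(not(feature = "no-f16-f128"))]
mod f16_f128_intrinsics {
    // __addtf3, __subtf3, __multf3, __divtf3, the comparetf2 helpers, ...
}

#[cfg(feature = "no-f16-f128")]
mod f16_f128_intrinsics {
    // Empty stand-in: building with `--features no-f16-f128` emits no
    // f16/f128 symbols at all.
}
```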


#[cfg(target_arch = "arm")]
pub extern "C" fn __addsf3vfp(a: f32, b: f32) -> f32 {
a + b
40 changes: 39 additions & 1 deletion src/float/cmp.rs
@@ -1,7 +1,7 @@
#![allow(unreachable_code)]

use crate::float::Float;
use crate::int::Int;
use crate::int::MinInt;

#[derive(Clone, Copy)]
enum Result {
@@ -172,6 +172,44 @@ intrinsics! {
}
}

#[cfg(not(feature = "no-f16-f128"))]
intrinsics! {
#[avr_skip]
pub extern "C" fn __letf2(a: f128, b: f128) -> i32 {
cmp(a, b).to_le_abi()
}

#[avr_skip]
pub extern "C" fn __getf2(a: f128, b: f128) -> i32 {
cmp(a, b).to_ge_abi()
}

#[avr_skip]
pub extern "C" fn __unordtf2(a: f128, b: f128) -> i32 {
unord(a, b) as i32
}

#[avr_skip]
pub extern "C" fn __eqtf2(a: f128, b: f128) -> i32 {
cmp(a, b).to_le_abi()
}

#[avr_skip]
pub extern "C" fn __lttf2(a: f128, b: f128) -> i32 {
cmp(a, b).to_le_abi()
}

#[avr_skip]
pub extern "C" fn __netf2(a: f128, b: f128) -> i32 {
cmp(a, b).to_le_abi()
}

#[avr_skip]
pub extern "C" fn __gttf2(a: f128, b: f128) -> i32 {
cmp(a, b).to_ge_abi()
}
}

#[cfg(target_arch = "arm")]
intrinsics! {
pub extern "aapcs" fn __aeabi_fcmple(a: f32, b: f32) -> i32 {