// float/cmp.rs — soft-float comparison intrinsics (libcompiler_builtins).
1 #![allow(unreachable_code)]
3 use int
::{Int, CastInto}
;
15 fn to_le_abi(self) -> i32 {
20 Result
::Unordered
=> 1
24 fn to_ge_abi(self) -> i32 {
29 Result
::Unordered
=> -1
34 fn cmp
<F
: Float
>(a
: F
, b
: F
) -> Result
where
35 u32: CastInto
<F
::Int
>,
36 F
::Int
: CastInto
<u32>,
37 i32: CastInto
<F
::Int
>,
38 F
::Int
: CastInto
<i32>,
40 let one
= F
::Int
::ONE
;
41 let zero
= F
::Int
::ZERO
;
42 let szero
= F
::SignedInt
::ZERO
;
44 let sign_bit
= F
::SIGN_MASK
as F
::Int
;
45 let abs_mask
= sign_bit
- one
;
46 let exponent_mask
= F
::EXPONENT_MASK
;
47 let inf_rep
= exponent_mask
;
51 let a_abs
= a_rep
& abs_mask
;
52 let b_abs
= b_rep
& abs_mask
;
54 // If either a or b is NaN, they are unordered.
55 if a_abs
> inf_rep
|| b_abs
> inf_rep
{
56 return Result
::Unordered
59 // If a and b are both zeros, they are equal.
60 if a_abs
| b_abs
== zero
{
64 let a_srep
= a
.signed_repr();
65 let b_srep
= b
.signed_repr();
67 // If at least one of a and b is positive, we get the same result comparing
68 // a and b as signed integers as we would with a fp_ting-point compare.
69 if a_srep
& b_srep
>= szero
{
72 } else if a_srep
== b_srep
{
75 return Result
::Greater
79 // Otherwise, both are negative, so we need to flip the sense of the
80 // comparison to get the correct result. (This assumes a twos- or ones-
81 // complement integer representation; if integers are represented in a
82 // sign-magnitude representation, then this flip is incorrect).
86 } else if a_srep
== b_srep
{
89 return Result
::Greater
93 fn unord
<F
: Float
>(a
: F
, b
: F
) -> bool
where
94 u32: CastInto
<F
::Int
>,
95 F
::Int
: CastInto
<u32>,
96 i32: CastInto
<F
::Int
>,
97 F
::Int
: CastInto
<i32>,
99 let one
= F
::Int
::ONE
;
101 let sign_bit
= F
::SIGN_MASK
as F
::Int
;
102 let abs_mask
= sign_bit
- one
;
103 let exponent_mask
= F
::EXPONENT_MASK
;
104 let inf_rep
= exponent_mask
;
106 let a_rep
= a
.repr();
107 let b_rep
= b
.repr();
108 let a_abs
= a_rep
& abs_mask
;
109 let b_abs
= b_rep
& abs_mask
;
111 a_abs
> inf_rep
|| b_abs
> inf_rep
115 pub extern "C" fn __lesf2(a
: f32, b
: f32) -> i32 {
116 cmp(a
, b
).to_le_abi()
119 pub extern "C" fn __gesf2(a
: f32, b
: f32) -> i32 {
120 cmp(a
, b
).to_ge_abi()
123 #[arm_aeabi_alias = __aeabi_fcmpun]
124 pub extern "C" fn __unordsf2(a
: f32, b
: f32) -> i32 {
128 pub extern "C" fn __eqsf2(a
: f32, b
: f32) -> i32 {
129 cmp(a
, b
).to_le_abi()
132 pub extern "C" fn __ltsf2(a
: f32, b
: f32) -> i32 {
133 cmp(a
, b
).to_le_abi()
136 pub extern "C" fn __nesf2(a
: f32, b
: f32) -> i32 {
137 cmp(a
, b
).to_le_abi()
140 pub extern "C" fn __gtsf2(a
: f32, b
: f32) -> i32 {
141 cmp(a
, b
).to_ge_abi()
144 pub extern "C" fn __ledf2(a
: f64, b
: f64) -> i32 {
145 cmp(a
, b
).to_le_abi()
148 pub extern "C" fn __gedf2(a
: f64, b
: f64) -> i32 {
149 cmp(a
, b
).to_ge_abi()
152 #[arm_aeabi_alias = __aeabi_dcmpun]
153 pub extern "C" fn __unorddf2(a
: f64, b
: f64) -> i32 {
157 pub extern "C" fn __eqdf2(a
: f64, b
: f64) -> i32 {
158 cmp(a
, b
).to_le_abi()
161 pub extern "C" fn __ltdf2(a
: f64, b
: f64) -> i32 {
162 cmp(a
, b
).to_le_abi()
165 pub extern "C" fn __nedf2(a
: f64, b
: f64) -> i32 {
166 cmp(a
, b
).to_le_abi()
169 pub extern "C" fn __gtdf2(a
: f64, b
: f64) -> i32 {
170 cmp(a
, b
).to_ge_abi()
/// AEABI boolean wrapper: 1 iff `a <= b` (NaN compares false, since
/// `__lesf2` returns 1 for unordered).
// NOTE(review): upstream gates all __aeabi_* helpers behind one
// `#[cfg(target_arch = "arm")]` block; the scrape kept only this attribute.
#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_fcmple(a: f32, b: f32) -> i32 {
    (__lesf2(a, b) <= 0) as i32
}
180 pub extern "aapcs" fn __aeabi_fcmpge(a
: f32, b
: f32) -> i32 {
181 (__gesf2(a
, b
) >= 0) as i32
184 pub extern "aapcs" fn __aeabi_fcmpeq(a
: f32, b
: f32) -> i32 {
185 (__eqsf2(a
, b
) == 0) as i32
188 pub extern "aapcs" fn __aeabi_fcmplt(a
: f32, b
: f32) -> i32 {
189 (__ltsf2(a
, b
) < 0) as i32
192 pub extern "aapcs" fn __aeabi_fcmpgt(a
: f32, b
: f32) -> i32 {
193 (__gtsf2(a
, b
) > 0) as i32
196 pub extern "aapcs" fn __aeabi_dcmple(a
: f64, b
: f64) -> i32 {
197 (__ledf2(a
, b
) <= 0) as i32
200 pub extern "aapcs" fn __aeabi_dcmpge(a
: f64, b
: f64) -> i32 {
201 (__gedf2(a
, b
) >= 0) as i32
204 pub extern "aapcs" fn __aeabi_dcmpeq(a
: f64, b
: f64) -> i32 {
205 (__eqdf2(a
, b
) == 0) as i32
208 pub extern "aapcs" fn __aeabi_dcmplt(a
: f64, b
: f64) -> i32 {
209 (__ltdf2(a
, b
) < 0) as i32
212 pub extern "aapcs" fn __aeabi_dcmpgt(a
: f64, b
: f64) -> i32 {
213 (__gtdf2(a
, b
) > 0) as i32
// On hard-float targets LLVM will use native instructions
// for all VFP intrinsics below
/// VFP helper: 1 iff `a >= b` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __gesf2vfp(a: f32, b: f32) -> i32 {
    (a >= b) as i32
}
/// VFP helper: 1 iff `a >= b` for `f64` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __gedf2vfp(a: f64, b: f64) -> i32 {
    (a >= b) as i32
}
/// VFP helper: 1 iff `a > b` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __gtsf2vfp(a: f32, b: f32) -> i32 {
    (a > b) as i32
}
/// VFP helper: 1 iff `a > b` for `f64` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __gtdf2vfp(a: f64, b: f64) -> i32 {
    (a > b) as i32
}
/// VFP helper: 1 iff `a < b` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __ltsf2vfp(a: f32, b: f32) -> i32 {
    (a < b) as i32
}
/// VFP helper: 1 iff `a < b` for `f64` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __ltdf2vfp(a: f64, b: f64) -> i32 {
    (a < b) as i32
}
/// VFP helper: 1 iff `a <= b` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __lesf2vfp(a: f32, b: f32) -> i32 {
    (a <= b) as i32
}
/// VFP helper: 1 iff `a <= b` for `f64` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __ledf2vfp(a: f64, b: f64) -> i32 {
    (a <= b) as i32
}
/// VFP helper: 1 iff `a != b` (NaN compares unequal to everything).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __nesf2vfp(a: f32, b: f32) -> i32 {
    (a != b) as i32
}
/// VFP helper: 1 iff `a != b` for `f64` (NaN compares unequal to everything).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __nedf2vfp(a: f64, b: f64) -> i32 {
    (a != b) as i32
}
/// VFP helper: 1 iff `a == b` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __eqsf2vfp(a: f32, b: f32) -> i32 {
    (a == b) as i32
}
/// VFP helper: 1 iff `a == b` for `f64` (NaN compares false).
// NOTE(review): body lost in scrape; restored from upstream.
pub extern "C" fn __eqdf2vfp(a: f64, b: f64) -> i32 {
    (a == b) as i32
}