// Vendored `compiler_builtins` — src/float/cmp.rs (rustc source tree).
// IEEE-754 soft-float comparison intrinsics (libgcc/compiler-rt ABI).
1 #![allow(unreachable_code)]
/// Outcome of a three-way floating-point comparison. `Unordered` means at
/// least one operand was NaN; the two `to_*_abi` encoders below choose which
/// sign the unordered case collapses to.
enum Result {
    Less,
    Equal,
    Greater,
    Unordered,
}

impl Result {
    /// Encode for the `__lesf2`/`__ledf2` family ABI: the result is <= 0
    /// exactly when `a <= b`, so NaN (unordered) must map to a positive value.
    fn to_le_abi(self) -> i32 {
        match self {
            Result::Less => -1,
            Result::Equal => 0,
            Result::Greater => 1,
            // NaN makes `<=` false, so report "greater" (positive).
            Result::Unordered => 1,
        }
    }

    /// Encode for the `__gesf2`/`__gedf2` family ABI: the result is >= 0
    /// exactly when `a >= b`, so NaN (unordered) must map to a negative value.
    fn to_ge_abi(self) -> i32 {
        match self {
            Result::Less => -1,
            Result::Equal => 0,
            Result::Greater => 1,
            // NaN makes `>=` false, so report "less" (negative).
            Result::Unordered => -1,
        }
    }
}
34 fn cmp
<F
: Float
>(a
: F
, b
: F
) -> Result
{
35 let one
= F
::Int
::ONE
;
36 let zero
= F
::Int
::ZERO
;
37 let szero
= F
::SignedInt
::ZERO
;
39 let sign_bit
= F
::SIGN_MASK
as F
::Int
;
40 let abs_mask
= sign_bit
- one
;
41 let exponent_mask
= F
::EXPONENT_MASK
;
42 let inf_rep
= exponent_mask
;
46 let a_abs
= a_rep
& abs_mask
;
47 let b_abs
= b_rep
& abs_mask
;
49 // If either a or b is NaN, they are unordered.
50 if a_abs
> inf_rep
|| b_abs
> inf_rep
{
51 return Result
::Unordered
;
54 // If a and b are both zeros, they are equal.
55 if a_abs
| b_abs
== zero
{
59 let a_srep
= a
.signed_repr();
60 let b_srep
= b
.signed_repr();
62 // If at least one of a and b is positive, we get the same result comparing
63 // a and b as signed integers as we would with a fp_ting-point compare.
64 if a_srep
& b_srep
>= szero
{
67 } else if a_srep
== b_srep
{
72 // Otherwise, both are negative, so we need to flip the sense of the
73 // comparison to get the correct result. (This assumes a twos- or ones-
74 // complement integer representation; if integers are represented in a
75 // sign-magnitude representation, then this flip is incorrect).
76 } else if a_srep
> b_srep
{
78 } else if a_srep
== b_srep
{
85 fn unord
<F
: Float
>(a
: F
, b
: F
) -> bool
{
86 let one
= F
::Int
::ONE
;
88 let sign_bit
= F
::SIGN_MASK
as F
::Int
;
89 let abs_mask
= sign_bit
- one
;
90 let exponent_mask
= F
::EXPONENT_MASK
;
91 let inf_rep
= exponent_mask
;
95 let a_abs
= a_rep
& abs_mask
;
96 let b_abs
= b_rep
& abs_mask
;
98 a_abs
> inf_rep
|| b_abs
> inf_rep
102 pub extern "C" fn __lesf2(a
: f32, b
: f32) -> i32 {
103 cmp(a
, b
).to_le_abi()
106 pub extern "C" fn __gesf2(a
: f32, b
: f32) -> i32 {
107 cmp(a
, b
).to_ge_abi()
110 #[arm_aeabi_alias = __aeabi_fcmpun]
111 pub extern "C" fn __unordsf2(a
: f32, b
: f32) -> i32 {
115 pub extern "C" fn __eqsf2(a
: f32, b
: f32) -> i32 {
116 cmp(a
, b
).to_le_abi()
119 pub extern "C" fn __ltsf2(a
: f32, b
: f32) -> i32 {
120 cmp(a
, b
).to_le_abi()
123 pub extern "C" fn __nesf2(a
: f32, b
: f32) -> i32 {
124 cmp(a
, b
).to_le_abi()
127 pub extern "C" fn __gtsf2(a
: f32, b
: f32) -> i32 {
128 cmp(a
, b
).to_ge_abi()
131 pub extern "C" fn __ledf2(a
: f64, b
: f64) -> i32 {
132 cmp(a
, b
).to_le_abi()
135 pub extern "C" fn __gedf2(a
: f64, b
: f64) -> i32 {
136 cmp(a
, b
).to_ge_abi()
139 #[arm_aeabi_alias = __aeabi_dcmpun]
140 pub extern "C" fn __unorddf2(a
: f64, b
: f64) -> i32 {
144 pub extern "C" fn __eqdf2(a
: f64, b
: f64) -> i32 {
145 cmp(a
, b
).to_le_abi()
148 pub extern "C" fn __ltdf2(a
: f64, b
: f64) -> i32 {
149 cmp(a
, b
).to_le_abi()
152 pub extern "C" fn __nedf2(a
: f64, b
: f64) -> i32 {
153 cmp(a
, b
).to_le_abi()
156 pub extern "C" fn __gtdf2(a
: f64, b
: f64) -> i32 {
157 cmp(a
, b
).to_ge_abi()
// ARM EABI (RTABI) f32 comparison helpers: each returns 1 if the relation
// holds, 0 otherwise (including the NaN case, which the underlying gcc-style
// comparison encodes on the correct side of zero). Upstream wraps these in a
// single #[cfg(target_arch = "arm")] intrinsics! block; as free functions the
// cfg is repeated per item so non-ARM builds stay valid.

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_fcmple(a: f32, b: f32) -> i32 {
    (__lesf2(a, b) <= 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_fcmpge(a: f32, b: f32) -> i32 {
    (__gesf2(a, b) >= 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_fcmpeq(a: f32, b: f32) -> i32 {
    (__eqsf2(a, b) == 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_fcmplt(a: f32, b: f32) -> i32 {
    (__ltsf2(a, b) < 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_fcmpgt(a: f32, b: f32) -> i32 {
    (__gtsf2(a, b) > 0) as i32
}
// ARM EABI (RTABI) f64 comparison helpers: 1 if the relation holds, 0
// otherwise (NaN operands make every relation false). cfg repeated per item
// for the same reason as the f32 AEABI helpers.

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_dcmple(a: f64, b: f64) -> i32 {
    (__ledf2(a, b) <= 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_dcmpge(a: f64, b: f64) -> i32 {
    (__gedf2(a, b) >= 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_dcmpeq(a: f64, b: f64) -> i32 {
    (__eqdf2(a, b) == 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_dcmplt(a: f64, b: f64) -> i32 {
    (__ltdf2(a, b) < 0) as i32
}

#[cfg(target_arch = "arm")]
pub extern "aapcs" fn __aeabi_dcmpgt(a: f64, b: f64) -> i32 {
    (__gtdf2(a, b) > 0) as i32
}
// On hard-float targets LLVM will use native instructions
// for all VFP intrinsics below. Each returns 1 if the native comparison
// holds and 0 otherwise; NaN makes every ordered relation false and `!=`
// true, per IEEE-754.

/// `(a >= b)` as 0/1 for f32.
pub extern "C" fn __gesf2vfp(a: f32, b: f32) -> i32 {
    (a >= b) as i32
}

/// `(a >= b)` as 0/1 for f64.
pub extern "C" fn __gedf2vfp(a: f64, b: f64) -> i32 {
    (a >= b) as i32
}

/// `(a > b)` as 0/1 for f32.
pub extern "C" fn __gtsf2vfp(a: f32, b: f32) -> i32 {
    (a > b) as i32
}

/// `(a > b)` as 0/1 for f64.
pub extern "C" fn __gtdf2vfp(a: f64, b: f64) -> i32 {
    (a > b) as i32
}

/// `(a < b)` as 0/1 for f32.
pub extern "C" fn __ltsf2vfp(a: f32, b: f32) -> i32 {
    (a < b) as i32
}

/// `(a < b)` as 0/1 for f64.
pub extern "C" fn __ltdf2vfp(a: f64, b: f64) -> i32 {
    (a < b) as i32
}

/// `(a <= b)` as 0/1 for f32.
pub extern "C" fn __lesf2vfp(a: f32, b: f32) -> i32 {
    (a <= b) as i32
}

/// `(a <= b)` as 0/1 for f64.
pub extern "C" fn __ledf2vfp(a: f64, b: f64) -> i32 {
    (a <= b) as i32
}

/// `(a != b)` as 0/1 for f32 (true if unordered).
pub extern "C" fn __nesf2vfp(a: f32, b: f32) -> i32 {
    (a != b) as i32
}

/// `(a != b)` as 0/1 for f64 (true if unordered).
pub extern "C" fn __nedf2vfp(a: f64, b: f64) -> i32 {
    (a != b) as i32
}

/// `(a == b)` as 0/1 for f32 (false if unordered).
pub extern "C" fn __eqsf2vfp(a: f32, b: f32) -> i32 {
    (a == b) as i32
}

/// `(a == b)` as 0/1 for f64 (false if unordered).
pub extern "C" fn __eqdf2vfp(a: f64, b: f64) -> i32 {
    (a == b) as i32
}