#![allow(unreachable_code)]

use float::Float;
use int::{CastInto, Int};

#[derive(Clone, Copy)]
enum Result {
    Less,
    Equal,
    Greater,
    Unordered,
}

impl Result {
    fn to_le_abi(self) -> i32 {
        match self {
            Result::Less => -1,
            Result::Equal => 0,
            Result::Greater => 1,
            Result::Unordered => 1,
        }
    }

    fn to_ge_abi(self) -> i32 {
        match self {
            Result::Less => -1,
            Result::Equal => 0,
            Result::Greater => 1,
            Result::Unordered => -1,
        }
    }
}
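
// The two ABI conversions differ only in how an unordered (NaN) comparison is
// encoded: `to_le_abi` maps Unordered to 1 so that `__lesf2(a, b) <= 0` is false
// for NaN operands, while `to_ge_abi` maps it to -1 so that `__gesf2(a, b) >= 0`
// is likewise false. This matches the libgcc/compiler-rt soft-float comparison ABI.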

fn cmp<F: Float>(a: F, b: F) -> Result
where
    u32: CastInto<F::Int>,
    F::Int: CastInto<u32>,
    i32: CastInto<F::Int>,
    F::Int: CastInto<i32>,
{
    let one = F::Int::ONE;
    let zero = F::Int::ZERO;
    let szero = F::SignedInt::ZERO;

    let sign_bit = F::SIGN_MASK as F::Int;
    let abs_mask = sign_bit - one;
    let exponent_mask = F::EXPONENT_MASK;
    let inf_rep = exponent_mask;
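    // For f32, for example, these work out to sign_bit = 0x8000_0000,
    // abs_mask = 0x7FFF_FFFF and inf_rep (all exponent bits set) = 0x7F80_0000.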

    let a_rep = a.repr();
    let b_rep = b.repr();
    let a_abs = a_rep & abs_mask;
    let b_abs = b_rep & abs_mask;

    // If either a or b is NaN, they are unordered.
    if a_abs > inf_rep || b_abs > inf_rep {
        return Result::Unordered;
    }

    // If a and b are both zeros, they are equal.
    if a_abs | b_abs == zero {
        return Result::Equal;
    }

    let a_srep = a.signed_repr();
    let b_srep = b.signed_repr();

    // If at least one of a and b is positive, we get the same result comparing
    // a and b as signed integers as we would with a floating-point compare.
    if a_srep & b_srep >= szero {
        if a_srep < b_srep {
            return Result::Less;
        } else if a_srep == b_srep {
            return Result::Equal;
        } else {
            return Result::Greater;
        }
    }
    // Otherwise, both are negative, so we need to flip the sense of the
    // comparison to get the correct result. (This assumes a twos- or ones-
    // complement integer representation; if integers are represented in a
    // sign-magnitude representation, then this flip is incorrect).
    else {
        if a_srep > b_srep {
            return Result::Less;
        } else if a_srep == b_srep {
            return Result::Equal;
        } else {
            return Result::Greater;
        }
    }
}
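
// Worked example of the sign flip in `cmp` above (f32, assuming the usual IEEE-754
// encoding): -1.0 is 0xBF80_0000 and -2.0 is 0xC000_0000. As signed integers,
// -2.0's representation (-0x4000_0000) is greater than -1.0's (-0x4080_0000), so for
// two negative operands the integer ordering is the reverse of the floating-point
// ordering, which is why the negative branch flips the sense of the comparison.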

fn unord<F: Float>(a: F, b: F) -> bool
where
    u32: CastInto<F::Int>,
    F::Int: CastInto<u32>,
    i32: CastInto<F::Int>,
    F::Int: CastInto<i32>,
{
    let one = F::Int::ONE;

    let sign_bit = F::SIGN_MASK as F::Int;
    let abs_mask = sign_bit - one;
    let exponent_mask = F::EXPONENT_MASK;
    let inf_rep = exponent_mask;

    let a_rep = a.repr();
    let b_rep = b.repr();
    let a_abs = a_rep & abs_mask;
    let b_abs = b_rep & abs_mask;

    a_abs > inf_rep || b_abs > inf_rep
}
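
// NaN detection relies on every NaN having all exponent bits set plus a non-zero
// mantissa, so its sign-masked representation is strictly greater than inf_rep;
// e.g. a typical f32 quiet NaN is 0x7FC0_0000, while inf_rep is 0x7F80_0000.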

intrinsics! {
    pub extern "C" fn __lesf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __gesf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_ge_abi()
    }

    #[arm_aeabi_alias = __aeabi_fcmpun]
    pub extern "C" fn __unordsf2(a: f32, b: f32) -> i32 {
        unord(a, b) as i32
    }

    pub extern "C" fn __eqsf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __ltsf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __nesf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __gtsf2(a: f32, b: f32) -> i32 {
        cmp(a, b).to_ge_abi()
    }

    pub extern "C" fn __ledf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __gedf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_ge_abi()
    }

    #[arm_aeabi_alias = __aeabi_dcmpun]
    pub extern "C" fn __unorddf2(a: f64, b: f64) -> i32 {
        unord(a, b) as i32
    }

    pub extern "C" fn __eqdf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __ltdf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __nedf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_le_abi()
    }

    pub extern "C" fn __gtdf2(a: f64, b: f64) -> i32 {
        cmp(a, b).to_ge_abi()
    }
}
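
// On soft-float targets the compiler typically lowers a floating-point comparison into
// one of the libcalls above plus an integer compare, e.g. `a < b` becomes roughly
// `__ltsf2(a, b) < 0`; the Unordered encodings chosen in to_le_abi/to_ge_abi make such
// integer compares come out false whenever either operand is NaN.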

#[cfg(target_arch = "arm")]
intrinsics! {
    pub extern "aapcs" fn __aeabi_fcmple(a: f32, b: f32) -> i32 {
        (__lesf2(a, b) <= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmpge(a: f32, b: f32) -> i32 {
        (__gesf2(a, b) >= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmpeq(a: f32, b: f32) -> i32 {
        (__eqsf2(a, b) == 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmplt(a: f32, b: f32) -> i32 {
        (__ltsf2(a, b) < 0) as i32
    }

    pub extern "aapcs" fn __aeabi_fcmpgt(a: f32, b: f32) -> i32 {
        (__gtsf2(a, b) > 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmple(a: f64, b: f64) -> i32 {
        (__ledf2(a, b) <= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmpge(a: f64, b: f64) -> i32 {
        (__gedf2(a, b) >= 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmpeq(a: f64, b: f64) -> i32 {
        (__eqdf2(a, b) == 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmplt(a: f64, b: f64) -> i32 {
        (__ltdf2(a, b) < 0) as i32
    }

    pub extern "aapcs" fn __aeabi_dcmpgt(a: f64, b: f64) -> i32 {
        (__gtdf2(a, b) > 0) as i32
    }
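
    // The AEABI comparison helpers above return a boolean-style 0 or 1 rather than the
    // tri-state values of the __*f2 functions, so each wrapper folds the libcall result
    // back into a single flag.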

    // On hard-float targets LLVM will use native instructions
    // for all VFP intrinsics below

    pub extern "C" fn __gesf2vfp(a: f32, b: f32) -> i32 {
        (a >= b) as i32
    }

    pub extern "C" fn __gedf2vfp(a: f64, b: f64) -> i32 {
        (a >= b) as i32
    }

    pub extern "C" fn __gtsf2vfp(a: f32, b: f32) -> i32 {
        (a > b) as i32
    }

    pub extern "C" fn __gtdf2vfp(a: f64, b: f64) -> i32 {
        (a > b) as i32
    }

    pub extern "C" fn __ltsf2vfp(a: f32, b: f32) -> i32 {
        (a < b) as i32
    }

    pub extern "C" fn __ltdf2vfp(a: f64, b: f64) -> i32 {
        (a < b) as i32
    }

    pub extern "C" fn __lesf2vfp(a: f32, b: f32) -> i32 {
        (a <= b) as i32
    }

    pub extern "C" fn __ledf2vfp(a: f64, b: f64) -> i32 {
        (a <= b) as i32
    }

    pub extern "C" fn __nesf2vfp(a: f32, b: f32) -> i32 {
        (a != b) as i32
    }

    pub extern "C" fn __nedf2vfp(a: f64, b: f64) -> i32 {
        (a != b) as i32
    }

    pub extern "C" fn __eqsf2vfp(a: f32, b: f32) -> i32 {
        (a == b) as i32
    }

    pub extern "C" fn __eqdf2vfp(a: f64, b: f64) -> i32 {
        (a == b) as i32
    }
}
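
// A minimal sanity-check sketch of the LE/GE ABI conventions above. It assumes the
// `intrinsics!` macro expands the comparison functions into ordinary Rust functions
// visible in this module and that the crate can build a test harness.
#[cfg(test)]
mod sanity {
    use super::*;

    #[test]
    fn le_and_ge_abi() {
        // Ordered operands: the sign of the result encodes the ordering.
        assert!(__lesf2(1.0, 2.0) < 0);
        assert_eq!(__lesf2(2.0, 2.0), 0);
        assert!(__lesf2(3.0, 2.0) > 0);

        // Unordered operands: the LE ABI reports a positive value (so `<= 0` is
        // false) and the GE ABI reports a negative one (so `>= 0` is false).
        let nan: f32 = 0.0 / 0.0;
        assert!(__lesf2(nan, 2.0) > 0);
        assert!(__gesf2(nan, 2.0) < 0);
        assert_eq!(__unordsf2(nan, 2.0), 1);
    }
}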