use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::sym;
use rustc_target::abi::{Abi, Int, LayoutOf, Variants};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

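                // E.g. `[0u8; 1024]` hits the first memset path below (constant
                // zero element), and `[42u8; 1024]` the second (i8-sized element);
                // everything else falls through to an explicit fill loop.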
                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
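                // `active_field_index` is `Some` only for union aggregates, where
                // exactly one field (e.g. the `f` in `U { f: x }`) is written.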
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
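                        // Reification turns the zero-sized `FnDef` value into an
                        // actual function address, e.g. for
                        // `let f: fn(u32) -> u32 = square;`.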
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
                                    bug!("reifying a fn ptr that requires const arguments");
                                }
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
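                        // E.g. `&[u8; 4] -> &[u8]` comes in as an `Immediate` thin
                        // pointer and leaves as a (data, len) pair, while
                        // `&(dyn Debug + Send) -> &dyn Debug` is already a pair and
                        // keeps its existing vtable pointer.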
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.

                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(
                                    lldata,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(
                                    &mut bx,
                                    lldata,
                                    operand.layout.ty,
                                    cast.ty,
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast to thin-ptr: a cast of a fat-ptr to a thin-ptr
                                // is an extraction of the data-ptr and a pointer-cast
                                // of that pointer to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end() >= scalar.valid_range.start()
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`s.
                                    if *scalar.valid_range.start() > 0 {
                                        let enum_value_lower_bound = bx
                                            .cx()
                                            .const_uint_big(ll_t_in, *scalar.valid_range.start());
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound =
                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }
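                        // E.g. for `#[repr(u8)] enum E { A = 1, B = 2 }`, the
                        // `assume`s above tell LLVM that `e as u8` lies in `1..=2`,
                        // which lets it elide bounds checks on `TABLE[e as usize - 1]`.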

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out, cast)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out, cast)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
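            // `CheckedBinaryOp` backs `+`, `-` and `*` when overflow checks are
            // enabled: e.g. `a + b` lowers to a `(sum, overflowed)` pair, and MIR
            // building emits a separate `Assert` on the `bool` afterwards.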
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

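                // `NullOp::Box` (the unstable `box EXPR` form) allocates via the
                // `exchange_malloc` lang item, passing the content's size and
                // alignment, then casts the raw pointer to the box's backend type.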
                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let call = bx.call(r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a `Local` is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`.
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
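            // Ordered comparisons treat the (data pointer, metadata) pair
            // lexicographically; e.g. `a < b` on fat raw pointers lowers to
            // `a.ptr < b.ptr || (a.ptr == b.ptr && a.meta < b.meta)`.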
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with `#[rustc_inherit_overflow_checks]` and inlined from
        // another crate (mostly `core::num` generic/`#[inline]` fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics.
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
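            // Shifts are "checked" by masking rather than by intrinsic: e.g. for a
            // 32-bit LHS, `x << 34` reports overflow because `34 & !31 != 0`, while
            // the shift that is actually emitted uses `34 & 31 == 2`.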
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

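/// Unless saturation is explicitly disabled through the `-Z saturating-float-casts`
/// debug flag, this implements the semantics of `as` (the default since Rust 1.45):
/// e.g. `300.0f32 as u8 == 255`, `-1.0f32 as u8 == 0`, and `f32::NAN as u8 == 0`.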
fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
    int_layout: TyAndLayout<'tcx>,
) -> Bx::Value {
    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    }

    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
    if let Some(try_sat_result) = try_sat_result {
        return try_sat_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256-bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };
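    // E.g. int_max(false, 8) == 0xFF and int_max(true, 8) == 0x7F, while
    // int_min(true, 8) == -128 and int_min(false, 8) == 0.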

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //    a) int_ty::MIN if x < f_min or x is NaN
    //    b) int_ty::MAX if x > f_max
    //    c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids an undef result, because values in the range [f_min, f_max] by definition fit
    // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
    // Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let zero = bx.cx().const_uint(int_ty, 0);

    // The codegen here differs quite a bit depending on whether our builder's
    // `fptosi` and `fptoui` instructions may trap for out-of-bounds values. If
    // they don't trap then we can start doing everything inline with a
    // `select` instruction because it's ok to execute `fptosi` and `fptoui`
    // even if we don't use the results.
    if !bx.fptosui_may_trap(x, int_ty) {
        // Step 1 ...
        let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
        let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
        let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);

        // Step 2: We use two comparisons and two selects, with %s1 being the
        // result:
        // %less_or_nan = fcmp ult %x, %f_min
        // %greater = fcmp ogt %x, %f_max
        // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
        // %s1 = select %greater, int_ty::MAX, %s0
        // Note that %less_or_nan uses an *unordered* comparison. This
        // comparison is true if the operands are not comparable (i.e., if x is
        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
        // x is NaN.
        //
        // Performance note: Unordered comparison can be lowered to a "flipped"
        // comparison and a negation, and the negation can be merged into the
        // select. Therefore, it is not necessarily any more expensive than an
        // ordered ("normal") comparison. Whether these optimizations will be
        // performed is ultimately up to the backend, but at least x86 does
        // perform them.
        let s0 = bx.select(less_or_nan, int_min, fptosui_result);
        let s1 = bx.select(greater, int_max, s0);

        // Step 3: NaN replacement.
        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
        // Therefore we only need to execute this step for signed integer types.
        if signed {
            // LLVM has no isNaN predicate, so we use (x == x) instead.
            let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
            bx.select(cmp, s1, zero)
        } else {
            s1
        }
    } else {
        // In this case we cannot execute `fptosi` or `fptoui` and then later
        // discard the result. The builder is telling us that these instructions
        // will trap on out-of-bounds values, so we need to use basic blocks and
        // control flow to avoid executing the `fptosi` and `fptoui`
        // instructions.
        //
        // The general idea of what we're constructing here is, for f64 -> i32:
        //
        // ;; block so far... %0 is the argument
        // %result = alloca i32, align 4
        // %inbound_lower = fcmp oge double %0, 0xC1E0000000000000
        // %inbound_upper = fcmp ole double %0, 0x41DFFFFFFFC00000
        // ;; match (inbound_lower, inbound_upper) {
        // ;;     (true, true) => %0 can be converted without trapping
        // ;;     (false, false) => %0 is a NaN
        // ;;     (true, false) => %0 is too large
        // ;;     (false, true) => %0 is too small
        // ;; }
        // ;;
        // ;; Check for the (true, true) case and go to %convert if so.
        // %inbounds = and i1 %inbound_lower, %inbound_upper
        // br i1 %inbounds, label %convert, label %specialcase
        //
        // convert:
        // %cvt = call i32 @llvm.wasm.trunc.signed.i32.f64(double %0)
        // store i32 %cvt, i32* %result, align 4
        // br label %done
        //
        // specialcase:
        // ;; Handle the cases where the number is NaN, too large or too small
        //
        // ;; Either (true, false) or (false, true)
        // %is_not_nan = or i1 %inbound_lower, %inbound_upper
        // ;; Figure out which saturated value we are interested in if not `NaN`
        // %saturated = select i1 %inbound_lower, i32 2147483647, i32 -2147483648
        // ;; Figure out between saturated and NaN representations
        // %result_nan = select i1 %is_not_nan, i32 %saturated, i32 0
        // store i32 %result_nan, i32* %result, align 4
        // br label %done
        //
        // done:
        // %r = load i32, i32* %result, align 4
        // ;; ...
        let done = bx.build_sibling_block("float_cast_done");
        let mut convert = bx.build_sibling_block("float_cast_convert");
        let mut specialcase = bx.build_sibling_block("float_cast_specialcase");

        let result = PlaceRef::alloca(bx, int_layout);
        result.storage_live(bx);

        // Use control flow to figure out whether we can execute `fptosi` in a
        // basic block, or whether we go to a different basic block to implement
        // the saturating logic.
        let inbound_lower = bx.fcmp(RealPredicate::RealOGE, x, f_min);
        let inbound_upper = bx.fcmp(RealPredicate::RealOLE, x, f_max);
        let inbounds = bx.and(inbound_lower, inbound_upper);
        bx.cond_br(inbounds, convert.llbb(), specialcase.llbb());

        // Translation of the `convert` basic block
        let cvt = if signed { convert.fptosi(x, int_ty) } else { convert.fptoui(x, int_ty) };
        convert.store(cvt, result.llval, result.align);
        convert.br(done.llbb());

        // Translation of the `specialcase` basic block. Note that like above
        // we try to be a bit clever here for unsigned conversions. In those
        // cases the `int_min` is zero so we don't need two select instructions,
        // just one to choose whether we need `int_max` or not. If
        // `inbound_lower` is true then we're guaranteed to not be `NaN` and
        // since we're greater than zero we must be saturating to `int_max`. If
        // `inbound_lower` is false then we're either NaN or less than zero, so
        // we saturate to zero.
        let result_nan = if signed {
            let is_not_nan = specialcase.or(inbound_lower, inbound_upper);
            let saturated = specialcase.select(inbound_lower, int_max, int_min);
            specialcase.select(is_not_nan, saturated, zero)
        } else {
            specialcase.select(inbound_lower, int_max, int_min)
        };
        specialcase.store(result_nan, result.llval, result.align);
        specialcase.br(done.llbb());

        // Translation of the `done` basic block, positioning ourselves to
        // continue from that point as well.
        *bx = done;
        let ret = bx.load(result.llval, result.align);
        result.storage_dead(bx);
        ret
    }
}