// src/librustc_codegen_ssa/mir/rvalue.rs (rustc 1.47.0)
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::sym;
use rustc_target::abi::{Abi, Int, LayoutOf, Variants};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
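    /// Codegens `rvalue` into the destination place `dest` and returns the builder.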
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through the stack. (Fixable by
                // codegen'ing constants into `OperandValue::Ref`; why don't we do that already,
                // if we don't?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = base::from_immediate(&mut bx, v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

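                // Otherwise, fall back to emitting a loop that stores the element
                // into each index of the destination array in turn.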
                let count =
                    self.monomorphize(&count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

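    /// Codegens an rvalue of unsized type into the indirect destination
    /// `indirect_dest`; only `Rvalue::Use` is expected here.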
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

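    /// Codegens `rvalue` directly to an `OperandRef`, without a destination place.
    /// Only rvalues for which `rvalue_creates_operand` returns `true` may be passed.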
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match operand.layout.ty.kind {
                            ty::FnDef(def_id, substs) => {
                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
                                    bug!("reifying a fn ptr that requires const arguments");
                                }
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match operand.layout.ty.kind {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.

                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(
                                    lldata,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(
                                    &mut bx,
                                    lldata,
                                    operand.layout.ty,
                                    cast.ty,
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast to thin pointer.
                                // A fat-ptr to thin-ptr cast extracts the data pointer and
                                // pointer-casts it to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end() > scalar.valid_range.start()
                                {
                                    // We want `table[e as usize]` to not have bounds
                                    // checks, and this is the most convenient place
                                    // to put the `assume`.
                                    let ll_t_in_const =
                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
                                    let cmp = bx.icmp(IntPredicate::IntULE, llval, ll_t_in_const);
                                    bx.assume(cmp);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out, cast)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out, cast)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(&content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let call = bx.call(r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
                (bx, operand)
            }
        }
    }

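    /// Returns the length of the array or slice `place` as a `usize` value. For
    /// operand-backed locals of array type the length is emitted as a constant.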
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

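    /// Codegens a binary operation on two scalar (immediate) values of type `input_ty`.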
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

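    /// Codegens a comparison of two fat pointers, each given as a (data, extra) pair.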
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

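    /// Codegens an overflow-checked binary operation, returning an
    /// `OperandValue::Pair` of the result value and an overflow flag.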
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
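    /// Returns `true` if `rvalue` can be codegened directly as an operand
    /// (see `codegen_rvalue_operand`), rather than needing a destination place.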
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(&ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

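/// Codegens a cast of the float value `x` (of type `float_ty`) to `int_ty`, saturating
/// on overflow and mapping NaN to zero, unless the `saturating_float_casts` debugging
/// option is explicitly disabled (in which case a plain `fptosi`/`fptoui` is emitted).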
fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
    int_layout: TyAndLayout<'tcx>,
) -> Bx::Value {
    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    }

    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
    if let Some(try_sat_result) = try_sat_result {
        return try_sat_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal
    // to a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };
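    // For example, with a signed 8-bit destination (int_width == 8):
    //   int_max(true, 8) == i128::MAX as u128 >> 120 == 127 == i8::MAX
    //   int_min(true, 8) == i128::MIN >> 120 == -128 == i8::MIN (arithmetic shift)
    // and with an unsigned 8-bit destination:
    //   int_max(false, 8) == u128::MAX >> 120 == 255 == u8::MAX, int_min(false, 8) == 0.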

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //    a) int_ty::MIN if x < f_min or x is NaN
    //    b) int_ty::MAX if x > f_max
    //    c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids undef in the result because values in range [f_min, f_max] by definition fit
    // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
    // Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let zero = bx.cx().const_uint(int_ty, 0);

    // The codegen here differs quite a bit depending on whether our builder's
    // `fptosi` and `fptoui` instructions may trap for out-of-bounds values. If
    // they don't trap then we can start doing everything inline with a
    // `select` instruction because it's ok to execute `fptosi` and `fptoui`
    // even if we don't use the results.
    if !bx.fptosui_may_trap(x, int_ty) {
        // Step 1 ...
        let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
        let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
        let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);

        // Step 2: We use two comparisons and two selects, with %s1 being the
        // result:
        //     %less_or_nan = fcmp ult %x, %f_min
        //     %greater = fcmp ogt %x, %f_max
        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
        //     %s1 = select %greater, int_ty::MAX, %s0
        // Note that %less_or_nan uses an *unordered* comparison. This
        // comparison is true if the operands are not comparable (i.e., if x is
        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
        // x is NaN.
        //
        // Performance note: Unordered comparison can be lowered to a "flipped"
        // comparison and a negation, and the negation can be merged into the
        // select. Therefore, it is not necessarily any more expensive than an
        // ordered ("normal") comparison. Whether these optimizations will be
        // performed is ultimately up to the backend, but at least x86 does
        // perform them.
        let s0 = bx.select(less_or_nan, int_min, fptosui_result);
        let s1 = bx.select(greater, int_max, s0);

        // Step 3: NaN replacement.
        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
        // Therefore we only need to execute this step for signed integer types.
        if signed {
            // LLVM has no isNaN predicate, so we use (x == x) instead
            let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
            bx.select(cmp, s1, zero)
        } else {
            s1
        }
    } else {
        // In this case we cannot execute `fptosi` or `fptoui` and then later
        // discard the result. The builder is telling us that these instructions
        // will trap on out-of-bounds values, so we need to use basic blocks and
        // control flow to avoid executing the `fptosi` and `fptoui`
        // instructions.
        //
        // The general idea of what we're constructing here is, for f64 -> i32:
        //
        //      ;; block so far... %0 is the argument
        //      %result = alloca i32, align 4
        //      %inbound_lower = fcmp oge double %0, 0xC1E0000000000000
        //      %inbound_upper = fcmp ole double %0, 0x41DFFFFFFFC00000
        //      ;; match (inbound_lower, inbound_upper) {
        //      ;;     (true, true) => %0 can be converted without trapping
        //      ;;     (false, false) => %0 is a NaN
        //      ;;     (true, false) => %0 is too large
        //      ;;     (false, true) => %0 is too small
        //      ;; }
        //      ;;
        //      ;; Check for the (true, true) case and go to %convert if so.
        //      %inbounds = and i1 %inbound_lower, %inbound_upper
        //      br i1 %inbounds, label %convert, label %specialcase
        //
        //  convert:
        //      %cvt = call i32 @llvm.wasm.trunc.signed.i32.f64(double %0)
        //      store i32 %cvt, i32* %result, align 4
        //      br label %done
        //
        //  specialcase:
        //      ;; Handle the cases where the number is NaN, too large or too small
        //
        //      ;; Either (true, false) or (false, true)
        //      %is_not_nan = or i1 %inbound_lower, %inbound_upper
        //      ;; Figure out which saturated value we are interested in if not `NaN`
        //      %saturated = select i1 %inbound_lower, i32 2147483647, i32 -2147483648
        //      ;; Figure out between saturated and NaN representations
        //      %result_nan = select i1 %is_not_nan, i32 %saturated, i32 0
        //      store i32 %result_nan, i32* %result, align 4
        //      br label %done
        //
        //  done:
        //      %r = load i32, i32* %result, align 4
        //      ;; ...
        let done = bx.build_sibling_block("float_cast_done");
        let mut convert = bx.build_sibling_block("float_cast_convert");
        let mut specialcase = bx.build_sibling_block("float_cast_specialcase");

        let result = PlaceRef::alloca(bx, int_layout);
        result.storage_live(bx);

        // Use control flow to figure out whether we can execute `fptosi` in a
        // basic block, or whether we go to a different basic block to implement
        // the saturating logic.
        let inbound_lower = bx.fcmp(RealPredicate::RealOGE, x, f_min);
        let inbound_upper = bx.fcmp(RealPredicate::RealOLE, x, f_max);
        let inbounds = bx.and(inbound_lower, inbound_upper);
        bx.cond_br(inbounds, convert.llbb(), specialcase.llbb());

        // Translation of the `convert` basic block
        let cvt = if signed { convert.fptosi(x, int_ty) } else { convert.fptoui(x, int_ty) };
        convert.store(cvt, result.llval, result.align);
        convert.br(done.llbb());

        // Translation of the `specialcase` basic block. Note that like above
        // we try to be a bit clever here for unsigned conversions. In those
        // cases the `int_min` is zero so we don't need two select instructions,
        // just one to choose whether we need `int_max` or not. If
        // `inbound_lower` is true then we're guaranteed to not be `NaN` and
        // since we're greater than zero we must be saturating to `int_max`. If
        // `inbound_lower` is false then we're either NaN or less than zero, so
        // we saturate to zero.
        let result_nan = if signed {
            let is_not_nan = specialcase.or(inbound_lower, inbound_upper);
            let saturated = specialcase.select(inbound_lower, int_max, int_min);
            specialcase.select(is_not_nan, saturated, zero)
        } else {
            specialcase.select(inbound_lower, int_max, int_min)
        };
        specialcase.store(result_nan, result.llval, result.align);
        specialcase.br(done.llbb());

        // Translation of the `done` basic block, positioning ourselves to
        // continue from that point as well.
        *bx = done;
        let ret = bx.load(result.llval, result.align);
        result.storage_dead(bx);
        ret
    }
}