// compiler/rustc_codegen_ssa/src/mir/rvalue.rs (rustc 1.63.0)
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};

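// This module lowers MIR rvalues to backend values: `codegen_rvalue` stores the
// result into a destination `PlaceRef`, `codegen_rvalue_unsized` handles unsized
// destinations, and `codegen_rvalue_operand` builds an `OperandRef` directly.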
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
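                // Illustrative example: `let p: &dyn std::fmt::Debug = &0u32;` takes this
                // path, with the destination holding a (data pointer, vtable) scalar pair.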
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
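                // Illustrative example: a user-defined smart pointer such as
                // `struct MyPtr<T: ?Sized> { raw: *const T }` with a `CoerceUnsized`
                // impl is coerced here by `coerce_unsized_into`, field by field.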
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
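                    // Illustrative example: `[0u64; 512]` becomes a single memset of the
                    // whole destination instead of a 512-iteration store loop.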
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
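                    // Illustrative example: `[0x2au8; 64]` becomes a memset with fill value
                    // 0x2a, since the element is exactly one byte wide.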
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
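                // `active_field_index` is `Some` only for union aggregates, e.g.
                // (illustrative) `MyUnion { f: x }`, which initializes just that field.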
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
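                    // A pointer-to-address cast, e.g. (illustrative) `ptr as usize`,
                    // which "exposes" the pointer's address; it lowers to a ptrtoint.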
                    mir::CastKind::PointerExposeAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
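                    // Illustrative example: `let f: fn(u32) -> u32 = my_fn;` reifies the
                    // zero-sized `FnDef` value into an actual function address.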
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
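                    // Illustrative example: casting a non-capturing closure to a function
                    // pointer, e.g. `let f: fn(i32) -> i32 = |x| x + 1;`, takes this path.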
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
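                    // The remaining cases are scalar-to-scalar casts, e.g. (illustrative)
                    // `x as u16`, `x as f64`, `some_enum as i32`, or `addr as *const T`.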
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc
                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.primitive() {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                if !scalar.is_always_valid(bx.cx())
                                    && scalar.valid_range(bx.cx()).end
                                        >= scalar.valid_range(bx.cx()).start
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`s.
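                                    // Illustrative example: for `enum E { A = 1, B = 2 }`,
                                    // `e as usize` gets `assume(1 <= e)` and `assume(e <= 2)`
                                    // on the underlying integer value.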
                                    if scalar.valid_range(bx.cx()).start > 0 {
                                        let enum_value_lower_bound = bx.cx().const_uint_big(
                                            ll_t_in,
                                            scalar.valid_range(bx.cx()).start,
                                        );
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound = bx
                                        .cx()
                                        .const_uint_big(ll_t_in, scalar.valid_range(bx.cx()).end);
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

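            // Taking a reference or raw pointer to a place only materializes the
            // place's address (plus metadata for unsized places); the heavy lifting
            // is shared in `codegen_place_to_pointer` below.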
            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
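            // Checked arithmetic yields a `(value, overflow_flag)` pair; e.g.
            // (illustrative) `a + b` compiled with overflow checks enabled lowers to
            // a `CheckedBinaryOp` followed by an `Assert` on the flag.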
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

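            // `NullOp::SizeOf` / `NullOp::AlignOf` become compile-time `usize`
            // constants; e.g. (illustrative) `std::mem::size_of::<T>()` reaches this
            // arm once the intrinsic has been lowered to a MIR `NullaryOp`.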
            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
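            // `ShallowInitBox` reinterprets a raw heap pointer as `Box<T>` without
            // touching the pointee; it is produced by MIR building for `box`
            // expressions (illustrative), with the allocation done separately.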
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a Local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
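                // Illustrative example: comparing two `*const [u8]` fat pointers first
                // compares the data pointers, then falls back to the lengths on a tie.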
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
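        // Illustrative example: with `-C overflow-checks=off`, a `CheckedBinaryOp`
        // inlined from `core` still shows up in MIR here; its overflow flag is then
        // simply hardwired to `false` below.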
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
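        // Illustrative example: `()` or `[0u8; 0]` built by an `Aggregate`/`Repeat`
        // rvalue has a ZST layout, so it can be materialized directly as an operand.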
    }
}