// src/librustc_trans/mir/rvalue.rs (rustc 1.14.0)
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::Layout;
use rustc::mir;

use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
use common::C_integral;
use debuginfo::DebugLoc;
use adt;
use machine;
use type_::Type;
use type_of;
use tvec;
use value::Value;
use Disr;

use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::LvalueRef;

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    pub fn trans_rvalue(&mut self,
                        bcx: BlockAndBuilder<'bcx, 'tcx>,
                        dest: LvalueRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>,
                        debug_loc: DebugLoc)
                        -> BlockAndBuilder<'bcx, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through the stack
                // (fixable by translating constants into OperandValue::Ref;
                // it is not clear whether we already do that, and if not, why).
                self.store_operand(&bcx, dest.llval, tr_operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
                let cast_ty = bcx.monomorphize(&cast_ty);

                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
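                    // (Illustrative, not from the original source: e.g.
                    // `&x as &fmt::Debug` or `&arr as &[u8]` - the result is
                    // an immediate (data, vtable-or-length) pair, so the
                    // generic operand path already handles it.)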
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                    self.store_operand(&bcx, dest.llval, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
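                // (Illustrative, not from the original source: with
                // `struct MyBox<T: ?Sized>(*mut T)` implementing
                // `CoerceUnsized`, a generic fn bounded by
                // `T: CoerceUnsized<U>` that coerces `MyBox<[u8; 4]>` to
                // `MyBox<[u8]>` only becomes concrete here, after
                // monomorphization.)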
                let operand = self.trans_operand(&bcx, source);
                let operand = operand.pack_if_pair(&bcx);
                bcx.with_block(|bcx| {
                    match operand.val {
                        OperandValue::Pair(..) => bug!(),
                        OperandValue::Immediate(llval) => {
                            // unsize from an immediate structure. We don't
                            // really need a temporary alloca here, but
                            // avoiding it would require us to have
                            // `coerce_unsized_into` use extractvalue to
                            // index into the struct, and this case isn't
                            // important enough for it.
                            debug!("trans_rvalue: creating ugly alloca");
                            let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                            base::store_ty(bcx, llval, lltemp, operand.ty);
                            base::coerce_unsized_into(bcx,
                                                      lltemp, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                        OperandValue::Ref(llref) => {
                            base::coerce_unsized_into(bcx,
                                                      llref, operand.ty,
                                                      dest.llval, cast_ty);
                        }
                    }
                });
                bcx
            }

            mir::Rvalue::Repeat(ref elem, ref count) => {
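                // (Illustrative, not from the original source: e.g.
                // `[0u8; 4096]` - rather than emitting 4096 stores, translate
                // the element once and loop over the destination slots via
                // `slice_for_each`.)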
                let tr_elem = self.trans_operand(&bcx, elem);
                let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
                let size = C_uint(bcx.ccx(), size);
                let base = base::get_dataptr_builder(&bcx, dest.llval);
                let bcx = bcx.map_block(|block| {
                    tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| {
                        self.store_operand_direct(block, llslot, tr_elem);
                        block
                    })
                });
                bcx
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                match *kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
                        let disr = Disr::from(adt_def.variants[variant_index].disr_val);
                        bcx.with_block(|bcx| {
                            adt::trans_set_discr(bcx,
                                dest.ty.to_ty(bcx.tcx()), dest.llval, disr);
                        });
                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                let val = adt::MaybeSizedValue::sized(dest.llval);
                                let field_index = active_field_index.unwrap_or(i);
                                let lldest_i = adt::trans_field_ptr_builder(&bcx,
                                                                            dest.ty.to_ty(bcx.tcx()),
                                                                            val, disr, field_index);
                                self.store_operand(&bcx, lldest_i, op);
                            }
                        }
                    },
                    _ => {
                        // FIXME: we shouldn't need to manually trigger closure instantiations.
                        if let mir::AggregateKind::Closure(def_id, substs) = *kind {
                            use closure;

                            closure::trans_closure_body_via_mir(bcx.ccx(),
                                                                def_id,
                                                                bcx.monomorphize(&substs));
                        }

                        for (i, operand) in operands.iter().enumerate() {
                            let op = self.trans_operand(&bcx, operand);
                            // Do not generate stores and GEPis for zero-sized fields.
                            if !common::type_is_zero_size(bcx.ccx(), op.ty) {
                                // Note: perhaps this should be StructGep, but
                                // in some cases the values here will not be
                                // structs but arrays.
                                let dest = bcx.gepi(dest.llval, &[0, i]);
                                self.store_operand(&bcx, dest, op);
                            }
                        }
                    }
                }
                bcx
            }

            mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
                let outputs = outputs.iter().map(|output| {
                    let lvalue = self.trans_lvalue(&bcx, output);
                    (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
                }).collect();

                let input_vals = inputs.iter().map(|input| {
                    self.trans_operand(&bcx, input).immediate()
                }).collect();

                bcx.with_block(|bcx| {
                    asm::trans_inline_asm(bcx, asm, outputs, input_vals);
                });

                bcx
            }

            _ => {
                assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc);
                self.store_operand(&bcx, dest.llval, temp);
                bcx
            }
        }
    }

    pub fn trans_rvalue_operand(&mut self,
                                bcx: BlockAndBuilder<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>,
                                debug_loc: DebugLoc)
                                -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue),
                "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                let operand = self.trans_operand(&bcx, source);
                debug!("cast operand is {:?}", operand);
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.ty.sty {
                            ty::TyFnDef(def_id, substs, _) => {
                                OperandValue::Immediate(
                                    Callee::def(bcx.ccx(), def_id, substs)
                                        .reify(bcx.ccx()))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than fat pointers currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example `&'a fmt::Debug + Send => &'a fmt::Debug`,
                                // so we need to pointercast the base to ensure
                                // the types match up.
                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = bcx.with_block(|bcx| {
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty)
                                });
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
                            if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                                let ll_cft = ll_cast_ty.field_types();
                                let ll_fft = ll_from_ty.field_types();
                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
                                OperandValue::Pair(data_cast, meta_ptr)
                            } else { // cast to thin pointer
                                // Casting a fat pointer to a thin pointer
                                // extracts the data pointer and pointer-casts
                                // it to the desired pointer type.
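                                // (Illustrative, not from the original source:
                                // `p as *const u8` with `p: *const [u8]` keeps
                                // only the data pointer and drops the length
                                // metadata.)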
                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
                        let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
                        let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
                        let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in {
                            let l = bcx.ccx().layout_of(operand.ty);
                            let discr = match operand.val {
                                OperandValue::Immediate(llval) => llval,
                                OperandValue::Ref(llptr) => {
                                    bcx.with_block(|bcx| {
                                        adt::trans_get_discr(bcx, operand.ty, llptr, None, true)
                                    })
                                }
                                OperandValue::Pair(..) => bug!("Unexpected Pair operand")
                            };
                            let (signed, min, max) = match l {
                                &Layout::CEnum { signed, min, max, .. } => {
                                    (signed, min, max)
                                }
                                _ => bug!("CEnum {:?} is not an enum", operand)
                            };

                            if max > min {
                                // We want `table[e as usize]` to not have
                                // bounds checks, and this is the most
                                // convenient place to put the `assume`.
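                                // (Illustrative, not from the original source:
                                // for `enum E { A, B, C }` the assume below
                                // asserts `discr <= 2`, which lets LLVM prove
                                // that indexing a `[T; 3]` table with
                                // `e as usize` is in bounds, eliding the
                                // bounds check.)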

                                base::call_assume(&bcx, bcx.icmp(
                                    llvm::IntULE,
                                    discr,
                                    C_integral(common::val_ty(discr), max, false)
                                ))
                            }

                            (discr, signed)
                        } else {
                            (operand.immediate(), operand.ty.is_signed())
                        };

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                let srcsz = ll_t_in.int_width();
                                let dstsz = ll_t_out.int_width();
                                if srcsz == dstsz {
                                    bcx.bitcast(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.trunc(llval, ll_t_out)
                                } else if signed {
                                    bcx.sext(llval, ll_t_out)
                                } else {
                                    bcx.zext(llval, ll_t_out)
                                }
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bcx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bcx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bcx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bcx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) =>
                                bcx.inttoptr(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) if signed =>
                                bcx.sitofp(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Float) =>
                                bcx.uitofp(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                bcx.fptosi(llval, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                bcx.fptoui(llval, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                let operand = OperandRef {
                    val: val,
                    ty: cast_ty
                };
                (bcx, operand)
            }

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReErased),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
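                // (Illustrative, not from the original source: for `y = &x`,
                // `x` already lives at an address - its stack slot - so that
                // address itself becomes the value of the reference; no load
                // or copy of `x` is required.)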
                let operand = if common::type_is_sized(bcx.tcx(), ty) {
                    OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    }
                } else {
                    OperandRef {
                        val: OperandValue::Pair(tr_lvalue.llval,
                                                tr_lvalue.llextra),
                        ty: ref_ty,
                    }
                };
                (bcx, operand)
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
                let operand = OperandRef {
                    val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())),
                    ty: bcx.tcx().types.usize,
                };
                (bcx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
                            self.trans_fat_ptr_binop(&bcx, op,
                                                     lhs_addr, lhs_extra,
                                                     rhs_addr, rhs_extra,
                                                     lhs.ty)
                        }
                        _ => bug!()
                    }
                } else {
                    self.trans_scalar_binop(&bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty)
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                };
                (bcx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bcx, lhs);
                let rhs = self.trans_operand(&bcx, rhs);
                let result = self.trans_scalar_checked_binop(&bcx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.ty);
                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]);
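                // (Illustrative, not from the original source: the operand is
                // the `(result, overflowed)` pair that a MIR `Assert`
                // terminator then inspects - e.g. `255u8 + 1` produces
                // `(0, true)` here.)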
                let operand = OperandRef {
                    val: result,
                    ty: operand_ty
                };

                (bcx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bcx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bcx.fneg(lloperand)
                    } else {
                        bcx.neg(lloperand)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let mut llval = None;
                let bcx = bcx.map_block(|bcx| {
                    let Result { bcx, val } = base::malloc_raw_dyn(bcx,
                                                                   llty_ptr,
                                                                   box_ty,
                                                                   llsize,
                                                                   llalign,
                                                                   debug_loc);
                    llval = Some(val);
                    bcx
                });
                let operand = OperandRef {
                    val: OperandValue::Immediate(llval.unwrap()),
                    ty: box_ty,
                };
                (bcx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bcx, operand);
                (bcx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::InlineAsm { .. } => {
                bug!("cannot generate operand from rvalue {:?}", rvalue);
            }
        }
    }

    pub fn trans_scalar_binop(&mut self,
                              bcx: &BlockAndBuilder<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        let is_nil = input_ty.is_nil();
        let is_bool = input_ty.is_bool();
        match op {
            mir::BinOp::Add => if is_float {
                bcx.fadd(lhs, rhs)
            } else {
                bcx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bcx.fsub(lhs, rhs)
            } else {
                bcx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bcx.fmul(lhs, rhs)
            } else {
                bcx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bcx.fdiv(lhs, rhs)
            } else if is_signed {
                bcx.sdiv(lhs, rhs)
            } else {
                bcx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bcx.frem(lhs, rhs)
            } else if is_signed {
                bcx.srem(lhs, rhs)
            } else {
                bcx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bcx.or(lhs, rhs),
            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
            mir::BinOp::Shl => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_lshift(bcx,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Shr => {
                bcx.with_block(|bcx| {
                    common::build_unchecked_rshift(bcx,
                                                   input_ty,
                                                   lhs,
                                                   rhs,
                                                   DebugLoc::None)
                })
            }
            mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
                C_bool(bcx.ccx(), match op {
                    mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                    mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                    _ => unreachable!()
                })
            } else if is_float {
                bcx.fcmp(
                    base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                    lhs, rhs
                )
            } else {
                let (lhs, rhs) = if is_bool {
                    // FIXME(#36856) -- extend the bools into `i8` because
                    // LLVM's i1 comparisons are broken.
                    (bcx.zext(lhs, Type::i8(bcx.ccx())),
                     bcx.zext(rhs, Type::i8(bcx.ccx())))
                } else {
                    (lhs, rhs)
                };

                bcx.icmp(
                    base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                    lhs, rhs
                )
            }
        }
    }

    pub fn trans_fat_ptr_binop(&mut self,
                               bcx: &BlockAndBuilder<'bcx, 'tcx>,
                               op: mir::BinOp,
                               lhs_addr: ValueRef,
                               lhs_extra: ValueRef,
                               rhs_addr: ValueRef,
                               rhs_extra: ValueRef,
                               _input_ty: Ty<'tcx>)
                               -> ValueRef {
        match op {
            mir::BinOp::Eq => {
                bcx.and(
                    bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                    bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Ne => {
                bcx.or(
                    bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
                    bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Le | mir::BinOp::Lt |
            mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
                    mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
                    mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
                    mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
                    _ => bug!(),
                };
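                // (Illustrative, not from the original source: for `Le` this
                // expands to `a.0 <u b.0 || (a.0 == b.0 && a.1 <=u b.1)`.
                // The address comparison must use the *strict* predicate:
                // with a non-strict one, equal addresses with unequal
                // metadata would compare incorrectly.)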

                bcx.or(
                    bcx.icmp(strict_op, lhs_addr, rhs_addr),
                    bcx.and(
                        bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                        bcx.icmp(op, lhs_extra, rhs_extra)
                    )
                )
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn trans_scalar_checked_binop(&mut self,
                                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bcx.ccx().check_overflow() {
            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bcx.ccx(), false));
        }

        // First try performing the operation on constants, which
        // will only succeed if both operands are constant.
        // This is necessary to determine when an overflow Assert
        // would always panic at runtime, so that we can produce a
        // warning.
        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
            return OperandValue::Pair(val, C_bool(bcx.ccx(), of));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
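                // The `llvm.*.with.overflow` intrinsics return an aggregate
                // `{ iN, i1 }` holding the (wrapped) result and an overflow
                // flag, which we split apart with extractvalue below.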
                let res = bcx.call(intrinsic, &[lhs, rhs], None);

                (bcx.extract_value(res, 0),
                 bcx.extract_value(res, 1))
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = bcx.with_block(|bcx| {
                    common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
                });
                let outer_bits = bcx.and(rhs, invert_mask);
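                // (Illustrative, not from the original source: for an i32
                // left-hand side the valid shift amounts are 0..=31, so the
                // inverted mask is !0b11111; any shift amount with a bit set
                // outside the low five bits, i.e. an amount >= 32, leaves
                // `outer_bits` nonzero and is flagged as overflow below.)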

                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };

        OperandValue::Pair(val, of)
    }
}

pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
                                          _bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                          rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::CheckedBinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) |
        mir::Rvalue::Use(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::InlineAsm { .. } =>
            false,
    }

    // (*) this is only true if the type is suitable: an Unsize cast to
    // anything other than a fat pointer cannot produce an operand, and
    // `trans_rvalue` handles that case in place instead.
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bcx.tcx();

    let new_sty = match ty.sty {
        TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };
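    // (Illustrative, not from the original source: `isize`/`usize` have no
    // dedicated intrinsics, so on a 64-bit target a checked `isize` add is
    // remapped above and resolves to `llvm.sadd.with.overflow.i64`.)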

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",

            _ => unreachable!(),
        },
    };

    bcx.ccx().get_intrinsic(&name)
}