// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
use rustc::ty::layout::{self, LayoutOf};
use rustc::mir;
use rustc::middle::lang_items::ExchangeMallocFnLangItem;
use rustc_apfloat::{ieee, Float, Status, Round};
use std::{u128, i128};

use base;
use builder::Builder;
use callee;
use common::{self, val_ty};
use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big};
use consts;
use monomorphize;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;

impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
    pub fn trans_rvalue(&mut self,
                        bx: Builder<'a, 'tcx>,
                        dest: PlaceRef<'tcx>,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> Builder<'a, 'tcx>
    {
        debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
               Value(dest.llval), rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let tr_operand = self.trans_operand(&bx, operand);
                // FIXME: consider not copying constants through the stack
                // (fixable by translating constants into OperandValue::Ref;
                // why don't we do that yet, if we don't?)
                tr_operand.val.store(&bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if dest.layout.is_llvm_scalar_pair() {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
                    temp.val.store(&bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(&bx, source);
                match operand.val {
                    OperandValue::Pair(..) |
                    OperandValue::Immediate(_) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("trans_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
                        scratch.storage_live(&bx);
                        operand.val.store(&bx, scratch);
                        base::coerce_unsized_into(&bx, scratch, dest);
                        scratch.storage_dead(&bx);
                    }
                    OperandValue::Ref(llref, align) => {
                        let source = PlaceRef::new_sized(llref, operand.layout, align);
                        base::coerce_unsized_into(&bx, source, dest);
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let tr_elem = self.trans_operand(&bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval;

                if let OperandValue::Immediate(v) = tr_elem.val {
                    let align = C_i32(bx.cx, dest.align.abi() as i32);
                    let size = C_usize(bx.cx, dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
                        let fill = C_u8(bx.cx, 0);
                        base::call_memset(&bx, start, fill, size, align, false);
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = base::from_immediate(&bx, v);
                    if common::val_ty(v) == Type::i8(bx.cx) {
                        base::call_memset(&bx, start, v, size, align, false);
                        return bx;
                    }
                }

                let count = C_usize(bx.cx, count);
                let end = dest.project_index(&bx, count).llval;

                let header_bx = bx.build_sibling_block("repeat_loop_header");
                let body_bx = bx.build_sibling_block("repeat_loop_body");
                let next_bx = bx.build_sibling_block("repeat_loop_next");

                bx.br(header_bx.llbb());
                let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]);

                let keep_going = header_bx.icmp(llvm::IntNE, current, end);
                header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

                tr_elem.val.store(&body_bx,
                                  PlaceRef::new_sized(current, tr_elem.layout, dest.align));

                let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]);
                body_bx.br(header_bx.llbb());
                header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
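
                // The blocks built above form the loop:
                //
                //   repeat_loop_header:
                //       current = phi [ start, <entry> ], [ next, repeat_loop_body ]
                //       br (current != end), repeat_loop_body, repeat_loop_next
                //   repeat_loop_body:
                //       store tr_elem -> *current
                //       next = gep current, 1
                //       br repeat_loop_header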

                next_bx
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
                        dest.trans_set_discr(&bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None)
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.trans_operand(&bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        op.val.store(&bx, dest.project_field(&bx, field_index));
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue));
                let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
                temp.val.store(&bx, dest);
                bx
            }
        }
    }

    pub fn trans_rvalue_operand(&mut self,
                                bx: Builder<'a, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
    {
        assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.trans_operand(&bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer => {
                        match operand.layout.ty.sty {
                            ty::TyFnDef(def_id, substs) => {
                                if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") {
                                    bug!("reifying a fn ptr that requires \
                                          const arguments");
                                }
                                OperandValue::Immediate(
                                    callee::resolve_and_get_fn(bx.cx, def_id, substs))
                            }
                            _ => {
                                bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
                            }
                        }
                    }
                    mir::CastKind::ClosureFnPointer => {
                        match operand.layout.ty.sty {
                            ty::TyClosure(def_id, substs) => {
                                let instance = monomorphize::resolve_closure(
                                    bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
                                OperandValue::Immediate(callee::get_fn(bx.cx, instance))
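                                // (Reached when, e.g., the non-capturing closure
                                // `|x: u32| x + 1` is cast to `fn(u32) -> u32`.)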
                            }
                            _ => {
                                bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
                            }
                        }
                    }
                    mir::CastKind::UnsafeFnPointer => {
                        // this is a no-op at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        assert!(cast.is_llvm_scalar_pair());
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug

                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(lldata,
                                    cast.scalar_pair_element_llvm_type(bx.cx, 0));
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" thin-to-fat pointer unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
                                    operand.layout.ty, cast.ty);
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in trans_rvalue_operand",
                                     operand);
                            }
                        }
                    }
                    mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if cast.is_llvm_scalar_pair() {
                                let data_cast = bx.pointercast(data_ptr,
                                    cast.scalar_pair_element_llvm_type(bx.cx, 0));
                                OperandValue::Pair(data_cast, meta)
                            } else { // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llcast_ty = cast.immediate_llvm_type(bx.cx);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("Unexpected non-Pair operand")
                        }
                    }
                    mir::CastKind::Misc => {
                        assert!(cast.is_llvm_immediate());
                        let ll_t_out = cast.immediate_llvm_type(bx.cx);
                        if operand.layout.abi == layout::Abi::Uninhabited {
                            return (bx, OperandRef {
                                val: OperandValue::Immediate(C_undef(ll_t_out)),
                                layout: cast,
                            });
                        }
                        let r_t_in = CastTy::from_ty(operand.layout.ty)
                            .expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx);
                        match operand.layout.variants {
                            layout::Variants::Single { index } => {
                                if let Some(def) = operand.layout.ty.ty_adt_def() {
                                    let discr_val = def
                                        .discriminant_for_variant(bx.cx.tcx, index)
                                        .val;
                                    let discr = C_uint_big(ll_t_out, discr_val);
                                    return (bx, OperandRef {
                                        val: OperandValue::Immediate(discr),
                                        layout: cast,
                                    });
                                }
                            }
                            layout::Variants::Tagged { .. } |
                            layout::Variants::NicheFilling { .. } => {},
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let layout::Int(_, s) = scalar.value {
                                signed = s;

                                if scalar.valid_range.end() > scalar.valid_range.start() {
                                    // We want `table[e as usize]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`.

                                    base::call_assume(&bx, bx.icmp(
                                        llvm::IntULE,
                                        llval,
                                        C_uint_big(ll_t_in, *scalar.valid_range.end())
                                    ));
                                }
                            }
                        }
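
                        // (e.g. for a fieldless enum with three variants the
                        // scalar's valid range is 0..=2, so the code above
                        // emits `assume(%val <= 2)` before the cast.)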

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, signed)
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = ll_t_in.float_width();
                                let dstsz = ll_t_out.float_width();
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Ptr(_), CastTy::Ptr(_)) |
                            (CastTy::FnPtr, CastTy::Ptr(_)) |
                            (CastTy::RPtr(_), CastTy::Ptr(_)) =>
                                bx.pointercast(llval, ll_t_out),
                            (CastTy::Ptr(_), CastTy::Int(_)) |
                            (CastTy::FnPtr, CastTy::Int(_)) =>
                                bx.ptrtoint(llval, ll_t_out),
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Float) =>
                                cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
                            (CastTy::Float, CastTy::Int(IntTy::I)) =>
                                cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
                            (CastTy::Float, CastTy::Int(_)) =>
                                cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef {
                    val,
                    layout: cast
                })
            }

            mir::Rvalue::Ref(_, bk, ref place) => {
                let tr_place = self.trans_place(&bx, place);

                let ty = tr_place.layout.ty;

                // Note: places are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let val = if !bx.cx.type_has_metadata(ty) {
                    OperandValue::Immediate(tr_place.llval)
                } else {
                    OperandValue::Pair(tr_place.llval, tr_place.llextra)
                };
                (bx, OperandRef {
                    val,
                    layout: self.cx.layout_of(self.cx.tcx.mk_ref(
                        self.cx.tcx.types.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
                    )),
                })
            }

            mir::Rvalue::Len(ref place) => {
                let size = self.evaluate_array_len(&bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx.layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bx, lhs);
                let rhs = self.trans_operand(&bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (OperandValue::Pair(lhs_addr, lhs_extra),
                     OperandValue::Pair(rhs_addr, rhs_extra)) => {
                        self.trans_fat_ptr_binop(&bx, op,
                                                 lhs_addr, lhs_extra,
                                                 rhs_addr, rhs_extra,
                                                 lhs.layout.ty)
                    }

                    (OperandValue::Immediate(lhs_val),
                     OperandValue::Immediate(rhs_val)) => {
                        self.trans_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!()
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx.layout_of(
                        op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(&bx, lhs);
                let rhs = self.trans_operand(&bx, rhs);
                let result = self.trans_scalar_checked_binop(&bx, op,
                                                             lhs.immediate(), rhs.immediate(),
                                                             lhs.layout.ty);
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef {
                    val: result,
                    layout: bx.cx.layout_of(operand_ty)
                };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(&bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_fp();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => if is_float {
                        bx.fneg(lloperand)
                    } else {
                        bx.neg(lloperand)
                    }
                };
                (bx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    layout: operand.layout,
                })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
                let discr = self.trans_place(&bx, place)
                    .trans_get_discr(&bx, discr_ty);
                (bx, OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty)
                })
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                assert!(bx.cx.type_is_sized(ty));
                let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes());
                let tcx = bx.tcx();
                (bx, OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(tcx.types.usize),
                })
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                let (size, align) = bx.cx.size_and_align_of(content_ty);
                let llsize = C_usize(bx.cx, size.bytes());
                let llalign = C_usize(bx.cx, align.abi());
                let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = box_layout.llvm_type(bx.cx);

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = callee::get_fn(bx.cx, instance);
                let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
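
                // i.e. call the `exchange_malloc` lang item with the computed
                // size and alignment, then cast the returned `*mut u8` to the
                // box's pointer type.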

                let operand = OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: box_layout,
                };
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(&bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx);
                (bx, OperandRef::new_zst(self.cx,
                    self.cx.layout_of(self.monomorphize(&ty))))
            }
        }
    }

    fn evaluate_array_len(&mut self,
                          bx: &Builder<'a, 'tcx>,
                          place: &mir::Place<'tcx>) -> ValueRef
    {
        // ZSTs are passed as operands and require special handling,
        // because trans_place() panics if a Local is an operand.
        if let mir::Place::Local(index) = *place {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::TyArray(_, n) = op.layout.ty.sty {
                    let n = n.val.unwrap_u64();
                    return common::C_usize(bx.cx, n);
                }
            }
        }
        // use common size calculation for non zero-sized types
        let tr_value = self.trans_place(&bx, place);
        return tr_value.len(bx.cx);
    }

    pub fn trans_scalar_binop(&mut self,
                              bx: &Builder<'a, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        let is_nil = input_ty.is_nil();
        match op {
            mir::BinOp::Add => if is_float {
                bx.fadd(lhs, rhs)
            } else {
                bx.add(lhs, rhs)
            },
            mir::BinOp::Sub => if is_float {
                bx.fsub(lhs, rhs)
            } else {
                bx.sub(lhs, rhs)
            },
            mir::BinOp::Mul => if is_float {
                bx.fmul(lhs, rhs)
            } else {
                bx.mul(lhs, rhs)
            },
            mir::BinOp::Div => if is_float {
                bx.fdiv(lhs, rhs)
            } else if is_signed {
                bx.sdiv(lhs, rhs)
            } else {
                bx.udiv(lhs, rhs)
            },
            mir::BinOp::Rem => if is_float {
                bx.frem(lhs, rhs)
            } else if is_signed {
                bx.srem(lhs, rhs)
            } else {
                bx.urem(lhs, rhs)
            },
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
                C_bool(bx.cx, match op {
                    mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                    mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
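                    // `()` has exactly one value, so `Eq`/`Le`/`Ge` are
                    // constant-true and `Ne`/`Lt`/`Gt` constant-false.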
                    _ => unreachable!()
                })
            } else if is_float {
                bx.fcmp(
                    base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                    lhs, rhs
                )
            } else {
                bx.icmp(
                    base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                    lhs, rhs
                )
            }
        }
    }

    pub fn trans_fat_ptr_binop(&mut self,
                               bx: &Builder<'a, 'tcx>,
                               op: mir::BinOp,
                               lhs_addr: ValueRef,
                               lhs_extra: ValueRef,
                               rhs_addr: ValueRef,
                               rhs_extra: ValueRef,
                               _input_ty: Ty<'tcx>)
                               -> ValueRef {
        match op {
            mir::BinOp::Eq => {
                bx.and(
                    bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                    bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Ne => {
                bx.or(
                    bx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
                    bx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
                )
            }
            mir::BinOp::Le | mir::BinOp::Lt |
            mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
                    mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
                    mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
                    mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
                    _ => bug!(),
                };
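
                // For example, `Le` pairs ULE (for the extra word) with strict
                // ULT on the address word, so equal addresses fall through to
                // the extra-word comparison.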

                bx.or(
                    bx.icmp(strict_op, lhs_addr, rhs_addr),
                    bx.and(
                        bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
                        bx.icmp(op, lhs_extra, rhs_extra)
                    )
                )
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn trans_scalar_checked_binop(&mut self,
                                      bx: &Builder<'a, 'tcx>,
                                      op: mir::BinOp,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      input_ty: Ty<'tcx>) -> OperandValue {
        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx.check_overflow {
            let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, C_bool(bx.cx, false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!()
                };
                let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
                let res = bx.call(intrinsic, &[lhs, rhs], None);

                (bx.extract_value(res, 0),
                 bx.extract_value(res, 1))
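
                // (Each llvm.*.with.overflow intrinsic returns a `{ iN, i1 }`
                // pair: the wrapped result plus an overflow flag.)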
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = val_ty(lhs);
                let rhs_llty = val_ty(rhs);
                let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
                let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => {
                bug!("Operator `{:?}` is not a checkable operator", op)
            }
        };
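
        // (For the shift arms above, "overflow" means shift-amount bits set
        // outside `0..bit_width`: with a 32-bit LHS, `invert_mask` is `!31`,
        // so any set bit of `rhs` above the low five raises the flag.)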

        OperandValue::Pair(val, of)
    }

    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx);
                let ty = self.monomorphize(&ty);
                self.cx.layout_of(ty).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
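        //
        // For example, `[0u8; 4]` is a `Repeat` of a non-ZST, so it must be
        // translated into a destination place, while `[0u8; 0]` is a ZST and
        // can be produced as a (dummy) operand.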
    }
}

#[derive(Copy, Clone)]
enum OverflowOp {
    Add, Sub, Mul
}

fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef {
    use syntax::ast::IntTy::*;
    use syntax::ast::UintTy::*;
    use rustc::ty::{TyInt, TyUint};

    let tcx = bx.tcx();

    let new_sty = match ty.sty {
        TyInt(Isize) => match &tcx.sess.target.target.target_pointer_width[..] {
            "16" => TyInt(I16),
            "32" => TyInt(I32),
            "64" => TyInt(I64),
            _ => panic!("unsupported target word size")
        },
        TyUint(Usize) => match &tcx.sess.target.target.target_pointer_width[..] {
            "16" => TyUint(U16),
            "32" => TyUint(U32),
            "64" => TyUint(U64),
            _ => panic!("unsupported target word size")
        },
        ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
        _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
    };

    let name = match oop {
        OverflowOp::Add => match new_sty {
            TyInt(I8) => "llvm.sadd.with.overflow.i8",
            TyInt(I16) => "llvm.sadd.with.overflow.i16",
            TyInt(I32) => "llvm.sadd.with.overflow.i32",
            TyInt(I64) => "llvm.sadd.with.overflow.i64",
            TyInt(I128) => "llvm.sadd.with.overflow.i128",

            TyUint(U8) => "llvm.uadd.with.overflow.i8",
            TyUint(U16) => "llvm.uadd.with.overflow.i16",
            TyUint(U32) => "llvm.uadd.with.overflow.i32",
            TyUint(U64) => "llvm.uadd.with.overflow.i64",
            TyUint(U128) => "llvm.uadd.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Sub => match new_sty {
            TyInt(I8) => "llvm.ssub.with.overflow.i8",
            TyInt(I16) => "llvm.ssub.with.overflow.i16",
            TyInt(I32) => "llvm.ssub.with.overflow.i32",
            TyInt(I64) => "llvm.ssub.with.overflow.i64",
            TyInt(I128) => "llvm.ssub.with.overflow.i128",

            TyUint(U8) => "llvm.usub.with.overflow.i8",
            TyUint(U16) => "llvm.usub.with.overflow.i16",
            TyUint(U32) => "llvm.usub.with.overflow.i32",
            TyUint(U64) => "llvm.usub.with.overflow.i64",
            TyUint(U128) => "llvm.usub.with.overflow.i128",

            _ => unreachable!(),
        },
        OverflowOp::Mul => match new_sty {
            TyInt(I8) => "llvm.smul.with.overflow.i8",
            TyInt(I16) => "llvm.smul.with.overflow.i16",
            TyInt(I32) => "llvm.smul.with.overflow.i32",
            TyInt(I64) => "llvm.smul.with.overflow.i64",
            TyInt(I128) => "llvm.smul.with.overflow.i128",

            TyUint(U8) => "llvm.umul.with.overflow.i8",
            TyUint(U16) => "llvm.umul.with.overflow.i16",
            TyUint(U32) => "llvm.umul.with.overflow.i32",
            TyUint(U64) => "llvm.umul.with.overflow.i64",
            TyUint(U128) => "llvm.umul.with.overflow.i128",

            _ => unreachable!(),
        },
    };

    bx.cx.get_intrinsic(&name)
}

fn cast_int_to_float(bx: &Builder,
                     signed: bool,
                     x: ValueRef,
                     int_ty: Type,
                     float_ty: Type) -> ValueRef {
    // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
    // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
    // LLVM's uitofp produces undef in those cases, so we manually check for that case.
    let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32;
    if is_u128_to_f32 {
        // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
        // and for everything else LLVM's uitofp works just fine.
        use rustc_apfloat::ieee::Single;
        use rustc_apfloat::Float;
        const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
                                            << (Single::MAX_EXP - Single::PRECISION as i16);
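        // With Single::PRECISION = 24 and Single::MAX_EXP = 127, this is
        // (2^25 - 1) << 103, i.e. f32::MAX + 2^103: exactly half an ULP
        // above f32::MAX.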
        let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
        let overflow = bx.icmp(llvm::IntUGE, x, max);
        let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
        let infinity = consts::bitcast(infinity_bits, float_ty);
        bx.select(overflow, infinity, bx.uitofp(x, float_ty))
    } else {
        if signed {
            bx.sitofp(x, float_ty)
        } else {
            bx.uitofp(x, float_ty)
        }
    }
}

fn cast_float_to_int(bx: &Builder,
                     signed: bool,
                     x: ValueRef,
                     float_ty: Type,
                     int_ty: Type) -> ValueRef {
    let fptosui_result = if signed {
        bx.fptosi(x, int_ty)
    } else {
        bx.fptoui(x, int_ty)
    };

    if !bx.sess().opts.debugging_opts.saturating_float_casts {
        return fptosui_result;
    }

    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    fn compute_clamp_bounds<F: Float>(signed: bool, int_ty: Type) -> (u128, u128) {
        let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    }
    fn int_max(signed: bool, int_ty: Type) -> u128 {
        let shift_amount = 128 - int_ty.int_width();
        if signed {
            i128::MAX as u128 >> shift_amount
        } else {
            u128::MAX >> shift_amount
        }
    }
    fn int_min(signed: bool, int_ty: Type) -> i128 {
        if signed {
            i128::MIN >> (128 - int_ty.int_width())
        } else {
            0
        }
    }
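
    // (For an 8-bit int_ty: int_max = 127 signed / 255 unsigned, and
    // int_min = -128 signed / 0 unsigned.)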

    let float_bits_to_llval = |bits| {
        let bits_llval = match float_ty.float_width() {
            32 => C_u32(bx.cx, bits as u32),
            64 => C_u64(bx.cx, bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        consts::bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_ty.float_width() {
        32 => compute_clamp_bounds::<ieee::Single>(signed, int_ty),
        64 => compute_clamp_bounds::<ieee::Double>(signed, int_ty),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //    a) int_ty::MIN if x < f_min or x is NaN
    //    b) int_ty::MAX if x > f_max
    //    c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids returning undef, because values in the range [f_min, f_max] by definition fit
    // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
    // Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    //
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.

    // Step 1 was already performed above.

    // Step 2: We use two comparisons and two selects, with %s1 being the result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
    // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
    // becomes int_ty::MIN if x is NaN.
    // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
    // negation, and the negation can be merged into the select. Therefore, it is not necessarily
    // any more expensive than an ordered ("normal") comparison. Whether these optimizations will
    // be performed is ultimately up to the backend, but at least x86 does perform them.
    let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min);
    let greater = bx.fcmp(llvm::RealOGT, x, f_max);
    let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
    let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);
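
    // (For example, for an f32 -> u8 cast: x = 300.0 selects int_max (255),
    // x = -1.0 selects int_min (0), and x = NaN also takes the less_or_nan
    // path and yields 0.)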

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
        // LLVM has no isNaN predicate, so we use (x == x) instead
        bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
    } else {
        s1
    }
}