use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::LangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::sym;
use rustc_target::abi::{Abi, Int, LayoutOf, Variants};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
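                    // E.g., `[42u8; N]`: the element is a single byte, so the whole
                    // array can still be initialized with one memset call.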
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(&count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
                                    bug!("reifying a fn ptr that requires const arguments");
                                }
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.

                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(
                                    lldata,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                let (lldata, llextra) = base::unsize_thin_ptr(
                                    &mut bx,
                                    lldata,
                                    operand.layout.ty,
                                    cast.ty,
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // A cast of a fat pointer to a thin pointer is an extraction
                                // of the data pointer, plus a pointer-cast of that pointer to
                                // the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
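                            // If the operand is a single-variant enum, its discriminant
                            // is known at compile time, so the cast can be folded to a
                            // constant instead of loading anything.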
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end() > scalar.valid_range.start()
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bound checks, and this is the most
                                    // convenient place to put the `assume`s.
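                                    // E.g., for `#[repr(u8)] enum E { A = 1, B = 2 }`
                                    // the valid range is 1..=2, so we emit
                                    // `assume(1 <= x)` and `assume(x <= 2)` on the
                                    // cast input.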
                                    if *scalar.valid_range.start() > 0 {
                                        let enum_value_lower_bound = bx
                                            .cx()
                                            .const_uint_big(ll_t_in, *scalar.valid_range.start());
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound =
                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out, cast)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out, cast)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }

            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(&content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                // Allocate space:
                let def_id = match bx.tcx().lang_items().require(LangItem::ExchangeMalloc) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let call = bx.call(r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }

            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a `Local` is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`.
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }
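
    /// Codegen a binary operation on two scalar (immediate) operands, picking
    /// the float, signed, or unsigned variant of each instruction based on
    /// `input_ty`.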
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }
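
    /// Codegen a comparison of two fat pointers, combining comparisons of the
    /// data-pointer and extra (metadata) parts; the ordering operators compare
    /// lexicographically.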
    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
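                // Pointer addresses and metadata such as slice lengths have no
                // sign, so all the predicates below are the unsigned variants.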
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => unreachable!(),
                };

                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with `#[rustc_inherit_overflow_checks]` and inlined from
        // another crate (mostly `core::num` generic/`#[inline]` fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics.
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
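            // A shift overflows iff the shift amount is at least the bit width
            // of the left-hand side, i.e., iff `rhs` has any bit set outside
            // the legal shift-amount mask.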
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
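    /// Returns `true` if `rvalue` can be codegenned directly as an `OperandRef`
    /// (see `codegen_rvalue_operand`), rather than through an on-stack
    /// destination.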
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(&ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}
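
/// Codegen a float-to-int cast that saturates out-of-range inputs instead of
/// yielding `undef`: too-large and too-small values clamp to `int_ty`'s MAX and
/// MIN, and NaN maps to 0.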
fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
    int_layout: TyAndLayout<'tcx>,
) -> Bx::Value {
    if let Some(false) = bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
    }

    let try_sat_result = if signed { bx.fptosi_sat(x, int_ty) } else { bx.fptoui_sat(x, int_ty) };
    if let Some(try_sat_result) = try_sat_result {
        return try_sat_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256-bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };
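    // E.g., for a signed 32-bit integer: `i128::MAX as u128 >> 96` is
    // `0x7fff_ffff` (`i32::MAX`), and `i128::MIN >> 96` sign-extends to
    // `-0x8000_0000` (`i32::MIN`).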
    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
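    // The bounds are returned as raw bit patterns so that they can be
    // materialized as constants of the right float width by
    // `float_bits_to_llval` below.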
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //  a) int_ty::MIN if x < f_min or x is NaN
    //  b) int_ty::MAX if x > f_max
    //  c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0.0, otherwise return the result of step 2.
    //
    // This avoids undef in the result because values in the range [f_min, f_max] by definition
    // fit into the destination type. It creates an undef temporary, but *producing* undef is not
    // UB. Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let zero = bx.cx().const_uint(int_ty, 0);

    // The codegen here differs quite a bit depending on whether our builder's
    // `fptosi` and `fptoui` instructions may trap for out-of-bounds values. If
    // they don't trap then we can start doing everything inline with a
    // `select` instruction because it's ok to execute `fptosi` and `fptoui`
    // even if we don't use the results.
    if !bx.fptosui_may_trap(x, int_ty) {
        // Step 1: Cast x to an integer with fpto[su]i.
        let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };
        let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
        let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);

        // Step 2: We use two comparisons and two selects, with %s1 being the
        // result:
        //     %less_or_nan = fcmp ult %x, %f_min
        //     %greater = fcmp ogt %x, %f_max
        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
        //     %s1 = select %greater, int_ty::MAX, %s0
        // Note that %less_or_nan uses an *unordered* comparison. This
        // comparison is true if the operands are not comparable (i.e., if x is
        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
        // x is NaN.
        //
        // Performance note: Unordered comparison can be lowered to a "flipped"
        // comparison and a negation, and the negation can be merged into the
        // select. Therefore, it is not necessarily any more expensive than an
        // ordered ("normal") comparison. Whether these optimizations will be
        // performed is ultimately up to the backend, but at least x86 does
        // perform them.
        let s0 = bx.select(less_or_nan, int_min, fptosui_result);
        let s1 = bx.select(greater, int_max, s0);

        // Step 3: NaN replacement.
        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
        // Therefore we only need to execute this step for signed integer types.
        if signed {
            // LLVM has no isNaN predicate, so we use (x == x) instead.
            let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
            bx.select(cmp, s1, zero)
        } else {
            s1
        }
    } else {
        // In this case we cannot execute `fptosi` or `fptoui` and then later
        // discard the result. The builder is telling us that these instructions
        // will trap on out-of-bounds values, so we need to use basic blocks and
        // control flow to avoid executing the `fptosi` and `fptoui`
        // instructions when the input needs to be saturated.
        //
        // The general idea of what we're constructing here is, for f64 -> i32:
        //
        //      ;; block so far... %0 is the argument
        //      %result = alloca i32, align 4
        //      %inbound_lower = fcmp oge double %0, 0xC1E0000000000000
        //      %inbound_upper = fcmp ole double %0, 0x41DFFFFFFFC00000
        //      ;; match (inbound_lower, inbound_upper) {
        //      ;;     (true, true) => %0 can be converted without trapping
        //      ;;     (false, false) => %0 is a NaN
        //      ;;     (true, false) => %0 is too large
        //      ;;     (false, true) => %0 is too small
        //      ;; }
        //
        //      ;; The (true, true) check, go to %convert if so.
        //      %inbounds = and i1 %inbound_lower, %inbound_upper
        //      br i1 %inbounds, label %convert, label %specialcase
        //
        //  convert:
        //      %cvt = call i32 @llvm.wasm.trunc.signed.i32.f64(double %0)
        //      store i32 %cvt, i32* %result, align 4
        //      br label %done
        //
        //  specialcase:
        //      ;; Handle the cases where the number is NaN, too large or too small
        //
        //      ;; Either (true, false) or (false, true)
        //      %is_not_nan = or i1 %inbound_lower, %inbound_upper
        //      ;; Figure out which saturated value we are interested in if not `NaN`
        //      %saturated = select i1 %inbound_lower, i32 2147483647, i32 -2147483648
        //      ;; Figure out between saturated and NaN representations
        //      %result_nan = select i1 %is_not_nan, i32 %saturated, i32 0
        //      store i32 %result_nan, i32* %result, align 4
        //      br label %done
        //
        //  done:
        //      %r = load i32, i32* %result, align 4
        //      ;; ...
        let done = bx.build_sibling_block("float_cast_done");
        let mut convert = bx.build_sibling_block("float_cast_convert");
        let mut specialcase = bx.build_sibling_block("float_cast_specialcase");

        let result = PlaceRef::alloca(bx, int_layout);
        result.storage_live(bx);

        // Use control flow to figure out whether we can execute `fptosi` in a
        // basic block, or whether we go to a different basic block to implement
        // the saturating logic.
        let inbound_lower = bx.fcmp(RealPredicate::RealOGE, x, f_min);
        let inbound_upper = bx.fcmp(RealPredicate::RealOLE, x, f_max);
        let inbounds = bx.and(inbound_lower, inbound_upper);
        bx.cond_br(inbounds, convert.llbb(), specialcase.llbb());

        // Translation of the `convert` basic block.
        let cvt = if signed { convert.fptosi(x, int_ty) } else { convert.fptoui(x, int_ty) };
        convert.store(cvt, result.llval, result.align);
        convert.br(done.llbb());

        // Translation of the `specialcase` basic block. Note that like above
        // we try to be a bit clever here for unsigned conversions. In those
        // cases the `int_min` is zero, so we don't need two select instructions,
        // just one to choose whether we need `int_max` or not. If
        // `inbound_lower` is true then we're guaranteed to not be `NaN` and
        // since we're greater than zero we must be saturating to `int_max`. If
        // `inbound_lower` is false then we're either NaN or less than zero, so
        // we saturate to zero.
        let result_nan = if signed {
            let is_not_nan = specialcase.or(inbound_lower, inbound_upper);
            let saturated = specialcase.select(inbound_lower, int_max, int_min);
            specialcase.select(is_not_nan, saturated, zero)
        } else {
            specialcase.select(inbound_lower, int_max, int_min)
        };
        specialcase.store(result_nan, result.llval, result.align);
        specialcase.br(done.llbb());

        // Translation of the `done` basic block, positioning ourselves to
        // continue from that point as well.
        *bx = done;
        let ret = bx.load(result.llval, result.align);
        result.storage_dead(bx);
        ret
    }
}