use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{Abi, Int, Variants};
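
// Codegen of MIR rvalues. These methods are generic over `Bx: BuilderMethods`,
// so the lowering can be shared across codegen backends.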
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
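    /// Lowers `rvalue` and stores the result into the memory at `dest`.
    /// Note that the builder is taken and returned by value, so a single
    /// `Bx` is threaded linearly through the function's codegen.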
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through the stack. (Fixable by
                // codegen'ing constants into `OperandValue::Ref`; why don't we do that yet,
                // if we don't?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());
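
                    // Two memset fast paths follow: a constant-zero fill of any
                    // element type, and a fill whose element is a single byte; e.g.
                    // `[0u64; 8]` and `[b' '; 64]` each lower to one `memset` call
                    // rather than an element-by-element store loop.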
                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

                let count =
                    self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if bx.tcx().adt_def(adt_did).is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

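    /// Lowers `rvalue` into `indirect_dest`, a place holding a pointer to the
    /// unsized destination; only `Rvalue::Use` can legally appear here.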
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

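    /// Lowers `rvalue` to an `OperandRef` (an SSA value or pair, not a store
    /// into memory). Only valid for rvalues for which `rvalue_creates_operand`
    /// returns `true`, as the `assert!` below enforces.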
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast of a fat pointer to a thin pointer extracts the
                                // data pointer and pointer-casts it to the desired
                                // pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc
                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.primitive() {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                if !scalar.is_always_valid(bx.cx())
                                    && scalar.valid_range(bx.cx()).end
                                        >= scalar.valid_range(bx.cx()).start
                                {
                                    // We want `table[e as usize ± k]` to not
                                    // have bounds checks, and this is the most
                                    // convenient place to put the `assume`s.
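                                    // E.g. for `#[repr(u8)] enum E { A = 1, B = 2 }`, a cast
                                    // to `u8` emits `assume(1 <= val)` and `assume(val <= 2)`,
                                    // letting LLVM elide the bounds check in
                                    // `table[e as usize - 1]`.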
                                    if scalar.valid_range(bx.cx()).start > 0 {
                                        let enum_value_lower_bound = bx.cx().const_uint_big(
                                            ll_t_in,
                                            scalar.valid_range(bx.cx()).start,
                                        );
                                        let cmp_start = bx.icmp(
                                            IntPredicate::IntUGE,
                                            llval,
                                            enum_value_lower_bound,
                                        );
                                        bx.assume(cmp_start);
                                    }

                                    let enum_value_upper_bound = bx
                                        .cx()
                                        .const_uint_big(ll_t_in, scalar.valid_range(bx.cx()).end);
                                    let cmp_end = bx.icmp(
                                        IntPredicate::IntULE,
                                        llval,
                                        enum_value_upper_bound,
                                    );
                                    bx.assume(cmp_end);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(null_op, ty) => {
                let ty = self.monomorphize(ty);
                assert!(bx.cx().type_is_sized(ty));
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => layout.size.bytes(),
                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let static_ = bx.get_static(def_id);
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
                (bx, operand)
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
        }
    }

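    /// Returns the length of the array or slice at `place` as a `usize`
    /// backend value.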
    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
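    /// into a pointer-valued operand: a thin pointer when the pointee type has
    /// no metadata, otherwise a (pointer, metadata) pair (e.g. for `&[T]`).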
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

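    /// Lowers a binary operation on scalar immediates, selecting the float,
    /// signed-integer, or unsigned-integer instruction based on `input_ty`.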
    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
                bx.inbounds_gep(llty, lhs, &[rhs])
            }
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
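                // e.g. `a <= b` lowers to
                //     (a.0 < b.0) | ((a.0 == b.0) & (a.1 <= b.1))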
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

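    /// Lowers a checked binary operation to a `(result, overflowed)` pair,
    /// where `result` is the wrapping value and `overflowed` a boolean flag,
    /// matching the `(T, bool)` tuple produced by `CheckedBinaryOp`.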
    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with `#[rustc_inherit_overflow_checks]` and inlined from
        // another crate (mostly `core::num` generic/`#[inline]` fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics.
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);
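
                // E.g. for a 64-bit shift, `invert_mask` is `!63`; the shift
                // overflows iff `rhs` has any bit set outside the low 6 bits.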
                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
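    /// Returns whether `rvalue` can be lowered directly to an operand, i.e.
    /// whether `codegen_rvalue_operand` may be used instead of `codegen_rvalue`.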
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}