// src/librustc_codegen_ssa/mir/rvalue.rs (upstream rustc 1.44.1)
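//! Codegen of MIR rvalues: either stored into a destination place
//! (`codegen_rvalue`) or produced as backend operands
//! (`codegen_rvalue_operand`).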
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate, RealPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_hir::lang_items::ExchangeMallocFnLangItem;
use rustc_middle::mir;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::sym;
use rustc_target::abi::{Abi, Int, LayoutOf, Variants};

dc9dc135 20impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
a1dfa0c6
XL
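    /// Codegen `rvalue` and store the result into the destination place `dest`.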
    pub fn codegen_rvalue(
        &mut self,
        mut bx: Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                // FIXME: consider not copying constants through the stack. (This is
                // fixable by codegen'ing constants into `OperandValue::Ref`; why isn't
                // that done already?)
                cg_operand.val.store(&mut bx, dest);
                bx
            }

            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(&mut bx, dest);
                    return bx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(&mut bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
                        scratch.storage_live(&mut bx);
                        operand.val.store(&mut bx, scratch);
                        base::coerce_unsized_into(&mut bx, scratch, dest);
                        scratch.storage_dead(&mut bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(&mut bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                }
                bx
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(&mut bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return bx;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(&mut bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all-zero arrays.
                    if bx.cx().const_to_opt_uint(v) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return bx;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays.
                    let v = base::from_immediate(&mut bx, v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return bx;
                    }
                }

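                // Neither memset special case applied: fall back to an explicit
                // fill loop. `write_operand_repeatedly` stores `cg_elem` into
                // each of the `count` elements of `dest` in turn.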
                let count =
                    self.monomorphize(&count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest)
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        dest.codegen_set_discr(&mut bx, variant_index);
                        if adt_def.is_enum() {
                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
                for (i, operand) in operands.iter().enumerate() {
                    let op = self.codegen_operand(&mut bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = dest.project_field(&mut bx, field_index);
                        op.val.store(&mut bx, field);
                    }
                }
                bx
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(&mut bx, dest);
                bx
            }
        }
    }

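    /// Codegen `rvalue` into the unsized destination `indirect_dest`; only
    /// `Rvalue::Use` can be assigned to an unsized place.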
    pub fn codegen_rvalue_unsized(
        &mut self,
        mut bx: Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> Bx {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(&mut bx, operand);
                cg_operand.val.store_unsized(&mut bx, indirect_dest);
                bx
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

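    /// Codegen `rvalue` as an `OperandRef` instead of storing it into memory.
    /// Callers must ensure `rvalue_creates_operand` holds for this rvalue.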
    pub fn codegen_rvalue_operand(
        &mut self,
        mut bx: Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(&mut bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

                let val = match *kind {
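                    // `FnDef` values are zero-sized; reifying one to a function
                    // pointer means resolving the concrete instance and taking
                    // its address in the backend.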
                    mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
                        match operand.layout.ty.kind {
                            ty::FnDef(def_id, substs) => {
                                if bx.cx().tcx().has_attr(def_id, sym::rustc_args_required_const) {
                                    bug!("reifying a fn ptr that requires const arguments");
                                }
                                OperandValue::Immediate(
                                    bx.get_fn_addr(
                                        ty::Instance::resolve_for_fn_ptr(
                                            bx.tcx(),
                                            ty::ParamEnv::reveal_all(),
                                            def_id,
                                            substs,
                                        )
                                        .unwrap(),
                                    ),
                                )
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
                        match operand.layout.ty.kind {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                );
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::Pointer(PointerCast::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // Unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example, `&'a fmt::Debug + Send => &'a fmt::Debug`.

                                // HACK(eddyb) have to bitcast pointers
                                // until LLVM removes pointee types.
                                let lldata = bx.pointercast(
                                    lldata,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) = base::unsize_thin_ptr(
                                    &mut bx,
                                    lldata,
                                    operand.layout.ty,
                                    cast.ty,
                                );
                                OperandValue::Pair(lldata, llextra)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        }
                    }
                    mir::CastKind::Pointer(PointerCast::MutToConstPointer)
                    | mir::CastKind::Misc
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // Cast of a fat pointer to a thin pointer is an
                                // extraction of the data pointer, followed by a
                                // pointer-cast to the desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::Pointer(
                        PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                    )
                    | mir::CastKind::Misc => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                            return (bx, OperandRef { val, layout: cast });
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
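                        // A cast of a univariant enum (a layout with a single
                        // variant) needs no runtime inspection: the result is
                        // simply that variant's discriminant as a constant.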
                        match operand.layout.variants {
                            Variants::Single { index } => {
                                if let Some(discr) =
                                    operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
                                {
                                    let discr_layout = bx.cx().layout_of(discr.ty);
                                    let discr_t = bx.cx().immediate_backend_type(discr_layout);
                                    let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
                                    let discr_val =
                                        bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());

                                    return (
                                        bx,
                                        OperandRef {
                                            val: OperandValue::Immediate(discr_val),
                                            layout: cast,
                                        },
                                    );
                                }
                            }
                            Variants::Multiple { .. } => {}
                        }
                        let llval = operand.immediate();

                        let mut signed = false;
                        if let Abi::Scalar(ref scalar) = operand.layout.abi {
                            if let Int(_, s) = scalar.value {
                                // We use `i1` for bytes that are always `0` or `1`,
                                // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                                // let LLVM interpret the `i1` as signed, because
                                // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                                signed = !scalar.is_bool() && s;

                                let er = scalar.valid_range_exclusive(bx.cx());
                                if er.end != er.start
                                    && scalar.valid_range.end() > scalar.valid_range.start()
                                {
                                    // We want `table[e as usize]` to not have bounds
                                    // checks, and this is the most convenient place
                                    // to put the `assume`.
                                    let ll_t_in_const =
                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end());
                                    let cmp = bx.icmp(IntPredicate::IntULE, llval, ll_t_in_const);
                                    bx.assume(cmp);
                                }
                            }
                        }

                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(_), CastTy::Int(_)) => bx.intcast(llval, ll_t_out, signed),
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(_), CastTy::Float) => {
                                if signed {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Int(_)) => {
                                bx.ptrtoint(llval, ll_t_out)
                            }
                            (CastTy::Int(_), CastTy::Ptr(_)) => {
                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                };
                (bx, OperandRef { val, layout: cast })
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ref(
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(&mut bx, place);
                let operand = OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                };
                (bx, operand)
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        &mut bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                let operand = OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                };
                (bx, operand)
            }
            mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.codegen_operand(&mut bx, lhs);
                let rhs = self.codegen_operand(&mut bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    &mut bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };

                (bx, operand)
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(*self.mir, bx.tcx());
                let discr = self
                    .codegen_place(&mut bx, place.as_ref())
                    .codegen_get_discr(&mut bx, discr_ty);
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(discr),
                        layout: self.cx.layout_of(discr_ty),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                assert!(bx.cx().type_is_sized(ty));
                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
                let tcx = self.cx.tcx();
                (
                    bx,
                    OperandRef {
                        val: OperandValue::Immediate(val),
                        layout: self.cx.layout_of(tcx.types.usize),
                    },
                )
            }

            mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                let content_ty = self.monomorphize(&content_ty);
                let content_layout = bx.cx().layout_of(content_ty);
                let llsize = bx.cx().const_usize(content_layout.size.bytes());
                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

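                // Allocate space: `Box<T>` allocates by calling the
                // `exchange_malloc` lang item with the value's size and
                // alignment; the returned raw pointer is then cast to the
                // box's backend type.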
                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let r = bx.cx().get_fn_addr(instance);
                let call = bx.call(r, &[llsize, llalign], None);
                let val = bx.pointercast(call, llty_ptr);

                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
                (bx, operand)
            }
            mir::Rvalue::Use(ref operand) => {
                let operand = self.codegen_operand(&mut bx, operand);
                (bx, operand)
            }
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(*self.mir, self.cx.tcx());
                let operand =
                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(&ty)));
                (bx, operand)
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because `codegen_place()` panics if a `Local` is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(Some(op)) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind {
                    let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // Use the common size calculation for non-zero-sized types.
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`.
    fn codegen_place_to_pointer(
        &mut self,
        mut bx: Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
        let cg_place = self.codegen_place(&mut bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
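                // Fat-pointer ordering compares addresses as unsigned integers,
                // hence the `IntU*` predicates below.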
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        // This case can currently arise only from functions marked
        // with `#[rustc_inherit_overflow_checks]` and inlined from
        // another crate (mostly `core::num` generic/`#[inline]` fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() {
            let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
            return OperandValue::Pair(val, bx.cx().const_bool(false));
        }

        let (val, of) = match op {
            // These are checked using intrinsics.
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
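            // A shift overflows when the shift amount is at least the bit
            // width of the left-hand type. `invert_mask` is the complement of
            // the legal shift-amount mask, so any bit of `rhs` that survives
            // masking with it signals overflow.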
            mir::BinOp::Shl | mir::BinOp::Shr => {
                let lhs_llty = bx.cx().val_ty(lhs);
                let rhs_llty = bx.cx().val_ty(rhs);
                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                let outer_bits = bx.and(rhs, invert_mask);

                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);

                (val, of)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Ref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(*self.mir, self.cx.tcx());
                let ty = self.monomorphize(&ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }
}

fn cast_float_to_int<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    signed: bool,
    x: Bx::Value,
    float_ty: Bx::Type,
    int_ty: Bx::Type,
) -> Bx::Value {
    let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) };

    if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
        return fptosui_result;
    }

    let int_width = bx.cx().int_width(int_ty);
    let float_width = bx.cx().float_width(float_ty);
    // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
    // destination integer type after rounding towards zero. This `undef` value can cause UB in
    // safe code (see issue #10184), so we implement a saturating conversion on top of it:
    // Semantically, the mathematical value of the input is rounded towards zero to the next
    // mathematical integer, and then the result is clamped into the range of the destination
    // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
    // the destination integer type. NaN is mapped to 0.
    //
    // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
    // a value representable in int_ty.
    // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
    // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
    // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
    // representable. Note that this only works if float_ty's exponent range is sufficiently large.
    // f16 or 256-bit integers would break this property. Right now the smallest float type is f32
    // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
    // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
    // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
    // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
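    // Helpers to compute int_ty::MAX and int_ty::MIN as 128-bit values for the
    // given target integer width.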
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };

    let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };
    let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
        assert_eq!(rounded_min.status, Status::OK);
        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
        assert!(rounded_max.value.is_finite());
        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
    };

    let mut float_bits_to_llval = |bits| {
        let bits_llval = match float_width {
            32 => bx.cx().const_u32(bits as u32),
            64 => bx.cx().const_u64(bits as u64),
            n => bug!("unsupported float width {}", n),
        };
        bx.bitcast(bits_llval, float_ty)
    };
    let (f_min, f_max) = match float_width {
        32 => compute_clamp_bounds_single(signed, int_width),
        64 => compute_clamp_bounds_double(signed, int_width),
        n => bug!("unsupported float width {}", n),
    };
    let f_min = float_bits_to_llval(f_min);
    let f_max = float_bits_to_llval(f_max);
    // To implement saturation, we perform the following steps:
    //
    // 1. Cast x to an integer with fpto[su]i. This may result in undef.
    // 2. Compare x to f_min and f_max, and use the comparison results to select:
    //    a) int_ty::MIN if x < f_min or x is NaN
    //    b) int_ty::MAX if x > f_max
    //    c) the result of fpto[su]i otherwise
    // 3. If x is NaN, return 0, otherwise return the result of step 2.
    //
    // This avoids resulting in undef because values in range [f_min, f_max] by definition fit
    // into the destination type. It creates an undef temporary, but *producing* undef is not UB.
    // Our use of undef does not introduce any non-determinism either.
    // More importantly, the above procedure correctly implements saturating conversion.
    // Proof (sketch):
    // If x is NaN, 0 is returned by definition.
    // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
    // This yields three cases to consider:
    // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
    //     saturating conversion for inputs in that range.
    // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
    //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
    //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
    //     is correct.
    // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
    //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
    // QED.

    // Step 1 was already performed above.

    // Step 2: We use two comparisons and two selects, with %s1 being the result:
    //     %less_or_nan = fcmp ult %x, %f_min
    //     %greater = fcmp ogt %x, %f_max
    //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
    //     %s1 = select %greater, int_ty::MAX, %s0
    // Note that %less_or_nan uses an *unordered* comparison. This comparison is true if the
    // operands are not comparable (i.e., if x is NaN). The unordered comparison ensures that s1
    // becomes int_ty::MIN if x is NaN.
    // Performance note: Unordered comparison can be lowered to a "flipped" comparison and a
    // negation, and the negation can be merged into the select. Therefore, it is not necessarily
    // any more expensive than an ordered ("normal") comparison. Whether these optimizations will
    // be performed is ultimately up to the backend, but at least x86 does perform them.
    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width));
    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
    let s1 = bx.select(greater, int_max, s0);

    // Step 3: NaN replacement.
    // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
    // Therefore we only need to execute this step for signed integer types.
    if signed {
        // LLVM has no isNaN predicate, so we use (x == x) instead.
        let zero = bx.cx().const_uint(int_ty, 0);
        let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x);
        bx.select(cmp, s1, zero)
    } else {
        s1
    }
}