// src/librustc_trans/expr.rs, rustc 1.9.0 (Imported Upstream version 1.9.0+dfsg1)
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//!   result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//!   storing the result into `dest`. This is the preferred form, if you
//!   can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//!   a `Datum` with the result. You can then store the datum, inspect
//!   the value, etc. This may introduce temporaries if the datum is a
//!   structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//!   expression and ensures that the result has a cleanup associated with it,
//!   creating a temporary stack slot if necessary.
//!
//! - `trans_var -> Datum`: looks up a local variable, upvar or static.

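// For orientation, a minimal sketch (a hypothetical caller, not code from
// this module) of how the two main entry points compose. `rvalue_scratch_datum`
// and `SaveIn` are the same helpers used further down in this file:
//
//     // DPS style: evaluate `expr` directly into a fresh stack slot.
//     let scratch = rvalue_scratch_datum(bcx, ty, "example");
//     bcx = trans_into(bcx, expr, SaveIn(scratch.val));
//
//     // Datum style: evaluate first, then decide where the value goes.
//     let datum = unpack_datum!(bcx, trans(bcx, expr));
//     bcx = datum.store_to_dest(bcx, SaveIn(scratch.val), expr.id);
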
#![allow(non_camel_case_types)]

pub use self::Dest::*;
use self::lazy_binop_ty::*;

use llvm::{self, ValueRef, TypeKind};
use middle::const_qualif::ConstQualif;
use rustc::hir::def::Def;
use rustc::ty::subst::Substs;
use {_match, abi, adt, asm, base, closure, consts, controlflow};
use base::*;
use build::*;
use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
use cleanup::{self, CleanupMethods, DropHintMethods};
use common::*;
use datum::*;
use debuginfo::{self, DebugLoc, ToDebugLoc};
use declare;
use glue;
use machine;
use tvec;
use type_of;
use value::Value;
use Disr;
use rustc::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::MethodCall;
use rustc::ty::cast::{CastKind, CastTy};
use util::common::indenter;
use machine::{llsize_of, llsize_of_alloc};
use type_::Type;

use rustc::hir;

use syntax::{ast, codemap};
use syntax::parse::token::InternedString;
use std::fmt;
use std::mem;

// Destinations

// These are passed around by the code generating functions to track the
// destination of a computation's value.

#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
    SaveIn(ValueRef),
    Ignore,
}

impl fmt::Debug for Dest {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)),
            Ignore => f.write_str("Ignore")
        }
    }
}
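
// A `Dest` tells a DPS-style translation routine where its result should go.
// As a hedged illustration (both calls mirror uses further down in this
// file): a caller that needs the value passes `SaveIn`, while a caller
// evaluating purely for side-effects passes `Ignore`:
//
//     bcx = trans_rvalue_dps_unadjusted(bcx, expr, SaveIn(lldest));
//     bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);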

/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              expr: &hir::Expr,
                              dest: Dest)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    if adjustment_required(bcx, expr) {
        // use trans, which may be less efficient but
        // which will perform the adjustments:
        let datum = unpack_datum!(bcx, trans(bcx, expr));
        return datum.store_to_dest(bcx, dest, expr.id);
    }

    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
            if let SaveIn(lldest) = dest {
                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                                       bcx.fcx.param_substs,
                                                       consts::TrueConst::No) {
                    Ok(global) => {
                        // Cast pointer to destination, because constants
                        // have different types.
                        let lldest = PointerCast(bcx, lldest, val_ty(global));
                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
                        return bcx;
                    },
                    Err(consts::ConstEvalFailure::Runtime(_)) => {
                        // in case const evaluation errors, translate normally
                        // debug assertions catch the same errors
                        // see RFC 1229
                    },
                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
                        return bcx;
                    },
                }
            }

            // If we see a const here, that's because it evaluates to a type with zero size. We
            // should be able to just discard it, since const expressions are guaranteed not to
            // have side effects. This seems to be reached through tuple struct constructors being
            // passed zero-size constants.
            if let hir::ExprPath(..) = expr.node {
                match bcx.def(expr.id) {
                    Def::Const(_) | Def::AssociatedConst(_) => {
                        assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
                        return bcx;
                    }
                    _ => {}
                }
            }

            // Even if we don't have a value to emit, and the expression
            // doesn't have any side-effects, we still have to translate the
            // body of any closures.
            // FIXME: Find a better way of handling this case.
        } else {
            // The only way we're going to see a `const` at this point is if
            // it prefers in-place instantiation, likely because it contains
            // `[x; N]` somewhere within.
            match expr.node {
                hir::ExprPath(..) => {
                    match bcx.def(expr.id) {
                        Def::Const(did) | Def::AssociatedConst(did) => {
                            let empty_substs = bcx.tcx().mk_substs(Substs::empty());
                            let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
                                                                    empty_substs);
                            // Temporarily get cleanup scopes out of the way,
                            // as they require sub-expressions to be contained
                            // inside the current AST scope.
                            // These should record no cleanups anyway, as a
                            // `const` can't have destructors.
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      vec![]);
                            // Lock emitted debug locations to the location of
                            // the constant reference expression.
                            debuginfo::with_source_location_override(bcx.fcx,
                                                                     expr.debug_loc(),
                                                                     || {
                                bcx = trans_into(bcx, const_expr, dest)
                            });
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      scopes);
                            assert!(scopes.is_empty());
                            return bcx;
                        }
                        _ => {}
                    }
                }
                _ => {}
            }
        }
    }

    debug!("trans_into() expr={:?}", expr);

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);

    let kind = expr_kind(bcx.tcx(), expr);
    bcx = match kind {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
        }
        ExprKind::RvalueDps => {
            trans_rvalue_dps_unadjusted(bcx, expr, dest)
        }
        ExprKind::RvalueStmt => {
            trans_rvalue_stmt_unadjusted(bcx, expr)
        }
    };

    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}

/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         expr: &hir::Expr)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    debug!("trans(expr={:?})", expr);

    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
    let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                               bcx.fcx.param_substs,
                                               consts::TrueConst::No) {
            Ok(global) => {
                if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
                    // Is borrowed as 'static, must return lvalue.

                    // Cast pointer to global, because constants have different types.
                    let const_ty = expr_ty_adjusted(bcx, expr);
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
                    return DatumBlock::new(bcx, datum.to_expr_datum());
                }

                // Otherwise, keep around and perform adjustments, if needed.
                let const_ty = if adjusted_global {
                    expr_ty_adjusted(bcx, expr)
                } else {
                    expr_ty(bcx, expr)
                };

                // This could use a better heuristic.
                Some(if type_is_immediate(bcx.ccx(), const_ty) {
                    // Cast pointer to global, because constants have different types.
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    // Maybe just get the value directly, instead of loading it?
                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
                } else {
                    let scratch = alloc_ty(bcx, const_ty, "const");
                    call_lifetime_start(bcx, scratch);
                    let lldest = if !const_ty.is_structural() {
                        // Cast pointer to slot, because constants have different types.
                        PointerCast(bcx, scratch, val_ty(global))
                    } else {
                        // In this case, memcpy_ty calls llvm.memcpy after casting both
                        // source and destination to i8*, so we don't need any casts.
                        scratch
                    };
                    memcpy_ty(bcx, lldest, global, const_ty);
                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
                })
            },
            Err(consts::ConstEvalFailure::Runtime(_)) => {
                // in case const evaluation errors, translate normally
                // debug assertions catch the same errors
                // see RFC 1229
                None
            },
            Err(consts::ConstEvalFailure::Compiletime(_)) => {
                // generate a dummy llvm value
                let const_ty = expr_ty(bcx, expr);
                let llty = type_of::type_of(bcx.ccx(), const_ty);
                let dummy = C_undef(llty.ptr_to());
                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
            },
        }
    } else {
        None
    };

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
    let datum = match global {
        Some(rvalue) => rvalue.to_expr_datum(),
        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
    };
    let datum = if adjusted_global {
        datum // trans::consts already performed adjustments.
    } else {
        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
    };
    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
    return DatumBlock::new(bcx, datum);
}

pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
}

pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}

pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
}
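
// Fat pointers are represented here as a two-field struct: the data pointer
// at index `abi::FAT_PTR_ADDR` and the extra word (a slice length or a
// vtable pointer) at `abi::FAT_PTR_EXTRA`. As a hedged sketch of the layout
// these accessors assume (plain Rust, not compiler code):
//
//     // A `&[u8]` is morally:
//     struct FatPtr {
//         data: *const u8, // accessed via get_dataptr()
//         len: usize,      // accessed via get_meta()
//     }
//
// `copy_fat_ptr` simply loads and stores both words.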

fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr) -> bool {
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => { return false; }
        Some(adj) => adj
    };

    // Don't skip a conversion from Box<T> to &T, etc.
    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
        return true;
    }

    match adjustment {
        AdjustReifyFnPointer => true,
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
            false
        }
        AdjustDerefRef(ref adj) => {
            // We are a bit paranoid about adjustments and thus might have a re-
            // borrow here which merely derefs and then refs again (it might have
            // a different region or mutability, but we don't care here).
            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
        }
    }
}
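
// The "reborrow" that `adjustment_required` and `apply_adjustments` skip is
// the one-deref-one-ref pattern. A hedged source-level example of an
// adjustment that is a pure reborrow and needs no generated code:
//
//     fn takes(r: &i32) {}
//     let x: &mut i32 = &mut 0;
//     takes(x);   // adjusted as `takes(&*x)`: autoderefs == 1, an autoref,
//                 // and no unsize, so nothing needs to be translated.
//
// By contrast, a conversion from `Box<T>` to `&T`, or one involving an
// overloaded `Deref`, is never skipped.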

/// Helper for `trans` that applies adjustments from `expr` to `datum`, which should be the
/// unadjusted translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 expr: &hir::Expr,
                                 datum: Datum<'tcx, Expr>)
                                 -> DatumBlock<'blk, 'tcx, Expr>
{
    let mut bcx = bcx;
    let mut datum = datum;
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => {
            return DatumBlock::new(bcx, datum);
        }
        Some(adj) => { adj }
    };
    debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
           expr, datum, adjustment);
    match adjustment {
        AdjustReifyFnPointer => {
            match datum.ty.sty {
                ty::TyFnDef(def_id, substs, _) => {
                    datum = Callee::def(bcx.ccx(), def_id, substs)
                        .reify(bcx.ccx()).to_expr_datum();
                }
                _ => {
                    bug!("{} cannot be reified to a fn ptr", datum.ty)
                }
            }
        }
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
        }
        AdjustDerefRef(ref adj) => {
            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
                // We are a bit paranoid about adjustments and thus might have a re-
                // borrow here which merely derefs and then refs again (it might have
                // a different region or mutability, but we don't care here).
                match datum.ty.sty {
                    // Don't skip a conversion from Box<T> to &T, etc.
                    ty::TyRef(..) => {
                        if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
                            // Don't skip an overloaded deref.
                            0
                        } else {
                            1
                        }
                    }
                    _ => 0
                }
            } else {
                0
            };

            if adj.autoderefs > skip_reborrows {
                // Schedule cleanup.
                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
                                                          lval.to_expr_datum(),
                                                          adj.autoderefs - skip_reborrows));
            }

            // (You might think there is a more elegant way to do this than a
            // skip_reborrows bool, but then you remember that the borrow checker exists).
            if skip_reborrows == 0 && adj.autoref.is_some() {
                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
            }

            if let Some(target) = adj.unsize {
                // We do not arrange cleanup ourselves; if we already are an
                // L-value, then cleanup will have already been scheduled (and
                // the `datum.to_rvalue_datum` call below will emit code to zero
                // the drop flag when moving out of the L-value). If we are an
                // R-value, then we do not need to schedule cleanup.
                let source_datum = unpack_datum!(bcx,
                    datum.to_rvalue_datum(bcx, "__coerce_source"));

                let target = bcx.monomorphize(&target);

                let scratch = alloc_ty(bcx, target, "__coerce_target");
                call_lifetime_start(bcx, scratch);
                let target_datum = Datum::new(scratch, target,
                                              Rvalue::new(ByRef));
                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
                datum = Datum::new(scratch, target,
                                   RvalueExpr(Rvalue::new(ByRef)));
            }
        }
    }
    debug!("after adjustments, datum={:?}", datum);
    DatumBlock::new(bcx, datum)
}

fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              span: codemap::Span,
                              source: Datum<'tcx, Rvalue>,
                              target: Datum<'tcx, Rvalue>)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    debug!("coerce_unsized({:?} -> {:?})", source, target);

    match (&source.ty.sty, &target.ty.sty) {
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            let (inner_source, inner_target) = (a, b);

            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
                // Normally, the source is a thin pointer and we are
                // adding extra info to make a fat pointer. The exception
                // is when we are upcasting an existing object fat pointer
                // to use a different vtable. In that case, we want to
                // load out the original data pointer so we can repackage
                // it.
                (Load(bcx, get_dataptr(bcx, source.val)),
                 Some(Load(bcx, get_meta(bcx, source.val))))
            } else {
                let val = if source.kind.is_by_ref() {
                    load_ty(bcx, source.val, source.ty)
                } else {
                    source.val
                };
                (val, None)
            };

            let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info);

            // Compute the base pointer. This doesn't change the pointer value,
            // but merely its type.
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
            let base = PointerCast(bcx, base, ptr_ty);

            Store(bcx, base, get_dataptr(bcx, target.val));
            Store(bcx, info, get_meta(bcx, target.val));
        }

        // This can be extended to enums and tuples in the future.
        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
            assert_eq!(def_id_a, def_id_b);

            // The target is already by-ref because it's to be written to.
            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
            assert!(target.kind.is_by_ref());

            let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);

            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
            let src_fields = match &*repr_source {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => span_bug!(span,
                               "Non univariant struct? (repr_source: {:?})",
                               repr_source),
            };
            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
            let target_fields = match &*repr_target {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => span_bug!(span,
                               "Non univariant struct? (repr_target: {:?})",
                               repr_target),
            };

            let coerce_index = match kind {
                CustomCoerceUnsized::Struct(i) => i
            };
            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());

            let source_val = adt::MaybeSizedValue::sized(source.val);
            let target_val = adt::MaybeSizedValue::sized(target.val);

            let iter = src_fields.iter().zip(target_fields).enumerate();
            for (i, (src_ty, target_ty)) in iter {
                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);

                // If this is the field we need to coerce, recurse on it.
                if i == coerce_index {
                    coerce_unsized(bcx, span,
                                   Datum::new(ll_source, src_ty,
                                              Rvalue::new(ByRef)),
                                   Datum::new(ll_target, target_ty,
                                              Rvalue::new(ByRef)));
                } else {
                    // Otherwise, simply copy the data from the source.
                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
                }
            }
        }
        _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}",
                  source.ty,
                  target.ty)
    }
    bcx
}
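
// Unsizing coercions are what make code like the following work; each line
// is a hedged source-level example of a case handled above:
//
//     let a: &[i32] = &[1, 2, 3];          // &[i32; 3] -> &[i32] (length as meta)
//     let b: Box<Fn()> = Box::new(|| ());  // thin Box -> fat Box (vtable as meta)
//
// For the struct case, think of something like `Rc<[i32; 3]> -> Rc<[i32]>`:
// every field is memcpy'd except the one pointer field named by
// `CustomCoerceUnsized::Struct(i)`, which is recursively coerced.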

/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
/// that the expr represents.
///
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
/// something like `x().f` is translated into roughly the equivalent of
///
///    { tmp = x(); tmp.f }
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   name: &str)
                                   -> DatumBlock<'blk, 'tcx, Lvalue> {
    let mut bcx = bcx;
    let datum = unpack_datum!(bcx, trans(bcx, expr));
    return datum.to_lvalue_datum(bcx, name, expr.id);
}

/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
/// directly.
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                expr: &hir::Expr)
                                -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;

    debug!("trans_unadjusted(expr={:?})", expr);
    let _indenter = indenter();

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    return match expr_kind(bcx.tcx(), expr) {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            let datum = unpack_datum!(bcx, {
                trans_datum_unadjusted(bcx, expr)
            });

            DatumBlock {bcx: bcx, datum: datum}
        }

        ExprKind::RvalueStmt => {
            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
            nil(bcx, expr_ty(bcx, expr))
        }

        ExprKind::RvalueDps => {
            let ty = expr_ty(bcx, expr);
            if type_is_zero_size(bcx.ccx(), ty) {
                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
                nil(bcx, ty)
            } else {
                let scratch = rvalue_scratch_datum(bcx, ty, "");
                bcx = trans_rvalue_dps_unadjusted(
                    bcx, expr, SaveIn(scratch.val));

                // Note: this is not obviously a good idea. It causes
                // immediate values to be loaded immediately after a
                // return from a call or other similar expression,
                // which in turn leads to alloca's having shorter
                // lifetimes and hence larger stack frames. However,
                // in turn it can lead to more register pressure.
                // Still, in practice it seems to increase
                // performance, since we have fewer problems with
                // morestack churn.
                let scratch = unpack_datum!(
                    bcx, scratch.to_appropriate_datum(bcx));

                DatumBlock::new(bcx, scratch.to_expr_datum())
            }
        }
    };

    fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
                       -> DatumBlock<'blk, 'tcx, Expr> {
        let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
        let datum = immediate_rvalue(llval, ty);
        DatumBlock::new(bcx, datum.to_expr_datum())
    }
}

fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      expr: &hir::Expr)
                                      -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let _icx = push_ctxt("trans_datum_unadjusted");

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans(bcx, &e)
        }
        hir::ExprPath(..) => {
            let var = trans_var(bcx, bcx.def(expr.id));
            DatumBlock::new(bcx, var.to_expr_datum())
        }
        hir::ExprField(ref base, name) => {
            trans_rec_field(bcx, &base, name.node)
        }
        hir::ExprTupField(ref base, idx) => {
            trans_rec_tup_field(bcx, &base, idx.node)
        }
        hir::ExprIndex(ref base, ref idx) => {
            trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
        }
        hir::ExprBox(ref contents) => {
            // Special case for `Box<T>`
            let box_ty = expr_ty(bcx, expr);
            let contents_ty = expr_ty(bcx, &contents);
            match box_ty.sty {
                ty::TyBox(..) => {
                    trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
                }
                _ => span_bug!(expr.span,
                               "expected unique box")
            }

        }
        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            trans_binary(bcx, expr, op, &lhs, &rhs)
        }
        hir::ExprUnary(op, ref x) => {
            trans_unary(bcx, expr, op, &x)
        }
        hir::ExprAddrOf(_, ref x) => {
            match x.node {
                hir::ExprRepeat(..) | hir::ExprVec(..) => {
                    // Special case for slices.
                    let cleanup_debug_loc =
                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                      x.id,
                                                                      x.span,
                                                                      false);
                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
                    let datum = unpack_datum!(
                        bcx, tvec::trans_slice_vec(bcx, expr, &x));
                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
                    DatumBlock::new(bcx, datum)
                }
                _ => {
                    trans_addr_of(bcx, expr, &x)
                }
            }
        }
        hir::ExprCast(ref val, _) => {
            // Datum output mode means this is a scalar cast:
            trans_imm_cast(bcx, &val, expr.id)
        }
        _ => {
            span_bug!(
                expr.span,
                "trans_rvalue_datum_unadjusted reached \
                 fall-through case: {:?}",
                expr.node);
        }
    }
}

fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                              base: &hir::Expr,
                              get_idx: F)
                              -> DatumBlock<'blk, 'tcx, Expr> where
    F: FnOnce(&'blk TyCtxt<'tcx>, &VariantInfo<'tcx>) -> usize,
{
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rec_field");

    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
    let bare_ty = base_datum.ty;
    let repr = adt::represent_type(bcx.ccx(), bare_ty);
    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);

    let ix = get_idx(bcx.tcx(), &vinfo);
    let d = base_datum.get_element(
        bcx,
        vinfo.fields[ix].1,
        |srcval| {
            adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
        });

    if type_is_sized(bcx.tcx(), d.ty) {
        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
    } else {
        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
        let info = Load(bcx, get_meta(bcx, base_datum.val));
        Store(bcx, info, get_meta(bcx, scratch.val));

        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
    }
}

/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               base: &hir::Expr,
                               field: ast::Name)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
}

/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   base: &hir::Expr,
                                   idx: usize)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, _| idx)
}

fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           index_expr: &hir::Expr,
                           base: &hir::Expr,
                           idx: &hir::Expr,
                           method_call: MethodCall)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates `base[idx]`.

    let _icx = push_ctxt("trans_index");
    let ccx = bcx.ccx();
    let mut bcx = bcx;

    let index_expr_debug_loc = index_expr.debug_loc();

    // Check for overloaded index.
    let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
    let elt_datum = match method {
        Some(method) => {
            let method_ty = monomorphize_type(bcx, method.ty);

            let base_datum = unpack_datum!(bcx, trans(bcx, base));

            // Translate index expression.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));

            let ref_ty = // invoked methods have LB regions instantiated:
                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
                None => {
                    span_bug!(index_expr.span,
                              "index method didn't return a \
                               dereferenceable type?!")
                }
                Some(elt_tm) => elt_tm.ty,
            };

            // Overloaded. Invoke the index() method, which basically
            // yields a `&T` pointer. We can then proceed down the
            // normal path (below) to dereference that `&T`.
            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");

            bcx = Callee::method(bcx, method)
                .call(bcx, index_expr_debug_loc,
                      ArgOverloadedOp(base_datum, Some(ix_datum)),
                      Some(SaveIn(scratch.val))).bcx;

            let datum = scratch.to_expr_datum();
            let lval = Lvalue::new("expr::trans_index overload");
            if type_is_sized(bcx.tcx(), elt_ty) {
                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
            } else {
                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
            }
        }
        None => {
            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
                                                                base,
                                                                "index"));

            // Translate index expression and cast to a suitable LLVM integer.
            // Rust is less strict than LLVM in this regard.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
            let ix_val = ix_datum.to_llscalarish(bcx);
            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
                                                     val_ty(ix_val));
            let int_size = machine::llbitsize_of_real(bcx.ccx(),
                                                      ccx.int_type());
            let ix_val = {
                if ix_size < int_size {
                    if expr_ty(bcx, idx).is_signed() {
                        SExt(bcx, ix_val, ccx.int_type())
                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
                } else if ix_size > int_size {
                    Trunc(bcx, ix_val, ccx.int_type())
                } else {
                    ix_val
                }
            };

            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());

            let (base, len) = base_datum.get_vec_base_and_len(bcx);

            debug!("trans_index: base {:?}", Value(base));
            debug!("trans_index: len {:?}", Value(len));

            let bounds_check = ICmp(bcx,
                                    llvm::IntUGE,
                                    ix_val,
                                    len,
                                    index_expr_debug_loc);
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            let expected = Call(bcx,
                                expect,
                                &[bounds_check, C_bool(ccx, false)],
                                index_expr_debug_loc);
            bcx = with_cond(bcx, expected, |bcx| {
                controlflow::trans_fail_bounds_check(bcx,
                                                     expr_info(index_expr),
                                                     ix_val,
                                                     len)
            });
            let elt = InBoundsGEP(bcx, base, &[ix_val]);
            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
            let lval = Lvalue::new("expr::trans_index fallback");
            Datum::new(elt, unit_ty, LvalueExpr(lval))
        }
    };

    DatumBlock::new(bcx, elt_datum)
}
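
// For the non-overloaded path above, the emitted bounds check corresponds
// roughly to the following LLVM IR (a hedged sketch for `v[i]` on a slice
// of i32, with made-up value names):
//
//     %oob  = icmp uge i64 %i, %len
//     %cold = call i1 @llvm.expect.i1(i1 %oob, i1 false)
//     br i1 %cold, label %panic, label %ok   ; %panic calls the
//                                            ; bounds-check failure glue
//   ok:
//     %elt = getelementptr inbounds i32, i32* %base, i64 %i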

/// Translates a reference to a variable.
pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def)
                             -> Datum<'tcx, Lvalue> {

    match def {
        Def::Static(did, _) => consts::get_static(bcx.ccx(), did),
        Def::Upvar(_, nid, _, _) => {
            // Can't move upvars, so this is never a ZeroMemLastUse.
            let local_ty = node_id_type(bcx, nid);
            let lval = Lvalue::new_with_hint("expr::trans_var (upvar)",
                                             bcx, nid, HintKind::ZeroAndMaintain);
            match bcx.fcx.llupvars.borrow().get(&nid) {
                Some(&val) => Datum::new(val, local_ty, lval),
                None => {
                    bug!("trans_var: no llval for upvar {} found", nid);
                }
            }
        }
        Def::Local(_, nid) => {
            let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
                Some(&v) => v,
                None => {
                    bug!("trans_var: no datum for local/arg {} found", nid);
                }
            };
            debug!("take_local(nid={}, v={:?}, ty={})",
                   nid, Value(datum.val), datum.ty);
            datum
        }
        _ => bug!("{:?} should not reach expr::trans_var", def)
    }
}

fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            expr: &hir::Expr)
                                            -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rvalue_stmt");

    if bcx.unreachable.get() {
        return bcx;
    }

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprBreak(label_opt) => {
            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, Ignore)
        }
        hir::ExprAgain(label_opt) => {
            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprRet(ref ex) => {
            // Check to see if the return expression itself is reachable.
            // This can occur when the inner expression contains a return
            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
                cfg.node_is_reachable(expr.id)
            } else {
                true
            };

            if reachable {
                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
            } else {
                // If it's not reachable, just translate the inner expression
                // directly. This avoids having to manage a return slot when
                // it won't actually be used anyway.
                if let &Some(ref x) = ex {
                    bcx = trans_into(bcx, &x, Ignore);
                }
                // Mark the end of the block as unreachable. Once we get to
                // a return expression, there's no more we should be doing
                // after this.
                Unreachable(bcx);
                bcx
            }
        }
        hir::ExprWhile(ref cond, ref body, _) => {
            controlflow::trans_while(bcx, expr, &cond, &body)
        }
        hir::ExprLoop(ref body, _) => {
            controlflow::trans_loop(bcx, expr, &body)
        }
        hir::ExprAssign(ref dst, ref src) => {
            let src_datum = unpack_datum!(bcx, trans(bcx, &src));
            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));

            if bcx.fcx.type_needs_drop(dst_datum.ty) {
                // If there are destructors involved, make sure we
                // are copying from an rvalue, since that cannot possibly
                // alias an lvalue. We are concerned about code like:
                //
                //   a = a
                //
                // but also
                //
                //   a = a.b
                //
                // where e.g. a : Option<Foo> and a.b :
                // Option<Foo>. In that case, freeing `a` before the
                // assignment may also free `a.b`!
                //
                // We could avoid this intermediary with some analysis
                // to determine whether `dst` may possibly own `src`.
                debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
                let src_datum = unpack_datum!(
                    bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
                let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
                let opt_hint_val = opt_hint_datum.map(|d| d.to_value());

                // 1. Drop the data at the destination, passing the
                //    drop-hint in case the lvalue has already been
                //    dropped or moved.
                bcx = glue::drop_ty_core(bcx,
                                         dst_datum.val,
                                         dst_datum.ty,
                                         expr.debug_loc(),
                                         false,
                                         opt_hint_val);

                // 2. We are overwriting the destination; ensure that
                //    its drop-hint (if any) says "initialized."
                if let Some(hint_val) = opt_hint_val {
                    let hint_llval = hint_val.value();
                    let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
                    Store(bcx, drop_needed, hint_llval);
                }
                src_datum.store_to(bcx, dst_datum.val)
            } else {
                src_datum.store_to(bcx, dst_datum.val)
            }
        }
        hir::ExprAssignOp(op, ref dst, ref src) => {
            let method = bcx.tcx().tables
                                  .borrow()
                                  .method_map
                                  .get(&MethodCall::expr(expr.id)).cloned();

            if let Some(method) = method {
                let dst = unpack_datum!(bcx, trans(bcx, &dst));
                let src_datum = unpack_datum!(bcx, trans(bcx, &src));

                Callee::method(bcx, method)
                    .call(bcx, expr.debug_loc(),
                          ArgOverloadedOp(dst, Some(src_datum)), None).bcx
            } else {
                trans_assign_op(bcx, expr, op, &dst, &src)
            }
        }
        hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
            let outputs = outputs.iter().map(|output| {
                let out_datum = unpack_datum!(bcx, trans(bcx, output));
                unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id))
            }).collect();
            let inputs = inputs.iter().map(|input| {
                let input = unpack_datum!(bcx, trans(bcx, input));
                let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in"));
                input.to_llscalarish(bcx)
            }).collect();
            asm::trans_inline_asm(bcx, a, outputs, inputs);
            bcx
        }
        _ => {
            span_bug!(
                expr.span,
                "trans_rvalue_stmt_unadjusted reached \
                 fall-through case: {:?}",
                expr.node);
        }
    }
}

fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                           expr: &hir::Expr,
                                           dest: Dest)
                                           -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    // Entry into the method table if this is an overloaded call/op.
    let method_call = MethodCall::expr(expr.id);

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, dest)
        }
        hir::ExprPath(..) => {
            trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
        }
        hir::ExprIf(ref cond, ref thn, ref els) => {
            controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
        }
        hir::ExprMatch(ref discr, ref arms, _) => {
            _match::trans_match(bcx, expr, &discr, &arms[..], dest)
        }
        hir::ExprBlock(ref blk) => {
            controlflow::trans_block(bcx, &blk, dest)
        }
        hir::ExprStruct(_, ref fields, ref base) => {
            trans_struct(bcx,
                         &fields[..],
                         base.as_ref().map(|e| &**e),
                         expr.span,
                         expr.id,
                         node_id_type(bcx, expr.id),
                         dest)
        }
        hir::ExprTup(ref args) => {
            let numbered_fields: Vec<(usize, &hir::Expr)> =
                args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
            trans_adt(bcx,
                      expr_ty(bcx, expr),
                      Disr(0),
                      &numbered_fields[..],
                      None,
                      dest,
                      expr.debug_loc())
        }
        hir::ExprLit(ref lit) => {
            match lit.node {
                ast::LitKind::Str(ref s, _) => {
                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
                }
                _ => {
                    span_bug!(expr.span,
                              "trans_rvalue_dps_unadjusted shouldn't be \
                               translating this type of literal")
                }
            }
        }
        hir::ExprVec(..) | hir::ExprRepeat(..) => {
            tvec::trans_fixed_vstore(bcx, expr, dest)
        }
        hir::ExprClosure(_, ref decl, ref body) => {
            let dest = match dest {
                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
                Ignore => closure::Dest::Ignore(bcx.ccx())
            };

            // NB. To get the id of the closure, we don't use
            // `local_def_id(id)`, but rather we extract the closure
            // def-id from the expr's type. This is because this may
            // be an inlined expression from another crate, and we
            // want to get the ORIGINAL closure def-id, since that is
            // the key we need to find the closure-kind and
            // closure-type etc.
            let (def_id, substs) = match expr_ty(bcx, expr).sty {
                ty::TyClosure(def_id, ref substs) => (def_id, substs),
                ref t =>
                    span_bug!(
                        expr.span,
                        "closure expr without closure type: {:?}", t),
            };

            closure::trans_closure_expr(dest,
                                        decl,
                                        body,
                                        expr.id,
                                        def_id,
                                        substs).unwrap_or(bcx)
        }
        hir::ExprCall(ref f, ref args) => {
            let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
            let (callee, args) = if let Some(method) = method {
                let mut all_args = vec![&**f];
                all_args.extend(args.iter().map(|e| &**e));

                (Callee::method(bcx, method), ArgOverloadedCall(all_args))
            } else {
                let f = unpack_datum!(bcx, trans(bcx, f));
                (match f.ty.sty {
                    ty::TyFnDef(def_id, substs, _) => {
                        Callee::def(bcx.ccx(), def_id, substs)
                    }
                    ty::TyFnPtr(_) => {
                        let f = unpack_datum!(bcx,
                            f.to_rvalue_datum(bcx, "callee"));
                        Callee::ptr(f)
                    }
                    _ => {
                        span_bug!(expr.span,
                                  "type of callee is not a fn: {}", f.ty);
                    }
                }, ArgExprs(&args))
            };
            callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx
        }
        hir::ExprMethodCall(_, _, ref args) => {
            Callee::method_call(bcx, method_call)
                .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx
        }
        hir::ExprBinary(op, ref lhs, ref rhs_expr) => {
            // if not overloaded, would be RvalueDatumExpr
            let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
            let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr));
            if !op.node.is_by_value() {
                rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr));
            }

            Callee::method_call(bcx, method_call)
                .call(bcx, expr.debug_loc(),
                      ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx
        }
        hir::ExprUnary(_, ref subexpr) => {
            // if not overloaded, would be RvalueDatumExpr
            let arg = unpack_datum!(bcx, trans(bcx, &subexpr));

            Callee::method_call(bcx, method_call)
                .call(bcx, expr.debug_loc(),
                      ArgOverloadedOp(arg, None), Some(dest)).bcx
        }
        hir::ExprCast(..) => {
            // Trait casts used to come this way, now they should be coercions.
            span_bug!(expr.span, "DPS expr_cast (residual trait cast?)")
        }
        hir::ExprAssignOp(op, _, _) => {
            span_bug!(
                expr.span,
                "augmented assignment `{}=` should always be a rvalue_stmt",
                op.node.as_str())
        }
        _ => {
            span_bug!(
                expr.span,
                "trans_rvalue_dps_unadjusted reached fall-through \
                 case: {:?}",
                expr.node);
        }
    }
}

fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        ref_expr: &hir::Expr,
                                        def: Def,
                                        dest: Dest)
                                        -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_def_dps_unadjusted");

    let lldest = match dest {
        SaveIn(lldest) => lldest,
        Ignore => { return bcx; }
    };

    let ty = expr_ty(bcx, ref_expr);
    if let ty::TyFnDef(..) = ty.sty {
        // Zero-sized function or ctor.
        return bcx;
    }

    match def {
        Def::Variant(tid, vid) => {
            let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
            // Nullary variant.
            let ty = expr_ty(bcx, ref_expr);
            let repr = adt::represent_type(bcx.ccx(), ty);
            adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
            bcx
        }
        Def::Struct(..) => {
            match ty.sty {
                ty::TyStruct(def, _) if def.has_dtor() => {
                    let repr = adt::represent_type(bcx.ccx(), ty);
                    adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
                }
                _ => {}
            }
            bcx
        }
        _ => {
            span_bug!(ref_expr.span,
                      "Non-DPS def {:?} referenced by {}",
                      def, bcx.node_id_to_string(ref_expr.id));
        }
    }
}

fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                            fields: &[hir::Field],
                            base: Option<&hir::Expr>,
                            expr_span: codemap::Span,
                            expr_id: ast::NodeId,
                            ty: Ty<'tcx>,
                            dest: Dest) -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rec");

    let tcx = bcx.tcx();
    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);

    let mut need_base = vec![true; vinfo.fields.len()];

    let numbered_fields = fields.iter().map(|field| {
        let pos = vinfo.field_index(field.name.node);
        need_base[pos] = false;
        (pos, &*field.expr)
    }).collect::<Vec<_>>();

    let optbase = match base {
        Some(base_expr) => {
            let mut leftovers = Vec::new();
            for (i, b) in need_base.iter().enumerate() {
                if *b {
                    leftovers.push((i, vinfo.fields[i].1));
                }
            }
            Some(StructBaseInfo {expr: base_expr,
                                 fields: leftovers })
        }
        None => {
            if need_base.iter().any(|b| *b) {
                span_bug!(expr_span, "missing fields and no base expr")
            }
            None
        }
    };

    trans_adt(bcx,
              ty,
              vinfo.discr,
              &numbered_fields,
              optbase,
              dest,
              DebugLoc::At(expr_id, expr_span))
}

/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`).
///
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
pub struct StructBaseInfo<'a, 'tcx> {
    /// The base expression; will be evaluated after all explicit fields.
    expr: &'a hir::Expr,
    /// The indices of fields to copy paired with their types.
    fields: Vec<(usize, Ty<'tcx>)>
}
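
// As a hedged source-level example of the evaluation order that
// `StructBaseInfo` and `trans_adt` implement for functional record update
// (issue 23112), with `Point`, `f` and `g` being made-up names: in
//
//     let p2 = Point { x: f(), y: g(), ..p1 };
//
// `f()` and `g()` are evaluated first (into scratch values), then the base
// `p1` is read, and only afterwards are the scratch values moved into the
// destination fields.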

/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
///   expression to store into that field. The initializers will be
///   evaluated in the order specified by `fields`.
///
/// - `optbase` contains information on the base struct (if any) from
///   which remaining fields are copied; see comments on `StructBaseInfo`.
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                 ty: Ty<'tcx>,
                                 discr: Disr,
                                 fields: &[(usize, &hir::Expr)],
                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
                                 dest: Dest,
                                 debug_location: DebugLoc)
                                 -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_adt");
    let fcx = bcx.fcx;
    let repr = adt::represent_type(bcx.ccx(), ty);

    debug_location.apply(bcx.fcx);

    // If we don't care about the result, just make a
    // temporary stack slot
    let addr = match dest {
        SaveIn(pos) => pos,
        Ignore => {
            let llresult = alloc_ty(bcx, ty, "temp");
            call_lifetime_start(bcx, llresult);
            llresult
        }
    };

    debug!("trans_adt");

    // This scope holds intermediates that must be cleaned should a
    // panic occur before the ADT as a whole is ready.
    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();

    if ty.is_simd() {
        // Issue 23112: The original logic appeared vulnerable to the same
        // order-of-eval bug. But, SIMD values are tuple-structs;
        // i.e. functional record update (FRU) syntax is unavailable.
        //
        // To be safe, double-check that we did not get here via FRU.
        assert!(optbase.is_none());

        // This is the constructor of a SIMD type, such types are
        // always primitive machine types and so do not have a
        // destructor or require any clean-up.
        let llty = type_of::type_of(bcx.ccx(), ty);

        // Keep the vector in a register by running through the fields and
        // `insertelement`ing them directly into that register
        // (i.e. avoiding GEPi and `store`s to an alloca).
        let mut vec_val = C_undef(llty);

        for &(i, ref e) in fields {
            let block_datum = trans(bcx, &e);
            bcx = block_datum.bcx;
            let position = C_uint(bcx.ccx(), i);
            let value = block_datum.datum.to_llscalarish(bcx);
            vec_val = InsertElement(bcx, vec_val, value, position);
        }
        Store(bcx, vec_val, addr);
    } else if let Some(base) = optbase {
        // Issue 23112: If there is a base, then order-of-eval
        // requires field expressions eval'ed before base expression.

        // First, trans field expressions to temporary scratch values.
        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
            let datum = unpack_datum!(bcx, trans(bcx, &e));
            (i, datum)
        }).collect();

        debug_location.apply(bcx.fcx);

        // Second, trans the base to the dest.
        assert_eq!(discr, Disr(0));

        let addr = adt::MaybeSizedValue::sized(addr);
        match expr_kind(bcx.tcx(), &base.expr) {
            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
                bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
            },
            ExprKind::RvalueStmt => {
                bug!("unexpected expr kind for struct base expr")
            }
            _ => {
                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
                for &(i, t) in &base.fields {
                    let datum = base_datum.get_element(
                        bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
                    assert!(type_is_sized(bcx.tcx(), datum.ty));
                    let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
                    bcx = datum.store_to(bcx, dest);
                }
            }
        }

        // Finally, move scratch field values into actual field locations
        for (i, datum) in scratch_vals {
            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
            bcx = datum.store_to(bcx, dest);
        }
    } else {
        // No base means we can write all fields directly in place.
        let addr = adt::MaybeSizedValue::sized(addr);
        for &(i, ref e) in fields {
            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
            let e_ty = expr_ty_adjusted(bcx, &e);
            bcx = trans_into(bcx, &e, SaveIn(dest));
            let scope = cleanup::CustomScope(custom_cleanup_scope);
            fcx.schedule_lifetime_end(scope, dest);
            // FIXME: nonzeroing move should generalize to fields
            fcx.schedule_drop_mem(scope, dest, e_ty, None);
        }
    }

    adt::trans_set_discr(bcx, &repr, addr, discr);

    fcx.pop_custom_cleanup_scope(custom_cleanup_scope);

    // If we don't care about the result, drop the temporary we made
    match dest {
        SaveIn(_) => bcx,
        Ignore => {
            bcx = glue::drop_ty(bcx, addr, ty, debug_location);
            base::call_lifetime_end(bcx, addr);
            bcx
        }
    }
}

fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   lit: &ast::Lit)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    // must not be a string constant, that is a RvalueDpsExpr
    let _icx = push_ctxt("trans_immediate_lit");
    let ty = expr_ty(bcx, expr);
    let v = consts::const_lit(bcx.ccx(), expr, lit);
    immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}

fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           expr: &hir::Expr,
                           op: hir::UnOp,
                           sub_expr: &hir::Expr)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    let ccx = bcx.ccx();
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_unary_datum");

    let method_call = MethodCall::expr(expr.id);

    // The only overloaded operator that is translated to a datum
    // is an overloaded deref, since it always yields a `&T`.
    // Otherwise, we should be in the RvalueDpsExpr path.
    assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));

    let un_ty = expr_ty(bcx, expr);

    let debug_loc = expr.debug_loc();

    match op {
        hir::UnNot => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
            immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
        }
        hir::UnNeg => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let val = datum.to_llscalarish(bcx);
            let (bcx, llneg) = {
                if un_ty.is_fp() {
                    let result = FNeg(bcx, val, debug_loc);
                    (bcx, result)
                } else {
                    let is_signed = un_ty.is_signed();
                    let result = Neg(bcx, val, debug_loc);
                    let bcx = if bcx.ccx().check_overflow() && is_signed {
                        let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
                        let is_min = ICmp(bcx, llvm::IntEQ, val,
                                          C_integral(llty, min, true), debug_loc);
                        with_cond(bcx, is_min, |bcx| {
                            let msg = InternedString::new(
                                "attempted to negate with overflow");
                            controlflow::trans_fail(bcx, expr_info(expr), msg)
                        })
                    } else {
                        bcx
                    };
                    (bcx, result)
                }
            };
            immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
        }
        hir::UnDeref => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            deref_once(bcx, expr, datum, method_call)
        }
    }
}
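
// The `UnNeg` overflow check above exists because two's-complement negation
// has exactly one input whose negation does not fit: the minimum value. A
// hedged illustration of the behaviour the emitted check implements:
//
//     let x: i8 = -128;
//     let y = -x;   // -(-128) would be +128, which i8 cannot hold; with
//                   // overflow checks on, this panics with
//                   // "attempted to negate with overflow".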

fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               box_expr: &hir::Expr,
                               box_ty: Ty<'tcx>,
                               contents: &hir::Expr,
                               contents_ty: Ty<'tcx>)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_uniq_expr");
    let fcx = bcx.fcx;
    assert!(type_is_sized(bcx.tcx(), contents_ty));
    let llty = type_of::type_of(bcx.ccx(), contents_ty);
    let size = llsize_of(bcx.ccx(), llty);
    let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
    let llty_ptr = llty.ptr_to();
    let Result { bcx, val } = malloc_raw_dyn(bcx,
                                             llty_ptr,
                                             box_ty,
                                             size,
                                             align,
                                             box_expr.debug_loc());
    // Unique boxes do not allocate for zero-size types. The standard library
    // may assume that `free` is never called on the pointer returned for
    // `Box<ZeroSizeType>`.
    let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
        trans_into(bcx, contents, SaveIn(val))
    } else {
        let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
        fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
                                val, cleanup::HeapExchange, contents_ty);
        let bcx = trans_into(bcx, contents, SaveIn(val));
        fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
        bcx
    };
    immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}

fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             expr: &hir::Expr,
                             subexpr: &hir::Expr)
                             -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_addr_of");
    let mut bcx = bcx;
    let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
    let ty = expr_ty(bcx, expr);
    if !type_is_sized(bcx.tcx(), sub_datum.ty) {
        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
    } else {
        // Sized value, ref to a thin pointer
        immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
    }
}

fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  binop_expr: &hir::Expr,
                                  binop_ty: Ty<'tcx>,
                                  op: hir::BinOp,
                                  lhs: Datum<'tcx, Rvalue>,
                                  rhs: Datum<'tcx, Rvalue>)
                                  -> DatumBlock<'blk, 'tcx, Expr>
{
    let _icx = push_ctxt("trans_scalar_binop");

    let tcx = bcx.tcx();
    let lhs_t = lhs.ty;
    assert!(!lhs_t.is_simd());
    let is_float = lhs_t.is_fp();
    let is_signed = lhs_t.is_signed();
    let info = expr_info(binop_expr);

    let binop_debug_loc = binop_expr.debug_loc();

    let mut bcx = bcx;
    let lhs = lhs.to_llscalarish(bcx);
    let rhs = rhs.to_llscalarish(bcx);
    let val = match op.node {
        hir::BiAdd => {
            if is_float {
                FAdd(bcx, lhs, rhs, binop_debug_loc)
            } else {
                let (newbcx, res) = with_overflow_check(
                    bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
                bcx = newbcx;
                res
            }
        }
        hir::BiSub => {
            if is_float {
                FSub(bcx, lhs, rhs, binop_debug_loc)
            } else {
                let (newbcx, res) = with_overflow_check(
                    bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
                bcx = newbcx;
                res
            }
        }
        hir::BiMul => {
            if is_float {
                FMul(bcx, lhs, rhs, binop_debug_loc)
            } else {
                let (newbcx, res) = with_overflow_check(
                    bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
                bcx = newbcx;
                res
            }
        }
        hir::BiDiv => {
            if is_float {
                FDiv(bcx, lhs, rhs, binop_debug_loc)
            } else {
                // Only zero-check integers; fp /0 is NaN
                bcx = base::fail_if_zero_or_overflows(bcx,
                                                      expr_info(binop_expr),
                                                      op,
                                                      lhs,
                                                      rhs,
                                                      lhs_t);
                if is_signed {
                    SDiv(bcx, lhs, rhs, binop_debug_loc)
                } else {
                    UDiv(bcx, lhs, rhs, binop_debug_loc)
                }
            }
        }
        hir::BiRem => {
            if is_float {
                // LLVM currently always lowers `frem` instructions to the
                // appropriate library calls typically found in libm. Notably,
                // f64 gets wired up to `fmod` and f32 gets wired up to `fmodf`.
                // Inconveniently for us, 32-bit MSVC does not actually have a
                // `fmodf` symbol; it's instead just an inline function in a
                // header that goes up to a f64, uses `fmod`, and then comes
                // back down to a f32.
                //
                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
                // still unconditionally lower frem instructions over 32-bit floats
                // to a call to `fmodf`. To work around this we special case MSVC
                // 32-bit float rem instructions and instead do the call out to
                // `fmod` ourselves.
                //
                // Note that this is currently duplicated with src/libcore/ops.rs
                // which does the same thing, and it would be nice to perhaps unify
                // these two implementations one day! Also note that we call `fmod`
                // for both 32 and 64-bit floats because if we emit any FRem
                // instruction at all then LLVM is capable of optimizing it into a
                // 32-bit FRem (which we're trying to avoid).
                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
                               tcx.sess.target.target.arch == "x86";
                if use_fmod {
                    let f64t = Type::f64(bcx.ccx());
                    let fty = Type::func(&[f64t, f64t], &f64t);
                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
                    if lhs_t == tcx.types.f32 {
                        let lhs = FPExt(bcx, lhs, f64t);
                        let rhs = FPExt(bcx, rhs, f64t);
                        let res = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
                        FPTrunc(bcx, res, Type::f32(bcx.ccx()))
                    } else {
                        Call(bcx, llfn, &[lhs, rhs], binop_debug_loc)
                    }
                } else {
                    FRem(bcx, lhs, rhs, binop_debug_loc)
                }
            } else {
                // Only zero-check integers; fp %0 is NaN
                bcx = base::fail_if_zero_or_overflows(bcx,
                                                      expr_info(binop_expr),
                                                      op, lhs, rhs, lhs_t);
                if is_signed {
                    SRem(bcx, lhs, rhs, binop_debug_loc)
                } else {
                    URem(bcx, lhs, rhs, binop_debug_loc)
                }
            }
        }
        hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
        hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
        hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
        hir::BiShl => {
            let (newbcx, res) = with_overflow_check(
                bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
            bcx = newbcx;
            res
        }
        hir::BiShr => {
            let (newbcx, res) = with_overflow_check(
                bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
            bcx = newbcx;
            res
        }
        hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
            base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
        }
        _ => {
            span_bug!(binop_expr.span, "unexpected binop");
        }
    };

    immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
}
1730
1731 // refinement types would obviate the need for this
1732 enum lazy_binop_ty {
1733 lazy_and,
1734 lazy_or,
1735 }
1736
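// A rough sketch (block and value names as used below) of what gets built
// for `a && b`:
//
//   past_lhs:    %lhs = ...                 ; translate `a`
//                br i1 %lhs, label %before_rhs, label %join
//   before_rhs:  %rhs = ...                 ; translate `b`
//                br label %join
//   join:        %res = phi i1 [ %lhs, %past_lhs ], [ %rhs, %past_rhs ]
//
// For `a || b` the two branch targets are swapped, so `b` is evaluated
// only when `a` is false.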
1737 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1738 binop_expr: &hir::Expr,
1739 op: lazy_binop_ty,
1740 a: &hir::Expr,
1741 b: &hir::Expr)
1742 -> DatumBlock<'blk, 'tcx, Expr> {
1743 let _icx = push_ctxt("trans_lazy_binop");
1744 let binop_ty = expr_ty(bcx, binop_expr);
1745 let fcx = bcx.fcx;
1746
1747 let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1748 let lhs = lhs.to_llscalarish(past_lhs);
1749
1750 if past_lhs.unreachable.get() {
1751 return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1752 }
1753
1754 let join = fcx.new_id_block("join", binop_expr.id);
1755 let before_rhs = fcx.new_id_block("before_rhs", b.id);
1756
1757 match op {
1758 lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1759 lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1760 }
1761
1762 let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1763 let rhs = rhs.to_llscalarish(past_rhs);
1764
1765 if past_rhs.unreachable.get() {
1766 return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1767 }
1768
1769 Br(past_rhs, join.llbb, DebugLoc::None);
1770 let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1771 &[past_lhs.llbb, past_rhs.llbb]);
1772
1773 return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
1774 }
1775
1776 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1777 expr: &hir::Expr,
1778 op: hir::BinOp,
1779 lhs: &hir::Expr,
1780 rhs: &hir::Expr)
1781 -> DatumBlock<'blk, 'tcx, Expr> {
1782 let _icx = push_ctxt("trans_binary");
1783 let ccx = bcx.ccx();
1784
1785 // if overloaded, would be RvalueDpsExpr
1786 assert!(!ccx.tcx().is_method_call(expr.id));
1787
1788 match op.node {
1789 hir::BiAnd => {
1790 trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1791 }
1792 hir::BiOr => {
1793 trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
1794 }
1795 _ => {
1796 let mut bcx = bcx;
1797 let binop_ty = expr_ty(bcx, expr);
1798
1799 let lhs = unpack_datum!(bcx, trans(bcx, lhs));
1800 let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
1801 debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs);
1802 let rhs = unpack_datum!(bcx, trans(bcx, rhs));
1803 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
1804 debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs);
1805
1806 if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
1807 assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
1808 "built-in binary operators on fat pointers are homogeneous");
1809 assert_eq!(binop_ty, bcx.tcx().types.bool);
1810 let val = base::compare_scalar_types(
1811 bcx,
1812 lhs.val,
1813 rhs.val,
1814 lhs.ty,
1815 op.node,
1816 expr.debug_loc());
1817 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1818 } else {
1819 assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
1820 "built-in binary operators on fat pointers are homogeneous");
1821 trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
1822 }
1823 }
1824 }
1825 }
1826
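// A hypothetical example of a noop cast: for `p as *const i32` where
// `p: &i32`, both sides deref to `i32`, so only the type of the datum
// changes and no code is emitted.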
1827 pub fn cast_is_noop<'tcx>(tcx: &TyCtxt<'tcx>,
1828 expr: &hir::Expr,
1829 t_in: Ty<'tcx>,
1830 t_out: Ty<'tcx>)
1831 -> bool {
1832 if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
1833 return true;
1834 }
1835
1836 match (t_in.builtin_deref(true, ty::NoPreference),
1837 t_out.builtin_deref(true, ty::NoPreference)) {
1838 (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
1839 t_in == t_out
1840 }
1841 _ => {
1842 // This condition isn't redundant with the check for CoercionCast:
1843 // different types can be substituted into the same type, and
1844 // == equality can be overconservative if there are regions.
1845 t_in == t_out
1846 }
1847 }
1848 }
1849
1850 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1851 expr: &hir::Expr,
1852 id: ast::NodeId)
1853 -> DatumBlock<'blk, 'tcx, Expr>
1854 {
1855 use rustc::ty::cast::CastTy::*;
1856 use rustc::ty::cast::IntTy::*;
1857
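// Helper for integer-to-integer casts. A few illustrative (hypothetical)
// cases: `u32 -> u8` truncates, `i8 -> i32` sign-extends, `u8 -> u32`
// zero-extends, and a same-width cast like `i32 -> u32` is a bitcast.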
1858 fn int_cast(bcx: Block,
1859 lldsttype: Type,
1860 llsrctype: Type,
1861 llsrc: ValueRef,
1862 signed: bool)
1863 -> ValueRef
1864 {
1865 let _icx = push_ctxt("int_cast");
1866 let srcsz = llsrctype.int_width();
1867 let dstsz = lldsttype.int_width();
1868 return if dstsz == srcsz {
1869 BitCast(bcx, llsrc, lldsttype)
1870 } else if srcsz > dstsz {
1871 TruncOrBitCast(bcx, llsrc, lldsttype)
1872 } else if signed {
1873 SExtOrBitCast(bcx, llsrc, lldsttype)
1874 } else {
1875 ZExtOrBitCast(bcx, llsrc, lldsttype)
1876 }
1877 }
1878
1879 fn float_cast(bcx: Block,
1880 lldsttype: Type,
1881 llsrctype: Type,
1882 llsrc: ValueRef)
1883 -> ValueRef
1884 {
1885 let _icx = push_ctxt("float_cast");
1886 let srcsz = llsrctype.float_width();
1887 let dstsz = lldsttype.float_width();
1888 return if dstsz > srcsz {
1889 FPExt(bcx, llsrc, lldsttype)
1890 } else if srcsz > dstsz {
1891 FPTrunc(bcx, llsrc, lldsttype)
1892 } else { llsrc };
1893 }
1894
1895 let _icx = push_ctxt("trans_cast");
1896 let mut bcx = bcx;
1897 let ccx = bcx.ccx();
1898
1899 let t_in = expr_ty_adjusted(bcx, expr);
1900 let t_out = node_id_type(bcx, id);
1901
1902 debug!("trans_cast({:?} as {:?})", t_in, t_out);
1903 let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
1904 let ll_t_out = type_of::immediate_type_of(ccx, t_out);
1905 // Convert the value to be cast into a ValueRef, either by-ref or
1906 // by-value as appropriate given its type:
1907 let mut datum = unpack_datum!(bcx, trans(bcx, expr));
1908
1909 let datum_ty = monomorphize_type(bcx, datum.ty);
1910
1911 if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
1912 datum.ty = t_out;
1913 return DatumBlock::new(bcx, datum);
1914 }
1915
1916 if type_is_fat_ptr(bcx.tcx(), t_in) {
1917 assert!(datum.kind.is_by_ref());
1918 if type_is_fat_ptr(bcx.tcx(), t_out) {
1919 return DatumBlock::new(bcx, Datum::new(
1920 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
1921 t_out,
1922 Rvalue::new(ByRef)
1923 )).to_expr_datumblock();
1924 } else {
1925 // Fat -> thin pointer cast: return just the address (the data pointer)
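// (e.g. a hypothetical `p as *const u8` with `p: *const [u8]` loads only
// the data pointer and drops the length)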
1926 return immediate_rvalue_bcx(bcx,
1927 PointerCast(bcx,
1928 Load(bcx, get_dataptr(bcx, datum.val)),
1929 ll_t_out),
1930 t_out).to_expr_datumblock();
1931 }
1932 }
1933
1934 let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
1935 let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
1936
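// A C-like enum as the cast source is special-cased: for a hypothetical
// `enum E { A, B = 5 }`, translating `E::B as u32` first forces the value
// to an lvalue and loads its discriminant via `adt::trans_get_discr`;
// the numeric cast below then operates on that discriminant.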
1937 let (llexpr, signed) = if let Int(CEnum) = r_t_in {
1938 let repr = adt::represent_type(ccx, t_in);
1939 let datum = unpack_datum!(
1940 bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
1941 let llexpr_ptr = datum.to_llref();
1942 let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
1943 Some(Type::i64(ccx)), true);
1944 ll_t_in = val_ty(discr);
1945 (discr, adt::is_discr_signed(&repr))
1946 } else {
1947 (datum.to_llscalarish(bcx), t_in.is_signed())
1948 };
1949
1950 let newval = match (r_t_in, r_t_out) {
1951 (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
1952 PointerCast(bcx, llexpr, ll_t_out)
1953 }
1954 (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
1955 (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
1956
1957 (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
1958 (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
1959 (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
1960 (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
1961 (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
1962 (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
1963
1964 _ => span_bug!(expr.span,
1965 "translating unsupported cast: \
1966 {:?} -> {:?}",
1967 t_in,
1968 t_out)
1969 };
1970 return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
1971 }
1972
1973 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1974 expr: &hir::Expr,
1975 op: hir::BinOp,
1976 dst: &hir::Expr,
1977 src: &hir::Expr)
1978 -> Block<'blk, 'tcx> {
1979 let _icx = push_ctxt("trans_assign_op");
1980 let mut bcx = bcx;
1981
1982 debug!("trans_assign_op(expr={:?})", expr);
1983
1984 // User-defined operator methods cannot be used with `+=` etc right now
1985 assert!(!bcx.tcx().is_method_call(expr.id));
1986
1987 // Evaluate LHS (destination), which should be an lvalue
1988 let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
1989 assert!(!bcx.fcx.type_needs_drop(dst.ty));
1990 let lhs = load_ty(bcx, dst.val, dst.ty);
1991 let lhs = immediate_rvalue(lhs, dst.ty);
1992
1993 // Evaluate RHS - FIXME(#28160) this sucks
1994 let rhs = unpack_datum!(bcx, trans(bcx, &src));
1995 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
1996
1997 // Perform computation and store the result
1998 let result_datum = unpack_datum!(
1999 bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
2000 return result_datum.store_to(bcx, dst.val);
2001 }
2002
2003 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2004 datum: Datum<'tcx, Expr>,
2005 expr: &hir::Expr)
2006 -> DatumBlock<'blk, 'tcx, Expr> {
2007 let mut bcx = bcx;
2008
2009 // Ensure cleanup of `datum` if not already scheduled and obtain
2010 // a "by ref" pointer.
2011 let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2012
2013 // Compute final type. Note that we are loose with the region and
2014 // mutability, since those things don't matter in trans.
2015 let referent_ty = lv_datum.ty;
2016 let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2017
2018 // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
2019 // because there is cleanup scheduled and the datum doesn't own the data. For thin pointers,
2020 // however, we microoptimize it to be an Rvalue datum, which avoids the extra alloca and
2021 // level of indirection and has no ill effects.
2022 let kind = if type_is_sized(bcx.tcx(), referent_ty) {
2023 RvalueExpr(Rvalue::new(ByValue))
2024 } else {
2025 LvalueExpr(lv_datum.kind)
2026 };
2027
2028 // Get the pointer.
2029 let llref = lv_datum.to_llref();
2030 DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
2031 }
2032
2033 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2034 expr: &hir::Expr,
2035 datum: Datum<'tcx, Expr>,
2036 times: usize)
2037 -> DatumBlock<'blk, 'tcx, Expr> {
2038 let mut bcx = bcx;
2039 let mut datum = datum;
2040 for i in 0..times {
2041 let method_call = MethodCall::autoderef(expr.id, i as u32);
2042 datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2043 }
2044 DatumBlock { bcx: bcx, datum: datum }
2045 }
2046
2047 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2048 expr: &hir::Expr,
2049 datum: Datum<'tcx, Expr>,
2050 method_call: MethodCall)
2051 -> DatumBlock<'blk, 'tcx, Expr> {
2052 let ccx = bcx.ccx();
2053
2054 debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
2055 expr, datum, method_call);
2056
2057 let mut bcx = bcx;
2058
2059 // Check for overloaded deref.
2060 let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
2061 let datum = match method {
2062 Some(method) => {
2063 let method_ty = monomorphize_type(bcx, method.ty);
2064
2065 // Overloaded. Invoke the deref() method, which basically
2066 // converts from the `Smaht<T>` pointer that we have into
2067 // a `&T` pointer. We can then proceed down the normal
2068 // path (below) to dereference that `&T`.
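// (A hypothetical example: for `*rc` with `rc: Rc<String>`, the `deref()`
// call yields a `&String`, which the `TyRef` arm of the match below then
// dereferences.)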
2069 let datum = if method_call.autoderef == 0 {
2070 datum
2071 } else {
2072 // Always perform an AutoPtr when applying an overloaded auto-deref
2073 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
2074 };
2075
2076 let ref_ty = // invoked methods have their LB regions instantiated
2077 ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
2078 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2079
2080 bcx = Callee::method(bcx, method)
2081 .call(bcx, expr.debug_loc(),
2082 ArgOverloadedOp(datum, None),
2083 Some(SaveIn(scratch.val))).bcx;
2084 scratch.to_expr_datum()
2085 }
2086 None => {
2087 // Not overloaded. We already have a pointer we know how to deref.
2088 datum
2089 }
2090 };
2091
2092 let r = match datum.ty.sty {
2093 ty::TyBox(content_ty) => {
2094 // Make sure we have an lvalue datum here to get the
2095 // proper cleanups scheduled
2096 let datum = unpack_datum!(
2097 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2098
2099 if type_is_sized(bcx.tcx(), content_ty) {
2100 let ptr = load_ty(bcx, datum.val, datum.ty);
2101 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
2102 } else {
2103 // A fat pointer and a DST lvalue have the same representation,
2104 // just different types. Since there is no temporary for `*e`
2105 // here (because it is unsized), we cannot emulate the sized
2106 // object code path for running drop glue and free. Instead,
2107 // we schedule cleanup for `e`, turning it into an lvalue.
2108
2109 let lval = Lvalue::new("expr::deref_once ty_uniq");
2110 let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
2111 DatumBlock::new(bcx, datum)
2112 }
2113 }
2114
2115 ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
2116 ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
2117 let lval = Lvalue::new("expr::deref_once ptr");
2118 if type_is_sized(bcx.tcx(), content_ty) {
2119 let ptr = datum.to_llscalarish(bcx);
2120
2121 // Always generate an lvalue datum, even if datum.mode is
2122 // an rvalue. This is because datum.mode is only an
2123 // rvalue for non-owning pointers like &T or *T, in which
2124 // case cleanup *is* scheduled elsewhere, by the true
2125 // owner (or, in the case of *T, by the user).
2126 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
2127 } else {
2128 // A fat pointer and a DST lvalue have the same representation,
2129 // just different types.
2130 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
2131 }
2132 }
2133
2134 _ => {
2135 span_bug!(
2136 expr.span,
2137 "deref invoked on expr of invalid type {:?}",
2138 datum.ty);
2139 }
2140 };
2141
2142 debug!("deref_once(expr={}, method_call={:?}, result={:?})",
2143 expr.id, method_call, r.datum);
2144
2145 return r;
2146 }
2147
2148 #[derive(Debug)]
2149 enum OverflowOp {
2150 Add,
2151 Sub,
2152 Mul,
2153 Shl,
2154 Shr,
2155 }
2156
2157 impl OverflowOp {
2158 fn codegen_strategy(&self) -> OverflowCodegen {
2159 use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
2160 match *self {
2161 OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2162 OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2163 OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
2164
2165 OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2166 OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
2167 }
2168 }
2169 }
2170
2171 enum OverflowCodegen {
2172 ViaIntrinsic(OverflowOpViaIntrinsic),
2173 ViaInputCheck(OverflowOpViaInputCheck),
2174 }
2175
2176 enum OverflowOpViaInputCheck { Shl, Shr, }
2177
2178 #[derive(Debug)]
2179 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
2180
2181 impl OverflowOpViaIntrinsic {
2182 fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2183 let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2184 bcx.ccx().get_intrinsic(&name)
2185 }
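// Maps a checked op plus operand type to an LLVM intrinsic name; e.g.
// checked `+` on `u8` selects `llvm.uadd.with.overflow.i8`. `isize` and
// `usize` are first normalized to the target pointer width (`I64`/`U64`
// on a 64-bit target).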
2186 fn to_intrinsic_name(&self, tcx: &TyCtxt, ty: Ty) -> &'static str {
2187 use syntax::ast::IntTy::*;
2188 use syntax::ast::UintTy::*;
2189 use rustc::ty::{TyInt, TyUint};
2190
2191 let new_sty = match ty.sty {
2192 TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
2193 "32" => TyInt(I32),
2194 "64" => TyInt(I64),
2195 _ => bug!("unsupported target word size")
2196 },
2197 TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
2198 "32" => TyUint(U32),
2199 "64" => TyUint(U64),
2200 _ => bug!("unsupported target word size")
2201 },
2202 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
2203 _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type",
2204 *self)
2205 };
2206
2207 match *self {
2208 OverflowOpViaIntrinsic::Add => match new_sty {
2209 TyInt(I8) => "llvm.sadd.with.overflow.i8",
2210 TyInt(I16) => "llvm.sadd.with.overflow.i16",
2211 TyInt(I32) => "llvm.sadd.with.overflow.i32",
2212 TyInt(I64) => "llvm.sadd.with.overflow.i64",
2213
2214 TyUint(U8) => "llvm.uadd.with.overflow.i8",
2215 TyUint(U16) => "llvm.uadd.with.overflow.i16",
2216 TyUint(U32) => "llvm.uadd.with.overflow.i32",
2217 TyUint(U64) => "llvm.uadd.with.overflow.i64",
2218
2219 _ => bug!(),
2220 },
2221 OverflowOpViaIntrinsic::Sub => match new_sty {
2222 TyInt(I8) => "llvm.ssub.with.overflow.i8",
2223 TyInt(I16) => "llvm.ssub.with.overflow.i16",
2224 TyInt(I32) => "llvm.ssub.with.overflow.i32",
2225 TyInt(I64) => "llvm.ssub.with.overflow.i64",
2226
2227 TyUint(U8) => "llvm.usub.with.overflow.i8",
2228 TyUint(U16) => "llvm.usub.with.overflow.i16",
2229 TyUint(U32) => "llvm.usub.with.overflow.i32",
2230 TyUint(U64) => "llvm.usub.with.overflow.i64",
2231
2232 _ => bug!(),
2233 },
2234 OverflowOpViaIntrinsic::Mul => match new_sty {
2235 TyInt(I8) => "llvm.smul.with.overflow.i8",
2236 TyInt(I16) => "llvm.smul.with.overflow.i16",
2237 TyInt(I32) => "llvm.smul.with.overflow.i32",
2238 TyInt(I64) => "llvm.smul.with.overflow.i64",
2239
2240 TyUint(U8) => "llvm.umul.with.overflow.i8",
2241 TyUint(U16) => "llvm.umul.with.overflow.i16",
2242 TyUint(U32) => "llvm.umul.with.overflow.i32",
2243 TyUint(U64) => "llvm.umul.with.overflow.i64",
2244
2245 _ => bug!(),
2246 },
2247 }
2248 }
2249
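// The `*.with.overflow` intrinsics return a `{ iN, i1 }` pair holding the
// wrapping result and an overflow flag. Roughly (LLVM value names assumed
// for illustration):
//
//   %pair     = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %l, i32 %r)
//   %result   = extractvalue { i32, i1 } %pair, 0
//   %overflow = extractvalue { i32, i1 } %pair, 1
//
// The `llvm.expect.i1` call below then marks the overflow branch as cold.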
2250 fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2251 info: NodeIdAndSpan,
2252 lhs_t: Ty<'tcx>, lhs: ValueRef,
2253 rhs: ValueRef,
2254 binop_debug_loc: DebugLoc)
2255 -> (Block<'blk, 'tcx>, ValueRef) {
2256 let llfn = self.to_intrinsic(bcx, lhs_t);
2257
2258 let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
2259 let result = ExtractValue(bcx, val, 0); // iN operation result
2260 let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
2261
2262 let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
2263 binop_debug_loc);
2264
2265 let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2266 Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2267 binop_debug_loc);
2268
2269 let bcx =
2270 base::with_cond(bcx, cond, |bcx|
2271 controlflow::trans_fail(bcx, info,
2272 InternedString::new("arithmetic operation overflowed")));
2273
2274 (bcx, result)
2275 }
2276 }
2277
2278 impl OverflowOpViaInputCheck {
2279 fn build_with_input_check<'blk, 'tcx>(&self,
2280 bcx: Block<'blk, 'tcx>,
2281 info: NodeIdAndSpan,
2282 lhs_t: Ty<'tcx>,
2283 lhs: ValueRef,
2284 rhs: ValueRef,
2285 binop_debug_loc: DebugLoc)
2286 -> (Block<'blk, 'tcx>, ValueRef)
2287 {
2288 let lhs_llty = val_ty(lhs);
2289 let rhs_llty = val_ty(rhs);
2290
2291 // Panic if any bits are set outside of bits that we always
2292 // mask in.
2293 //
2294 // Note that the mask's value is derived from the LHS type
2295 // (since that is where the 32/64 distinction is relevant) but
2296 // the mask's type must match the RHS type (since they will
2297 // both be fed into an and-binop)
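// (Concretely, assuming a 32-bit lhs: the inverted mask is `!31`, so
// `rhs & !31` is nonzero exactly when the shift amount is >= 32.)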
2298 let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
2299
2300 let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2301 let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
2302 let result = match *self {
2303 OverflowOpViaInputCheck::Shl =>
2304 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2305 OverflowOpViaInputCheck::Shr =>
2306 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2307 };
2308 let bcx =
2309 base::with_cond(bcx, cond, |bcx|
2310 controlflow::trans_fail(bcx, info,
2311 InternedString::new("shift operation overflowed")));
2312
2313 (bcx, result)
2314 }
2315 }
2316
2317 // Build a check for whether an integer, or any element of an integer vector, is nonzero.
2318 fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2319 value: ValueRef,
2320 binop_debug_loc: DebugLoc) -> ValueRef {
2321 let llty = val_ty(value);
2322 let kind = llty.kind();
2323 match kind {
2324 TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
2325 TypeKind::Vector => {
2326 // Check if any elements of the vector are nonzero by treating
2327 // it as a wide integer and checking if the integer is nonzero.
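// (e.g. a `<4 x i32>` value is bitcast to a single `i128` and compared
// against zero as one wide integer)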
2328 let width = llty.vector_length() as u64 * llty.element_type().int_width();
2329 let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
2330 build_nonzero_check(bcx, int_value, binop_debug_loc)
2331 },
2332 _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
2333 }
2334 }
2335
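// When overflow checks are disabled, add/sub/mul fall through to the plain
// LLVM instructions, while shifts still go through the unchecked shift
// helpers, which (an assumption about the helpers defined later in this
// file) mask the shift amount so the emitted shift stays within LLVM's
// defined range.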
2336 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2337 lhs_t: Ty<'tcx>, lhs: ValueRef,
2338 rhs: ValueRef,
2339 binop_debug_loc: DebugLoc)
2340 -> (Block<'blk, 'tcx>, ValueRef) {
2341 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2342 if bcx.ccx().check_overflow() {
2344 match oop.codegen_strategy() {
2345 OverflowCodegen::ViaIntrinsic(oop) =>
2346 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2347 OverflowCodegen::ViaInputCheck(oop) =>
2348 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2349 }
2350 } else {
2351 let res = match oop {
2352 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2353 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2354 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2355
2356 OverflowOp::Shl =>
2357 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2358 OverflowOp::Shr =>
2359 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2360 };
2361 (bcx, res)
2362 }
2363 }
2364
2365 /// We categorize expressions into four kinds. The distinction between
2366 /// lvalue and rvalue is fundamental to the language. The distinction among
2367 /// the three kinds of rvalues is an artifact of trans which reflects how
2368 /// we will generate code for that kind of expression. See the module
2369 /// documentation above for more information.
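/// A rough mapping, mirroring `expr_kind` below: `*p`, `x.f`, and `v[i]`
/// are `Lvalue`; `f(x)` and struct literals are `RvalueDps`; `x as u32`
/// and non-overloaded binops are `RvalueDatum`; `return x` and loops are
/// `RvalueStmt`.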
2370 #[derive(Copy, Clone)]
2371 enum ExprKind {
2372 Lvalue,
2373 RvalueDps,
2374 RvalueDatum,
2375 RvalueStmt
2376 }
2377
2378 fn expr_kind(tcx: &TyCtxt, expr: &hir::Expr) -> ExprKind {
2379 if tcx.is_method_call(expr.id) {
2380 // Overloaded operations are generally calls, and hence they are
2381 // generated via DPS, but there are a few exceptions:
2382 return match expr.node {
2383 // `a += b` has a unit result.
2384 hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
2385
2386 // the deref method invoked for `*a` always yields an `&T`
2387 hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
2388
2389 // the index method invoked for `a[i]` always yields an `&T`
2390 hir::ExprIndex(..) => ExprKind::Lvalue,
2391
2392 // in the general case, result could be any type, use DPS
2393 _ => ExprKind::RvalueDps
2394 };
2395 }
2396
2397 match expr.node {
2398 hir::ExprPath(..) => {
2399 match tcx.resolve_expr(expr) {
2400 // Put functions and ctors with the ADTs, as they
2401 // are zero-sized, so DPS is the cheapest option.
2402 Def::Struct(..) | Def::Variant(..) |
2403 Def::Fn(..) | Def::Method(..) => {
2404 ExprKind::RvalueDps
2405 }
2406
2407 // Note: there is actually a good case to be made that
2408 // DefArg's, particularly those of immediate type, ought to be
2409 // considered rvalues.
2410 Def::Static(..) |
2411 Def::Upvar(..) |
2412 Def::Local(..) => ExprKind::Lvalue,
2413
2414 Def::Const(..) |
2415 Def::AssociatedConst(..) => ExprKind::RvalueDatum,
2416
2417 def => {
2418 span_bug!(
2419 expr.span,
2420 "uncategorized def for expr {}: {:?}",
2421 expr.id,
2422 def);
2423 }
2424 }
2425 }
2426
2427 hir::ExprType(ref expr, _) => {
2428 expr_kind(tcx, expr)
2429 }
2430
2431 hir::ExprUnary(hir::UnDeref, _) |
2432 hir::ExprField(..) |
2433 hir::ExprTupField(..) |
2434 hir::ExprIndex(..) => {
2435 ExprKind::Lvalue
2436 }
2437
2438 hir::ExprCall(..) |
2439 hir::ExprMethodCall(..) |
2440 hir::ExprStruct(..) |
2441 hir::ExprTup(..) |
2442 hir::ExprIf(..) |
2443 hir::ExprMatch(..) |
2444 hir::ExprClosure(..) |
2445 hir::ExprBlock(..) |
2446 hir::ExprRepeat(..) |
2447 hir::ExprVec(..) => {
2448 ExprKind::RvalueDps
2449 }
2450
2451 hir::ExprLit(ref lit) if lit.node.is_str() => {
2452 ExprKind::RvalueDps
2453 }
2454
2455 hir::ExprBreak(..) |
2456 hir::ExprAgain(..) |
2457 hir::ExprRet(..) |
2458 hir::ExprWhile(..) |
2459 hir::ExprLoop(..) |
2460 hir::ExprAssign(..) |
2461 hir::ExprInlineAsm(..) |
2462 hir::ExprAssignOp(..) => {
2463 ExprKind::RvalueStmt
2464 }
2465
2466 hir::ExprLit(_) | // Note: LitStr is carved out above
2467 hir::ExprUnary(..) |
2468 hir::ExprBox(_) |
2469 hir::ExprAddrOf(..) |
2470 hir::ExprBinary(..) |
2471 hir::ExprCast(..) => {
2472 ExprKind::RvalueDatum
2473 }
2474 }
2475 }