// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//!   result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//!   storing the result into `dest`. This is the preferred form, if you
//!   can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//!   a `Datum` with the result. You can then store the datum, inspect
//!   the value, etc. This may introduce temporaries if the datum is a
//!   structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//!   expression and ensures that the result has a cleanup associated with it,
//!   creating a temporary stack slot if necessary.
//!
//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
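//!
//! As an illustrative sketch (not part of the compiler itself), the three
//! expression kinds correspond to surface Rust like this:
//!
//! ```rust,ignore
//! let n = 22;                   // datum expression: naturally yields a value
//! let p = Point { x: 3, y: 4 }; // DPS expression: written into `p` in place
//! return 44;                    // statement expression: no meaningful result
//! ```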

#![allow(non_camel_case_types)]

pub use self::Dest::*;
use self::lazy_binop_ty::*;

use back::abi;
use llvm::{self, ValueRef, TypeKind};
use middle::const_qualif::ConstQualif;
use middle::def::Def;
use middle::subst::Substs;
use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
use trans::base::*;
use trans::build::*;
use trans::cleanup::{self, CleanupMethods, DropHintMethods};
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::declare;
use trans::glue;
use trans::machine;
use trans::meth;
use trans::tvec;
use trans::type_of;
use trans::Disr;
use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
use middle::ty::adjustment::CustomCoerceUnsized;
use middle::ty::{self, Ty};
use middle::ty::MethodCall;
use middle::ty::cast::{CastKind, CastTy};
use util::common::indenter;
use trans::machine::{llsize_of, llsize_of_alloc};
use trans::type_::Type;

use rustc_front;
use rustc_front::hir;

use syntax::{ast, codemap};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::parse::token;
use std::mem;

// Destinations

// These are passed around by the code generating functions to track the
// destination of a computation's value.

#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
    SaveIn(ValueRef),
    Ignore,
}

impl Dest {
    pub fn to_string(&self, ccx: &CrateContext) -> String {
        match *self {
            SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
            Ignore => "Ignore".to_string()
        }
    }
}
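
// As a rough intuition (a note on the enum above, not new machinery):
// translating `foo()` with `SaveIn(slot)` writes the call's return value
// directly into `slot`, while `Ignore` still evaluates the expression for
// its side effects but provides no result location.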

/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              expr: &hir::Expr,
                              dest: Dest)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    if adjustment_required(bcx, expr) {
        // use trans, which may be less efficient but
        // which will perform the adjustments:
        let datum = unpack_datum!(bcx, trans(bcx, expr));
        return datum.store_to_dest(bcx, dest, expr.id);
    }

    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
            if let SaveIn(lldest) = dest {
                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                                       bcx.fcx.param_substs,
                                                       consts::TrueConst::No) {
                    Ok(global) => {
                        // Cast pointer to destination, because constants
                        // have different types.
                        let lldest = PointerCast(bcx, lldest, val_ty(global));
                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
                        return bcx;
                    },
                    Err(consts::ConstEvalFailure::Runtime(_)) => {
                        // in case const evaluation errors, translate normally
                        // debug assertions catch the same errors
                        // see RFC 1229
                    },
                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
                        return bcx;
                    },
                }
            }

            // If we see a const here, that's because it evaluates to a type with zero size. We
            // should be able to just discard it, since const expressions are guaranteed not to
            // have side effects. This seems to be reached through tuple struct constructors being
            // passed zero-size constants.
            if let hir::ExprPath(..) = expr.node {
                match bcx.def(expr.id) {
                    Def::Const(_) | Def::AssociatedConst(_) => {
                        assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
                        return bcx;
                    }
                    _ => {}
                }
            }

            // Even if we don't have a value to emit, and the expression
            // doesn't have any side-effects, we still have to translate the
            // body of any closures.
            // FIXME: Find a better way of handling this case.
        } else {
            // The only way we're going to see a `const` at this point is if
            // it prefers in-place instantiation, likely because it contains
            // `[x; N]` somewhere within.
            match expr.node {
                hir::ExprPath(..) => {
                    match bcx.def(expr.id) {
                        Def::Const(did) | Def::AssociatedConst(did) => {
                            let empty_substs = bcx.tcx().mk_substs(Substs::trans_empty());
                            let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
                                                                    empty_substs);
                            // Temporarily get cleanup scopes out of the way,
                            // as they require sub-expressions to be contained
                            // inside the current AST scope.
                            // These should record no cleanups anyways, `const`
                            // can't have destructors.
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      vec![]);
                            // Lock emitted debug locations to the location of
                            // the constant reference expression.
                            debuginfo::with_source_location_override(bcx.fcx,
                                                                     expr.debug_loc(),
                                                                     || {
                                bcx = trans_into(bcx, const_expr, dest)
                            });
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      scopes);
                            assert!(scopes.is_empty());
                            return bcx;
                        }
                        _ => {}
                    }
                }
                _ => {}
            }
        }
    }

    debug!("trans_into() expr={:?}", expr);

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);

    let kind = expr_kind(bcx.tcx(), expr);
    bcx = match kind {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
        }
        ExprKind::RvalueDps => {
            trans_rvalue_dps_unadjusted(bcx, expr, dest)
        }
        ExprKind::RvalueStmt => {
            trans_rvalue_stmt_unadjusted(bcx, expr)
        }
    };

    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}

/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         expr: &hir::Expr)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    debug!("trans(expr={:?})", expr);

    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
    let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                               bcx.fcx.param_substs,
                                               consts::TrueConst::No) {
            Ok(global) => {
                if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
                    // Is borrowed as 'static, must return lvalue.

                    // Cast pointer to global, because constants have different types.
                    let const_ty = expr_ty_adjusted(bcx, expr);
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
                    return DatumBlock::new(bcx, datum.to_expr_datum());
                }

                // Otherwise, keep around and perform adjustments, if needed.
                let const_ty = if adjusted_global {
                    expr_ty_adjusted(bcx, expr)
                } else {
                    expr_ty(bcx, expr)
                };

                // This could use a better heuristic.
                Some(if type_is_immediate(bcx.ccx(), const_ty) {
                    // Cast pointer to global, because constants have different types.
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    // Maybe just get the value directly, instead of loading it?
                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
                } else {
                    let scratch = alloc_ty(bcx, const_ty, "const");
                    call_lifetime_start(bcx, scratch);
                    let lldest = if !const_ty.is_structural() {
                        // Cast pointer to slot, because constants have different types.
                        PointerCast(bcx, scratch, val_ty(global))
                    } else {
                        // In this case, memcpy_ty calls llvm.memcpy after casting both
                        // source and destination to i8*, so we don't need any casts.
                        scratch
                    };
                    memcpy_ty(bcx, lldest, global, const_ty);
                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
                })
            },
            Err(consts::ConstEvalFailure::Runtime(_)) => {
                // in case const evaluation errors, translate normally
                // debug assertions catch the same errors
                // see RFC 1229
                None
            },
            Err(consts::ConstEvalFailure::Compiletime(_)) => {
                // generate a dummy llvm value
                let const_ty = expr_ty(bcx, expr);
                let llty = type_of::type_of(bcx.ccx(), const_ty);
                let dummy = C_undef(llty.ptr_to());
                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
            },
        }
    } else {
        None
    };

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
    let datum = match global {
        Some(rvalue) => rvalue.to_expr_datum(),
        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
    };
    let datum = if adjusted_global {
        datum // trans::consts already performed adjustments.
    } else {
        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
    };
    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
    return DatumBlock::new(bcx, datum);
}

pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
}

pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}

pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
}
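
// For orientation (a descriptive note, not new machinery): a fat pointer is
// two words, (data, extra). For `&[T]` the extra word is the length; for a
// trait object it is the vtable pointer. `get_dataptr` and `get_meta` address
// those words via `abi::FAT_PTR_ADDR` and `abi::FAT_PTR_EXTRA`, and
// `copy_fat_ptr` copies both.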

fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr) -> bool {
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => { return false; }
        Some(adj) => adj
    };

    // Don't skip a conversion from Box<T> to &T, etc.
    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
        return true;
    }

    match adjustment {
        AdjustReifyFnPointer => {
            // FIXME(#19925) once fn item types are
            // zero-sized, we'll need to return true here
            false
        }
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
            false
        }
        AdjustDerefRef(ref adj) => {
            // We are a bit paranoid about adjustments and thus might have a re-
            // borrow here which merely derefs and then refs again (it might have
            // a different region or mutability, but we don't care here).
            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
        }
    }
}

/// Helper for `trans` that applies adjustments from `expr` to `datum`, which should be the
/// unadjusted translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 expr: &hir::Expr,
                                 datum: Datum<'tcx, Expr>)
                                 -> DatumBlock<'blk, 'tcx, Expr>
{
    let mut bcx = bcx;
    let mut datum = datum;
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => {
            return DatumBlock::new(bcx, datum);
        }
        Some(adj) => { adj }
    };
    debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
           expr,
           datum.to_string(bcx.ccx()),
           adjustment);
    match adjustment {
        AdjustReifyFnPointer => {
            // FIXME(#19925) once fn item types are
            // zero-sized, we'll need to do something here
        }
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
        }
        AdjustDerefRef(ref adj) => {
            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
                // We are a bit paranoid about adjustments and thus might have a re-
                // borrow here which merely derefs and then refs again (it might have
                // a different region or mutability, but we don't care here).
                match datum.ty.sty {
                    // Don't skip a conversion from Box<T> to &T, etc.
                    ty::TyRef(..) => {
                        if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
                            // Don't skip an overloaded deref.
                            0
                        } else {
                            1
                        }
                    }
                    _ => 0
                }
            } else {
                0
            };

            if adj.autoderefs > skip_reborrows {
                // Schedule cleanup.
                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
                                                          lval.to_expr_datum(),
                                                          adj.autoderefs - skip_reborrows));
            }

            // (You might think there is a more elegant way to do this than a
            // skip_reborrows bool, but then you remember that the borrow checker exists).
            if skip_reborrows == 0 && adj.autoref.is_some() {
                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
            }

            if let Some(target) = adj.unsize {
                // We do not arrange cleanup ourselves; if we already are an
                // L-value, then cleanup will have already been scheduled (and
                // the `datum.to_rvalue_datum` call below will emit code to zero
                // the drop flag when moving out of the L-value). If we are an
                // R-value, then we do not need to schedule cleanup.
                let source_datum = unpack_datum!(bcx,
                    datum.to_rvalue_datum(bcx, "__coerce_source"));

                let target = bcx.monomorphize(&target);

                let scratch = alloc_ty(bcx, target, "__coerce_target");
                call_lifetime_start(bcx, scratch);
                let target_datum = Datum::new(scratch, target,
                                              Rvalue::new(ByRef));
                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
                datum = Datum::new(scratch, target,
                                   RvalueExpr(Rvalue::new(ByRef)));
            }
        }
    }
    debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
    DatumBlock::new(bcx, datum)
}
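
// A worked example (informal; the actual adjustment tables come from typeck):
// coercing a `&[i32; 4]` to `&[i32]` records one autoderef, an autoref, and
// an unsize step, so `apply_adjustments` derefs, re-borrows, and then calls
// `coerce_unsized` into a scratch fat pointer. A plain reborrow of a `&T`
// (deref once, then ref again) is the pattern `skip_reborrows` elides.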

fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              span: codemap::Span,
                              source: Datum<'tcx, Rvalue>,
                              target: Datum<'tcx, Rvalue>)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    debug!("coerce_unsized({} -> {})",
           source.to_string(bcx.ccx()),
           target.to_string(bcx.ccx()));

    match (&source.ty.sty, &target.ty.sty) {
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            let (inner_source, inner_target) = (a, b);

            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
                // Normally, the source is a thin pointer and we are
                // adding extra info to make a fat pointer. The exception
                // is when we are upcasting an existing object fat pointer
                // to use a different vtable. In that case, we want to
                // load out the original data pointer so we can repackage
                // it.
                (Load(bcx, get_dataptr(bcx, source.val)),
                 Some(Load(bcx, get_meta(bcx, source.val))))
            } else {
                let val = if source.kind.is_by_ref() {
                    load_ty(bcx, source.val, source.ty)
                } else {
                    source.val
                };
                (val, None)
            };

            let info = unsized_info(bcx.ccx(), inner_source, inner_target,
                                    old_info, bcx.fcx.param_substs);

            // Compute the base pointer. This doesn't change the pointer value,
            // but merely its type.
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
            let base = PointerCast(bcx, base, ptr_ty);

            Store(bcx, base, get_dataptr(bcx, target.val));
            Store(bcx, info, get_meta(bcx, target.val));
        }

        // This can be extended to enums and tuples in the future.
        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
            assert_eq!(def_id_a, def_id_b);

            // The target is already by-ref because it's to be written to.
            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
            assert!(target.kind.is_by_ref());

            let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);

            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
            let src_fields = match &*repr_source {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bcx.sess().span_bug(span,
                                         &format!("Non univariant struct? (repr_source: {:?})",
                                                  repr_source)),
            };
            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
            let target_fields = match &*repr_target {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bcx.sess().span_bug(span,
                                         &format!("Non univariant struct? (repr_target: {:?})",
                                                  repr_target)),
            };

            let coerce_index = match kind {
                CustomCoerceUnsized::Struct(i) => i
            };
            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());

            let source_val = adt::MaybeSizedValue::sized(source.val);
            let target_val = adt::MaybeSizedValue::sized(target.val);

            let iter = src_fields.iter().zip(target_fields).enumerate();
            for (i, (src_ty, target_ty)) in iter {
                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);

                // If this is the field we need to coerce, recurse on it.
                if i == coerce_index {
                    coerce_unsized(bcx, span,
                                   Datum::new(ll_source, src_ty,
                                              Rvalue::new(ByRef)),
                                   Datum::new(ll_target, target_ty,
                                              Rvalue::new(ByRef)));
                } else {
                    // Otherwise, simply copy the data from the source.
                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
                }
            }
        }
        _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
                                     source.ty,
                                     target.ty))
    }
    bcx
}
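
// Two concrete shapes this handles (an explanatory note, not new cases):
// `Box<[i32; 4]> -> Box<[i32]>` stores the data pointer plus a length word,
// and `&Foo -> &Trait` stores the data pointer plus a vtable pointer. The
// struct arm covers user types such as `Rc<T> -> Rc<U>` via `CoerceUnsized`,
// recursing into the one field whose representation actually changes.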

/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
/// that the expr represents.
///
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
/// something like `x().f` is translated into roughly the equivalent of
///
///    { tmp = x(); tmp.f }
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   name: &str)
                                   -> DatumBlock<'blk, 'tcx, Lvalue> {
    let mut bcx = bcx;
    let datum = unpack_datum!(bcx, trans(bcx, expr));
    return datum.to_lvalue_datum(bcx, name, expr.id);
}

/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
/// directly.
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                expr: &hir::Expr)
                                -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;

    debug!("trans_unadjusted(expr={:?})", expr);
    let _indenter = indenter();

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    return match expr_kind(bcx.tcx(), expr) {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            let datum = unpack_datum!(bcx, {
                trans_datum_unadjusted(bcx, expr)
            });

            DatumBlock {bcx: bcx, datum: datum}
        }

        ExprKind::RvalueStmt => {
            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
            nil(bcx, expr_ty(bcx, expr))
        }

        ExprKind::RvalueDps => {
            let ty = expr_ty(bcx, expr);
            if type_is_zero_size(bcx.ccx(), ty) {
                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
                nil(bcx, ty)
            } else {
                let scratch = rvalue_scratch_datum(bcx, ty, "");
                bcx = trans_rvalue_dps_unadjusted(
                    bcx, expr, SaveIn(scratch.val));

                // Note: this is not obviously a good idea. It causes
                // immediate values to be loaded immediately after a
                // return from a call or other similar expression,
                // which in turn leads to alloca's having shorter
                // lifetimes and hence larger stack frames. However,
                // in turn it can lead to more register pressure.
                // Still, in practice it seems to increase
                // performance, since we have fewer problems with
                // morestack churn.
                let scratch = unpack_datum!(
                    bcx, scratch.to_appropriate_datum(bcx));

                DatumBlock::new(bcx, scratch.to_expr_datum())
            }
        }
    };

    fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
                       -> DatumBlock<'blk, 'tcx, Expr> {
        let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
        let datum = immediate_rvalue(llval, ty);
        DatumBlock::new(bcx, datum.to_expr_datum())
    }
}

fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      expr: &hir::Expr)
                                      -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let _icx = push_ctxt("trans_datum_unadjusted");

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans(bcx, &e)
        }
        hir::ExprPath(..) => {
            trans_def(bcx, expr, bcx.def(expr.id))
        }
        hir::ExprField(ref base, name) => {
            trans_rec_field(bcx, &base, name.node)
        }
        hir::ExprTupField(ref base, idx) => {
            trans_rec_tup_field(bcx, &base, idx.node)
        }
        hir::ExprIndex(ref base, ref idx) => {
            trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
        }
        hir::ExprBox(ref contents) => {
            // Special case for `Box<T>`
            let box_ty = expr_ty(bcx, expr);
            let contents_ty = expr_ty(bcx, &contents);
            match box_ty.sty {
                ty::TyBox(..) => {
                    trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
                }
                _ => bcx.sess().span_bug(expr.span,
                                         "expected unique box")
            }
        }
        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            trans_binary(bcx, expr, op, &lhs, &rhs)
        }
        hir::ExprUnary(op, ref x) => {
            trans_unary(bcx, expr, op, &x)
        }
        hir::ExprAddrOf(_, ref x) => {
            match x.node {
                hir::ExprRepeat(..) | hir::ExprVec(..) => {
                    // Special case for slices.
                    let cleanup_debug_loc =
                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                      x.id,
                                                                      x.span,
                                                                      false);
                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
                    let datum = unpack_datum!(
                        bcx, tvec::trans_slice_vec(bcx, expr, &x));
                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
                    DatumBlock::new(bcx, datum)
                }
                _ => {
                    trans_addr_of(bcx, expr, &x)
                }
            }
        }
        hir::ExprCast(ref val, _) => {
            // Datum output mode means this is a scalar cast:
            trans_imm_cast(bcx, &val, expr.id)
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_datum_unadjusted reached \
                          fall-through case: {:?}",
                         expr.node));
        }
    }
}

fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                              base: &hir::Expr,
                              get_idx: F)
                              -> DatumBlock<'blk, 'tcx, Expr> where
    F: FnOnce(&'blk ty::ctxt<'tcx>, &VariantInfo<'tcx>) -> usize,
{
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rec_field");

    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
    let bare_ty = base_datum.ty;
    let repr = adt::represent_type(bcx.ccx(), bare_ty);
    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);

    let ix = get_idx(bcx.tcx(), &vinfo);
    let d = base_datum.get_element(
        bcx,
        vinfo.fields[ix].1,
        |srcval| {
            adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
        });

    if type_is_sized(bcx.tcx(), d.ty) {
        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
    } else {
        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
        let info = Load(bcx, get_meta(bcx, base_datum.val));
        Store(bcx, info, get_meta(bcx, scratch.val));

        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
    }
}

/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               base: &hir::Expr,
                               field: ast::Name)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
}

/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   base: &hir::Expr,
                                   idx: usize)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, _| idx)
}

fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           index_expr: &hir::Expr,
                           base: &hir::Expr,
                           idx: &hir::Expr,
                           method_call: MethodCall)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates `base[idx]`.

    let _icx = push_ctxt("trans_index");
    let ccx = bcx.ccx();
    let mut bcx = bcx;

    let index_expr_debug_loc = index_expr.debug_loc();

    // Check for overloaded index.
    let method_ty = ccx.tcx()
                       .tables
                       .borrow()
                       .method_map
                       .get(&method_call)
                       .map(|method| method.ty);
    let elt_datum = match method_ty {
        Some(method_ty) => {
            let method_ty = monomorphize_type(bcx, method_ty);

            let base_datum = unpack_datum!(bcx, trans(bcx, base));

            // Translate index expression.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));

            let ref_ty = // invoked methods have LB regions instantiated:
                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
                None => {
                    bcx.tcx().sess.span_bug(index_expr.span,
                                            "index method didn't return a \
                                             dereferenceable type?!")
                }
                Some(elt_tm) => elt_tm.ty,
            };

            // Overloaded. Evaluate `trans_overloaded_op`, which will
            // invoke the user's index() method, which basically yields
            // a `&T` pointer. We can then proceed down the normal
            // path (below) to dereference that `&T`.
            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
            unpack_result!(bcx,
                           trans_overloaded_op(bcx,
                                               index_expr,
                                               method_call,
                                               base_datum,
                                               Some((ix_datum, idx.id)),
                                               Some(SaveIn(scratch.val)),
                                               false));
            let datum = scratch.to_expr_datum();
            let lval = Lvalue::new("expr::trans_index overload");
            if type_is_sized(bcx.tcx(), elt_ty) {
                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
            } else {
                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
            }
        }
        None => {
            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
                                                                base,
                                                                "index"));

            // Translate index expression and cast to a suitable LLVM integer.
            // Rust is less strict than LLVM in this regard.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
            let ix_val = ix_datum.to_llscalarish(bcx);
            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
                                                     val_ty(ix_val));
            let int_size = machine::llbitsize_of_real(bcx.ccx(),
                                                      ccx.int_type());
            let ix_val = {
                if ix_size < int_size {
                    if expr_ty(bcx, idx).is_signed() {
                        SExt(bcx, ix_val, ccx.int_type())
                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
                } else if ix_size > int_size {
                    Trunc(bcx, ix_val, ccx.int_type())
                } else {
                    ix_val
                }
            };

            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());

            let (base, len) = base_datum.get_vec_base_and_len(bcx);

            debug!("trans_index: base {}", bcx.val_to_string(base));
            debug!("trans_index: len {}", bcx.val_to_string(len));

            let bounds_check = ICmp(bcx,
                                    llvm::IntUGE,
                                    ix_val,
                                    len,
                                    index_expr_debug_loc);
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            let expected = Call(bcx,
                                expect,
                                &[bounds_check, C_bool(ccx, false)],
                                None,
                                index_expr_debug_loc);
            bcx = with_cond(bcx, expected, |bcx| {
                controlflow::trans_fail_bounds_check(bcx,
                                                     expr_info(index_expr),
                                                     ix_val,
                                                     len)
            });
            let elt = InBoundsGEP(bcx, base, &[ix_val]);
            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
            let lval = Lvalue::new("expr::trans_index fallback");
            Datum::new(elt, unit_ty, LvalueExpr(lval))
        }
    };

    DatumBlock::new(bcx, elt_datum)
}
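
// In rough pseudocode, the non-overloaded indexing path above emits:
//
//     if ix >= len {                  // unsigned compare, hinted "unlikely"
//         panic_bounds_check(ix, len) // via llvm.expect.i1
//     }
//     elt = &base[ix]                 // in-bounds GEP, cast to element type
//
// (A descriptive sketch of the generated code, not additional source.)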

fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         ref_expr: &hir::Expr,
                         def: Def)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates a reference to a path.

    let _icx = push_ctxt("trans_def_lvalue");
    match def {
        Def::Fn(..) | Def::Method(..) |
        Def::Struct(..) | Def::Variant(..) => {
            let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
                                                bcx.fcx.param_substs);
            DatumBlock::new(bcx, datum.to_expr_datum())
        }
        Def::Static(did, _) => {
            let const_ty = expr_ty(bcx, ref_expr);
            let val = get_static_val(bcx.ccx(), did, const_ty);
            let lval = Lvalue::new("expr::trans_def");
            DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
        }
        Def::Const(_) | Def::AssociatedConst(_) => {
            bcx.sess().span_bug(ref_expr.span,
                "constant expression should not reach expr::trans_def")
        }
        _ => {
            DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
        }
    }
}

fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            expr: &hir::Expr)
                                            -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rvalue_stmt");

    if bcx.unreachable.get() {
        return bcx;
    }

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprBreak(label_opt) => {
            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, Ignore)
        }
        hir::ExprAgain(label_opt) => {
            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprRet(ref ex) => {
            // Check to see if the return expression itself is reachable.
            // This can occur when the inner expression contains a return.
            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
                cfg.node_is_reachable(expr.id)
            } else {
                true
            };

            if reachable {
                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
            } else {
                // If it's not reachable, just translate the inner expression
                // directly. This avoids having to manage a return slot when
                // it won't actually be used anyway.
                if let &Some(ref x) = ex {
                    bcx = trans_into(bcx, &x, Ignore);
                }
                // Mark the end of the block as unreachable. Once we get to
                // a return expression, there's no more we should be doing
                // after this.
                Unreachable(bcx);
                bcx
            }
        }
        hir::ExprWhile(ref cond, ref body, _) => {
            controlflow::trans_while(bcx, expr, &cond, &body)
        }
        hir::ExprLoop(ref body, _) => {
            controlflow::trans_loop(bcx, expr, &body)
        }
        hir::ExprAssign(ref dst, ref src) => {
            let src_datum = unpack_datum!(bcx, trans(bcx, &src));
            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));

            if bcx.fcx.type_needs_drop(dst_datum.ty) {
                // If there are destructors involved, make sure we
                // are copying from an rvalue, since that cannot possibly
                // alias an lvalue. We are concerned about code like:
                //
                //   a = a
                //
                // but also
                //
                //   a = a.b
                //
                // where e.g. a : Option<Foo> and a.b :
                // Option<Foo>. In that case, freeing `a` before the
                // assignment may also free `a.b`!
                //
                // We could avoid this intermediary with some analysis
                // to determine whether `dst` may possibly own `src`.
                debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
                let src_datum = unpack_datum!(
                    bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
                let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
                let opt_hint_val = opt_hint_datum.map(|d| d.to_value());

                // 1. Drop the data at the destination, passing the
                //    drop-hint in case the lvalue has already been
                //    dropped or moved.
                bcx = glue::drop_ty_core(bcx,
                                         dst_datum.val,
                                         dst_datum.ty,
                                         expr.debug_loc(),
                                         false,
                                         opt_hint_val);

                // 2. We are overwriting the destination; ensure that
                //    its drop-hint (if any) says "initialized."
                if let Some(hint_val) = opt_hint_val {
                    let hint_llval = hint_val.value();
                    let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
                    Store(bcx, drop_needed, hint_llval);
                }
                src_datum.store_to(bcx, dst_datum.val)
            } else {
                src_datum.store_to(bcx, dst_datum.val)
            }
        }
        hir::ExprAssignOp(op, ref dst, ref src) => {
            let has_method_map = bcx.tcx()
                                    .tables
                                    .borrow()
                                    .method_map
                                    .contains_key(&MethodCall::expr(expr.id));

            if has_method_map {
                let dst = unpack_datum!(bcx, trans(bcx, &dst));
                let src_datum = unpack_datum!(bcx, trans(bcx, &src));
                trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), dst,
                                    Some((src_datum, src.id)), None, false).bcx
            } else {
                trans_assign_op(bcx, expr, op, &dst, &src)
            }
        }
        hir::ExprInlineAsm(ref a) => {
            asm::trans_inline_asm(bcx, a)
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_stmt_unadjusted reached \
                          fall-through case: {:?}",
                         expr.node));
        }
    }
}

fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                           expr: &hir::Expr,
                                           dest: Dest)
                                           -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
    let mut bcx = bcx;
    let tcx = bcx.tcx();

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, dest)
        }
        hir::ExprPath(..) => {
            trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
        }
        hir::ExprIf(ref cond, ref thn, ref els) => {
            controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
        }
        hir::ExprMatch(ref discr, ref arms, _) => {
            _match::trans_match(bcx, expr, &discr, &arms[..], dest)
        }
        hir::ExprBlock(ref blk) => {
            controlflow::trans_block(bcx, &blk, dest)
        }
        hir::ExprStruct(_, ref fields, ref base) => {
            trans_struct(bcx,
                         &fields[..],
                         base.as_ref().map(|e| &**e),
                         expr.span,
                         expr.id,
                         node_id_type(bcx, expr.id),
                         dest)
        }
        hir::ExprRange(ref start, ref end) => {
            // FIXME it is just not right that we are synthesising ast nodes in
            // trans. Shudder.
            fn make_field(field_name: &str, expr: P<hir::Expr>) -> hir::Field {
                hir::Field {
                    name: codemap::dummy_spanned(token::intern(field_name)),
                    expr: expr,
                    span: codemap::DUMMY_SP,
                }
            }

            // A range just desugars into a struct.
            // Note that the type of the start and end may not be the same, but
            // they should only differ in their lifetime, which should not matter
            // in trans.
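            // Concretely (given the standard range lang items):
            //   a..b => Range { start: a, end: b }
            //   a..  => RangeFrom { start: a }
            //   ..b  => RangeTo { end: b }
            //   ..   => RangeFull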
            let (did, fields, ty_params) = match (start, end) {
                (&Some(ref start), &Some(ref end)) => {
                    // Desugar to Range
                    let fields = vec![make_field("start", start.clone()),
                                      make_field("end", end.clone())];
                    (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
                }
                (&Some(ref start), &None) => {
                    // Desugar to RangeFrom
                    let fields = vec![make_field("start", start.clone())];
                    (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
                }
                (&None, &Some(ref end)) => {
                    // Desugar to RangeTo
                    let fields = vec![make_field("end", end.clone())];
                    (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
                }
                _ => {
                    // Desugar to RangeFull
                    (tcx.lang_items.range_full_struct(), vec![], vec![])
                }
            };

            if let Some(did) = did {
                let substs = Substs::new_type(ty_params, vec![]);
                trans_struct(bcx,
                             &fields,
                             None,
                             expr.span,
                             expr.id,
                             tcx.mk_struct(tcx.lookup_adt_def(did),
                                           tcx.mk_substs(substs)),
                             dest)
            } else {
                tcx.sess.span_bug(expr.span,
                                  "No lang item for ranges (how did we get this far?)")
            }
        }
        hir::ExprTup(ref args) => {
            let numbered_fields: Vec<(usize, &hir::Expr)> =
                args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
            trans_adt(bcx,
                      expr_ty(bcx, expr),
                      Disr(0),
                      &numbered_fields[..],
                      None,
                      dest,
                      expr.debug_loc())
        }
        hir::ExprLit(ref lit) => {
            match lit.node {
                ast::LitKind::Str(ref s, _) => {
                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
                }
                _ => {
                    bcx.tcx()
                       .sess
                       .span_bug(expr.span,
                                 "trans_rvalue_dps_unadjusted shouldn't be \
                                  translating this type of literal")
                }
            }
        }
        hir::ExprVec(..) | hir::ExprRepeat(..) => {
            tvec::trans_fixed_vstore(bcx, expr, dest)
        }
        hir::ExprClosure(_, ref decl, ref body) => {
            let dest = match dest {
                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
                Ignore => closure::Dest::Ignore(bcx.ccx())
            };

            // NB. To get the id of the closure, we don't use
            // `local_def_id(id)`, but rather we extract the closure
            // def-id from the expr's type. This is because this may
            // be an inlined expression from another crate, and we
            // want to get the ORIGINAL closure def-id, since that is
            // the key we need to find the closure-kind and
            // closure-type etc.
            let (def_id, substs) = match expr_ty(bcx, expr).sty {
                ty::TyClosure(def_id, ref substs) => (def_id, substs),
                ref t =>
                    bcx.tcx().sess.span_bug(
                        expr.span,
                        &format!("closure expr without closure type: {:?}", t)),
            };

            closure::trans_closure_expr(dest,
                                        decl,
                                        body,
                                        expr.id,
                                        def_id,
                                        substs,
                                        &expr.attrs).unwrap_or(bcx)
        }
        hir::ExprCall(ref f, ref args) => {
            if bcx.tcx().is_method_call(expr.id) {
                trans_overloaded_call(bcx,
                                      expr,
                                      &f,
                                      &args[..],
                                      Some(dest))
            } else {
                callee::trans_call(bcx,
                                   expr,
                                   &f,
                                   callee::ArgExprs(&args[..]),
                                   dest)
            }
        }
        hir::ExprMethodCall(_, _, ref args) => {
            callee::trans_method_call(bcx,
                                      expr,
                                      &args[0],
                                      callee::ArgExprs(&args[..]),
                                      dest)
        }
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            // if not overloaded, would be RvalueDatumExpr
            let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
            let rhs_datum = unpack_datum!(bcx, trans(bcx, &rhs));
            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
                                Some((rhs_datum, rhs.id)), Some(dest),
                                !rustc_front::util::is_by_value_binop(op.node)).bcx
        }
        hir::ExprUnary(op, ref subexpr) => {
            // if not overloaded, would be RvalueDatumExpr
            let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
                                arg, None, Some(dest),
                                !rustc_front::util::is_by_value_unop(op)).bcx
        }
        hir::ExprIndex(ref base, ref idx) => {
            // if not overloaded, would be RvalueDatumExpr
            let base = unpack_datum!(bcx, trans(bcx, &base));
            let idx_datum = unpack_datum!(bcx, trans(bcx, &idx));
            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
                                Some((idx_datum, idx.id)), Some(dest), true).bcx
        }
        hir::ExprCast(..) => {
            // Trait casts used to come this way, now they should be coercions.
            bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
        }
        hir::ExprAssignOp(op, _, _) => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("augmented assignment `{}=` should always be an rvalue_stmt",
                         rustc_front::util::binop_to_string(op.node)))
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_dps_unadjusted reached fall-through \
                          case: {:?}",
                         expr.node));
        }
    }
}

fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        ref_expr: &hir::Expr,
                                        def: Def,
                                        dest: Dest)
                                        -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_def_dps_unadjusted");

    let lldest = match dest {
        SaveIn(lldest) => lldest,
        Ignore => { return bcx; }
    };

    match def {
        Def::Variant(tid, vid) => {
            let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
            if let ty::VariantKind::Tuple = variant.kind() {
                // N-ary variant.
                let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
                                                ExprId(ref_expr.id),
                                                bcx.fcx.param_substs).val;
                Store(bcx, llfn, lldest);
                return bcx;
            } else {
                // Nullary variant.
                let ty = expr_ty(bcx, ref_expr);
                let repr = adt::represent_type(bcx.ccx(), ty);
                adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
                return bcx;
            }
        }
        Def::Struct(..) => {
            let ty = expr_ty(bcx, ref_expr);
            match ty.sty {
                ty::TyStruct(def, _) if def.has_dtor() => {
                    let repr = adt::represent_type(bcx.ccx(), ty);
                    adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
                }
                _ => {}
            }
            bcx
        }
        _ => {
            bcx.tcx().sess.span_bug(ref_expr.span, &format!(
                "Non-DPS def {:?} referenced by {}",
                def, bcx.node_id_to_string(ref_expr.id)));
        }
    }
}

pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                         ref_expr: &hir::Expr,
                                         def: Def,
                                         param_substs: &'tcx Substs<'tcx>)
                                         -> Datum<'tcx, Rvalue> {
    let _icx = push_ctxt("trans_def_datum_unadjusted");

    match def {
        Def::Fn(did) |
        Def::Struct(did) | Def::Variant(_, did) => {
            callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
        }
        Def::Method(method_did) => {
            match ccx.tcx().impl_or_trait_item(method_did).container() {
                ty::ImplContainer(_) => {
                    callee::trans_fn_ref(ccx, method_did,
                                         ExprId(ref_expr.id),
                                         param_substs)
                }
                ty::TraitContainer(trait_did) => {
                    meth::trans_static_method_callee(ccx, method_did,
                                                     trait_did, ref_expr.id,
                                                     param_substs)
                }
            }
        }
        _ => {
            ccx.tcx().sess.span_bug(ref_expr.span, &format!(
                "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
                def,
                ref_expr));
        }
    }
}

/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   def: Def)
                                   -> Datum<'tcx, Lvalue> {
    let _icx = push_ctxt("trans_local_var");

    match def {
        Def::Upvar(_, nid, _, _) => {
            // Can't move upvars, so this is never a ZeroMemLastUse.
            let local_ty = node_id_type(bcx, nid);
            let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
                                             bcx, nid, HintKind::ZeroAndMaintain);
            match bcx.fcx.llupvars.borrow().get(&nid) {
                Some(&val) => Datum::new(val, local_ty, lval),
                None => {
                    bcx.sess().bug(&format!(
                        "trans_local_var: no llval for upvar {} found",
                        nid));
                }
            }
        }
        Def::Local(_, nid) => {
            let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
                Some(&v) => v,
                None => {
                    bcx.sess().bug(&format!(
                        "trans_local_var: no datum for local/arg {} found",
                        nid));
                }
            };
            debug!("take_local(nid={}, v={}, ty={})",
                   nid, bcx.val_to_string(datum.val), datum.ty);
            datum
        }
        _ => {
            bcx.sess().unimpl(&format!(
                "unsupported def type in trans_local_var: {:?}",
                def));
        }
    }
}

fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                            fields: &[hir::Field],
                            base: Option<&hir::Expr>,
                            expr_span: codemap::Span,
                            expr_id: ast::NodeId,
                            ty: Ty<'tcx>,
                            dest: Dest) -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rec");

    let tcx = bcx.tcx();
    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);

    let mut need_base = vec![true; vinfo.fields.len()];

    let numbered_fields = fields.iter().map(|field| {
        let pos = vinfo.field_index(field.name.node);
        need_base[pos] = false;
        (pos, &*field.expr)
    }).collect::<Vec<_>>();

    let optbase = match base {
        Some(base_expr) => {
            let mut leftovers = Vec::new();
            for (i, b) in need_base.iter().enumerate() {
                if *b {
                    leftovers.push((i, vinfo.fields[i].1));
                }
            }
            Some(StructBaseInfo {expr: base_expr,
                                 fields: leftovers })
        }
        None => {
            if need_base.iter().any(|b| *b) {
                tcx.sess.span_bug(expr_span, "missing fields and no base expr")
            }
            None
        }
    };

    trans_adt(bcx,
              ty,
              vinfo.discr,
              &numbered_fields,
              optbase,
              dest,
              DebugLoc::At(expr_id, expr_span))
}

/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`).
///
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
pub struct StructBaseInfo<'a, 'tcx> {
    /// The base expression; will be evaluated after all explicit fields.
    expr: &'a hir::Expr,
    /// The indices of fields to copy paired with their types.
    fields: Vec<(usize, Ty<'tcx>)>
}

/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
///   expression to store into that field. The initializers will be
///   evaluated in the order specified by `fields`.
///
/// - `optbase` contains information on the base struct (if any) from
///   which remaining fields are copied; see comments on `StructBaseInfo`.
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                 ty: Ty<'tcx>,
                                 discr: Disr,
                                 fields: &[(usize, &hir::Expr)],
                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
                                 dest: Dest,
                                 debug_location: DebugLoc)
                                 -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_adt");
    let fcx = bcx.fcx;
    let repr = adt::represent_type(bcx.ccx(), ty);

    debug_location.apply(bcx.fcx);

    // If we don't care about the result, just make a
    // temporary stack slot
    let addr = match dest {
        SaveIn(pos) => pos,
        Ignore => {
            let llresult = alloc_ty(bcx, ty, "temp");
            call_lifetime_start(bcx, llresult);
            llresult
        }
    };

    debug!("trans_adt");

    // This scope holds intermediates that must be cleaned should
    // panic occur before the ADT as a whole is ready.
    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();

    if ty.is_simd() {
        // Issue 23112: The original logic appeared vulnerable to the same
        // order-of-eval bug. But, SIMD values are tuple-structs;
        // i.e. functional record update (FRU) syntax is unavailable.
        //
        // To be safe, double-check that we did not get here via FRU.
        assert!(optbase.is_none());

        // This is the constructor of a SIMD type, such types are
        // always primitive machine types and so do not have a
        // destructor or require any clean-up.
        let llty = type_of::type_of(bcx.ccx(), ty);

        // Keep the vector in a register: run through the fields,
        // `insertelement`ing them directly into that register
        // (i.e. avoid GEPi and `store`s to an alloca).
        let mut vec_val = C_undef(llty);

        for &(i, ref e) in fields {
            let block_datum = trans(bcx, &e);
            bcx = block_datum.bcx;
            let position = C_uint(bcx.ccx(), i);
            let value = block_datum.datum.to_llscalarish(bcx);
            vec_val = InsertElement(bcx, vec_val, value, position);
        }
        Store(bcx, vec_val, addr);
    } else if let Some(base) = optbase {
        // Issue 23112: If there is a base, then order-of-eval
        // requires field expressions eval'ed before base expression.
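        // E.g. in `Foo { x: f(), ..base }` the call `f()` must run before
        // `base` is read (the names here are hypothetical), which is why the
        // explicit fields are staged into scratch values first.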

        // First, trans field expressions to temporary scratch values.
        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
            let datum = unpack_datum!(bcx, trans(bcx, &e));
            (i, datum)
        }).collect();

        debug_location.apply(bcx.fcx);

        // Second, trans the base to the dest.
        assert_eq!(discr, Disr(0));

        let addr = adt::MaybeSizedValue::sized(addr);
        match expr_kind(bcx.tcx(), &base.expr) {
            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
                bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
            },
            ExprKind::RvalueStmt => {
                bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
            }
            _ => {
                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
                for &(i, t) in &base.fields {
                    let datum = base_datum.get_element(
                        bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
                    assert!(type_is_sized(bcx.tcx(), datum.ty));
                    let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
                    bcx = datum.store_to(bcx, dest);
                }
            }
        }

        // Finally, move scratch field values into actual field locations
        for (i, datum) in scratch_vals {
            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
            bcx = datum.store_to(bcx, dest);
        }
    } else {
        // No base means we can write all fields directly in place.
        let addr = adt::MaybeSizedValue::sized(addr);
        for &(i, ref e) in fields {
            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
            let e_ty = expr_ty_adjusted(bcx, &e);
            bcx = trans_into(bcx, &e, SaveIn(dest));
            let scope = cleanup::CustomScope(custom_cleanup_scope);
            fcx.schedule_lifetime_end(scope, dest);
            // FIXME: nonzeroing move should generalize to fields
            fcx.schedule_drop_mem(scope, dest, e_ty, None);
        }
    }

    adt::trans_set_discr(bcx, &repr, addr, discr);

    fcx.pop_custom_cleanup_scope(custom_cleanup_scope);

    // If we don't care about the result drop the temporary we made
    match dest {
        SaveIn(_) => bcx,
        Ignore => {
            bcx = glue::drop_ty(bcx, addr, ty, debug_location);
            base::call_lifetime_end(bcx, addr);
            bcx
        }
    }
}


fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   lit: &ast::Lit)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    // must not be a string constant, that is a RvalueDpsExpr
    let _icx = push_ctxt("trans_immediate_lit");
    let ty = expr_ty(bcx, expr);
    let v = consts::const_lit(bcx.ccx(), expr, lit);
    immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}

fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           expr: &hir::Expr,
                           op: hir::UnOp,
                           sub_expr: &hir::Expr)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    let ccx = bcx.ccx();
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_unary_datum");

    let method_call = MethodCall::expr(expr.id);

    // The only overloaded operator that is translated to a datum
    // is an overloaded deref, since it always yields a `&T`.
    // Otherwise, we should be in the RvalueDpsExpr path.
    assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));

    let un_ty = expr_ty(bcx, expr);

    let debug_loc = expr.debug_loc();

    match op {
        hir::UnNot => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
            immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
        }
        hir::UnNeg => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let val = datum.to_llscalarish(bcx);
            let (bcx, llneg) = {
                if un_ty.is_fp() {
                    let result = FNeg(bcx, val, debug_loc);
                    (bcx, result)
                } else {
                    let is_signed = un_ty.is_signed();
                    let result = Neg(bcx, val, debug_loc);
                    let bcx = if bcx.ccx().check_overflow() && is_signed {
                        let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
                        let is_min = ICmp(bcx, llvm::IntEQ, val,
                                          C_integral(llty, min, true), debug_loc);
                        with_cond(bcx, is_min, |bcx| {
                            let msg = InternedString::new(
                                "attempted to negate with overflow");
                            controlflow::trans_fail(bcx, expr_info(expr), msg)
                        })
                    } else {
                        bcx
                    };
                    (bcx, result)
                }
            };
            immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
        }
        hir::UnDeref => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            deref_once(bcx, expr, datum, method_call)
        }
    }
}
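
// A descriptive note on the negation check above: with overflow checks
// enabled, negating the minimum value of a signed type (e.g. `-(i32::MIN)`)
// fails with "attempted to negate with overflow"; float negation and
// unchecked builds take the plain `Neg` path.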

fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               box_expr: &hir::Expr,
                               box_ty: Ty<'tcx>,
                               contents: &hir::Expr,
                               contents_ty: Ty<'tcx>)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_uniq_expr");
    let fcx = bcx.fcx;
    assert!(type_is_sized(bcx.tcx(), contents_ty));
    let llty = type_of::type_of(bcx.ccx(), contents_ty);
    let size = llsize_of(bcx.ccx(), llty);
    let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
    let llty_ptr = llty.ptr_to();
    let Result { bcx, val } = malloc_raw_dyn(bcx,
                                             llty_ptr,
                                             box_ty,
                                             size,
                                             align,
                                             box_expr.debug_loc());
    // Unique boxes do not allocate for zero-size types. The standard library
    // may assume that `free` is never called on the pointer returned for
    // `Box<ZeroSizeType>`.
    let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
        trans_into(bcx, contents, SaveIn(val))
    } else {
        let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
        fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
                                val, cleanup::HeapExchange, contents_ty);
        let bcx = trans_into(bcx, contents, SaveIn(val));
        fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
        bcx
    };
    immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}
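
// E.g. `box 0u8` allocates a byte and schedules the allocation to be freed
// if translating the contents panics, while a box of a zero-size type skips
// the free entirely, per the zero-size rule above. (Illustrative sketch.)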
1691
1692 fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1693 expr: &hir::Expr,
1694 subexpr: &hir::Expr)
1695 -> DatumBlock<'blk, 'tcx, Expr> {
1696 let _icx = push_ctxt("trans_addr_of");
1697 let mut bcx = bcx;
1698 let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
1699 let ty = expr_ty(bcx, expr);
1700 if !type_is_sized(bcx.tcx(), sub_datum.ty) {
1701 // Always generate an lvalue datum, because this pointer doesn't own
1702 // the data and cleanup is scheduled elsewhere.
1703 DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
1704 } else {
1705 // Sized value, ref to a thin pointer
1706 immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
1707 }
1708 }
1709
1710 fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1711 binop_expr: &hir::Expr,
1712 binop_ty: Ty<'tcx>,
1713 op: hir::BinOp,
1714 lhs: Datum<'tcx, Rvalue>,
1715 rhs: Datum<'tcx, Rvalue>)
1716 -> DatumBlock<'blk, 'tcx, Expr>
1717 {
1718 let _icx = push_ctxt("trans_scalar_binop");
1719
1720 let tcx = bcx.tcx();
1721 let lhs_t = lhs.ty;
1722 assert!(!lhs_t.is_simd());
1723 let is_float = lhs_t.is_fp();
1724 let is_signed = lhs_t.is_signed();
1725 let info = expr_info(binop_expr);
1726
1727 let binop_debug_loc = binop_expr.debug_loc();
1728
1729 let mut bcx = bcx;
1730 let lhs = lhs.to_llscalarish(bcx);
1731 let rhs = rhs.to_llscalarish(bcx);
1732 let val = match op.node {
1733 hir::BiAdd => {
1734 if is_float {
1735 FAdd(bcx, lhs, rhs, binop_debug_loc)
1736 } else {
1737 let (newbcx, res) = with_overflow_check(
1738 bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
1739 bcx = newbcx;
1740 res
1741 }
1742 }
1743 hir::BiSub => {
1744 if is_float {
1745 FSub(bcx, lhs, rhs, binop_debug_loc)
1746 } else {
1747 let (newbcx, res) = with_overflow_check(
1748 bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
1749 bcx = newbcx;
1750 res
1751 }
1752 }
1753 hir::BiMul => {
1754 if is_float {
1755 FMul(bcx, lhs, rhs, binop_debug_loc)
1756 } else {
1757 let (newbcx, res) = with_overflow_check(
1758 bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
1759 bcx = newbcx;
1760 res
1761 }
1762 }
1763 hir::BiDiv => {
1764 if is_float {
1765 FDiv(bcx, lhs, rhs, binop_debug_loc)
1766 } else {
1767 // Only zero-check integers; fp division by zero is inf or NaN, not a trap
1768 bcx = base::fail_if_zero_or_overflows(bcx,
1769 expr_info(binop_expr),
1770 op,
1771 lhs,
1772 rhs,
1773 lhs_t);
1774 if is_signed {
1775 SDiv(bcx, lhs, rhs, binop_debug_loc)
1776 } else {
1777 UDiv(bcx, lhs, rhs, binop_debug_loc)
1778 }
1779 }
1780 }
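// Illustrative contrast (user level, hedged): float division never
// traps, while integer division gets the zero check above:
//
//     let a = 1.0f32 / 0.0;                   // +inf per IEEE 754
//     fn div(n: i32, d: i32) -> i32 { n / d } // guarded; div(1, 0)
//                                             // fails at runtime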
1781 hir::BiRem => {
1782 if is_float {
1783 // LLVM currently always lowers the `frem` instruction to the
1784 // appropriate library calls typically found in libm. Notably, f64
1785 // gets wired up to `fmod` and f32 gets wired up to `fmodf`.
1786 // Inconveniently for us, 32-bit MSVC does not actually have an
1787 // `fmodf` symbol; it's instead just an inline function in a header
1788 // that goes up to an f64, uses `fmod`, and comes back down to an f32.
1789 //
1790 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
1791 // still unconditionally lower `frem` instructions over 32-bit floats
1792 // to a call to `fmodf`. To work around this we special-case MSVC
1793 // 32-bit float rem instructions and instead make the call out to
1794 // `fmod` ourselves.
1795 //
1796 // Note that this is currently duplicated with src/libcore/ops.rs,
1797 // which does the same thing; it would be nice to unify these two
1798 // implementations one day! Also note that we call `fmod` for both
1799 // 32- and 64-bit floats because if we emit any FRem instruction at
1800 // all, LLVM is capable of optimizing it into a 32-bit FRem (which
1801 // we're trying to avoid).
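// What the MSVC f32 workaround below effectively computes, sketched at
// the user level (helper name hypothetical):
//
//     fn frem32_via_fmod(a: f32, b: f32) -> f32 {
//         (a as f64 % b as f64) as f32 // widen, fmod on f64, narrow
//     }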
1802 let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
1803 tcx.sess.target.target.arch == "x86";
1804 if use_fmod {
1805 let f64t = Type::f64(bcx.ccx());
1806 let fty = Type::func(&[f64t, f64t], &f64t);
1807 let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
1808 tcx.types.f64);
1809 if lhs_t == tcx.types.f32 {
1810 let lhs = FPExt(bcx, lhs, f64t);
1811 let rhs = FPExt(bcx, rhs, f64t);
1812 let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
1813 FPTrunc(bcx, res, Type::f32(bcx.ccx()))
1814 } else {
1815 Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc)
1816 }
1817 } else {
1818 FRem(bcx, lhs, rhs, binop_debug_loc)
1819 }
1820 } else {
1821 // Only zero-check integers; fp %0 is NaN
1822 bcx = base::fail_if_zero_or_overflows(bcx,
1823 expr_info(binop_expr),
1824 op, lhs, rhs, lhs_t);
1825 if is_signed {
1826 SRem(bcx, lhs, rhs, binop_debug_loc)
1827 } else {
1828 URem(bcx, lhs, rhs, binop_debug_loc)
1829 }
1830 }
1831 }
1832 hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
1833 hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
1834 hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
1835 hir::BiShl => {
1836 let (newbcx, res) = with_overflow_check(
1837 bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
1838 bcx = newbcx;
1839 res
1840 }
1841 hir::BiShr => {
1842 let (newbcx, res) = with_overflow_check(
1843 bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
1844 bcx = newbcx;
1845 res
1846 }
1847 hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
1848 base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
1849 }
1850 _ => {
1851 bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1852 }
1853 };
1854
1855 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1856 }
1857
1858 // refinement types would obviate the need for this
1859 enum lazy_binop_ty {
1860 lazy_and,
1861 lazy_or,
1862 }
1863
1864 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1865 binop_expr: &hir::Expr,
1866 op: lazy_binop_ty,
1867 a: &hir::Expr,
1868 b: &hir::Expr)
1869 -> DatumBlock<'blk, 'tcx, Expr> {
1870 let _icx = push_ctxt("trans_lazy_binop");
1871 let binop_ty = expr_ty(bcx, binop_expr);
1872 let fcx = bcx.fcx;
1873
1874 let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1875 let lhs = lhs.to_llscalarish(past_lhs);
1876
1877 if past_lhs.unreachable.get() {
1878 return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1879 }
1880
1881 let join = fcx.new_id_block("join", binop_expr.id);
1882 let before_rhs = fcx.new_id_block("before_rhs", b.id);
1883
1884 match op {
1885 lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1886 lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1887 }
1888
1889 let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1890 let rhs = rhs.to_llscalarish(past_rhs);
1891
1892 if past_rhs.unreachable.get() {
1893 return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1894 }
1895
1896 Br(past_rhs, join.llbb, DebugLoc::None);
1897 let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1898 &[past_lhs.llbb, past_rhs.llbb]);
1899
1900 return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
1901 }
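// User-level sketch of the control flow built above (hedged,
// illustrative): the `before_rhs` block runs only when the LHS does not
// already decide the result, and `join` merges the two incoming values
// with a phi:
//
//     fn lazy_and<F: Fn() -> bool>(a: bool, b: F) -> bool {
//         a && b() // `b` is never evaluated when `a` is false
//     }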
1902
1903 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1904 expr: &hir::Expr,
1905 op: hir::BinOp,
1906 lhs: &hir::Expr,
1907 rhs: &hir::Expr)
1908 -> DatumBlock<'blk, 'tcx, Expr> {
1909 let _icx = push_ctxt("trans_binary");
1910 let ccx = bcx.ccx();
1911
1912 // if overloaded, would be RvalueDpsExpr
1913 assert!(!ccx.tcx().is_method_call(expr.id));
1914
1915 match op.node {
1916 hir::BiAnd => {
1917 trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1918 }
1919 hir::BiOr => {
1920 trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
1921 }
1922 _ => {
1923 let mut bcx = bcx;
1924 let binop_ty = expr_ty(bcx, expr);
1925
1926 let lhs = unpack_datum!(bcx, trans(bcx, lhs));
1927 let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
1928 debug!("trans_binary (expr {}): lhs={}",
1929 expr.id, lhs.to_string(ccx));
1930 let rhs = unpack_datum!(bcx, trans(bcx, rhs));
1931 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
1932 debug!("trans_binary (expr {}): rhs={}",
1933 expr.id, rhs.to_string(ccx));
1934
1935 if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
1936 assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
1937 "built-in binary operators on fat pointers are homogeneous");
1938 assert_eq!(binop_ty, bcx.tcx().types.bool);
1939 let val = base::compare_scalar_types(
1940 bcx,
1941 lhs.val,
1942 rhs.val,
1943 lhs.ty,
1944 op.node,
1945 expr.debug_loc());
1946 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1947 } else {
1948 assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
1949 "built-in binary operators on fat pointers are homogeneous");
1950 trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
1951 }
1952 }
1953 }
1954 }
1955
1956 fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1957 expr: &hir::Expr,
1958 method_call: MethodCall,
1959 lhs: Datum<'tcx, Expr>,
1960 rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
1961 dest: Option<Dest>,
1962 autoref: bool)
1963 -> Result<'blk, 'tcx> {
1964 callee::trans_call_inner(bcx,
1965 expr.debug_loc(),
1966 |bcx, arg_cleanup_scope| {
1967 meth::trans_method_callee(bcx,
1968 method_call,
1969 None,
1970 arg_cleanup_scope)
1971 },
1972 callee::ArgOverloadedOp(lhs, rhs, autoref),
1973 dest)
1974 }
1975
1976 fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1977 expr: &hir::Expr,
1978 callee: &'a hir::Expr,
1979 args: &'a [P<hir::Expr>],
1980 dest: Option<Dest>)
1981 -> Block<'blk, 'tcx> {
1982 debug!("trans_overloaded_call {}", expr.id);
1983 let method_call = MethodCall::expr(expr.id);
1984 let mut all_args = vec!(callee);
1985 all_args.extend(args.iter().map(|e| &**e));
1986 unpack_result!(bcx,
1987 callee::trans_call_inner(bcx,
1988 expr.debug_loc(),
1989 |bcx, arg_cleanup_scope| {
1990 meth::trans_method_callee(
1991 bcx,
1992 method_call,
1993 None,
1994 arg_cleanup_scope)
1995 },
1996 callee::ArgOverloadedCall(all_args),
1997 dest));
1998 bcx
1999 }
2000
2001 pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
2002 expr: &hir::Expr,
2003 t_in: Ty<'tcx>,
2004 t_out: Ty<'tcx>)
2005 -> bool {
2006 if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
2007 return true;
2008 }
2009
2010 match (t_in.builtin_deref(true, ty::NoPreference),
2011 t_out.builtin_deref(true, ty::NoPreference)) {
2012 (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
2013 t_in == t_out
2014 }
2015 _ => {
2016 // This condition isn't redundant with the check for CoercionCast:
2017 // different types can be substituted into the same type, and
2018 // `==` on types can be overconservative when regions are involved.
2019 t_in == t_out
2020 }
2021 }
2022 }
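// Hedged user-level examples of casts this judges to be noops:
//
//     let x = 5u8;
//     let p = &x as *const u8; // coercion cast: noop
//     let q = p as *mut u8;    // same pointee type: noop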
2023
2024 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2025 expr: &hir::Expr,
2026 id: ast::NodeId)
2027 -> DatumBlock<'blk, 'tcx, Expr>
2028 {
2029 use middle::ty::cast::CastTy::*;
2030 use middle::ty::cast::IntTy::*;
2031
2032 fn int_cast(bcx: Block,
2033 lldsttype: Type,
2034 llsrctype: Type,
2035 llsrc: ValueRef,
2036 signed: bool)
2037 -> ValueRef
2038 {
2039 let _icx = push_ctxt("int_cast");
2040 let srcsz = llsrctype.int_width();
2041 let dstsz = lldsttype.int_width();
2042 return if dstsz == srcsz {
2043 BitCast(bcx, llsrc, lldsttype)
2044 } else if srcsz > dstsz {
2045 TruncOrBitCast(bcx, llsrc, lldsttype)
2046 } else if signed {
2047 SExtOrBitCast(bcx, llsrc, lldsttype)
2048 } else {
2049 ZExtOrBitCast(bcx, llsrc, lldsttype)
2050 }
2051 }
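// E.g. (illustrative): the helper above picks the LLVM op from the
// relative widths plus the signedness of the source:
//
//     let a = -1i8 as i32;  // SExt:  sign-extended to -1
//     let b = 255u8 as i32; // ZExt:  zero-extended to 255
//     let c = 300i32 as u8; // Trunc: low byte kept, 44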
2052
2053 fn float_cast(bcx: Block,
2054 lldsttype: Type,
2055 llsrctype: Type,
2056 llsrc: ValueRef)
2057 -> ValueRef
2058 {
2059 let _icx = push_ctxt("float_cast");
2060 let srcsz = llsrctype.float_width();
2061 let dstsz = lldsttype.float_width();
2062 return if dstsz > srcsz {
2063 FPExt(bcx, llsrc, lldsttype)
2064 } else if srcsz > dstsz {
2065 FPTrunc(bcx, llsrc, lldsttype)
2066 } else { llsrc };
2067 }
2068
2069 let _icx = push_ctxt("trans_cast");
2070 let mut bcx = bcx;
2071 let ccx = bcx.ccx();
2072
2073 let t_in = expr_ty_adjusted(bcx, expr);
2074 let t_out = node_id_type(bcx, id);
2075
2076 debug!("trans_cast({:?} as {:?})", t_in, t_out);
2077 let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
2078 let ll_t_out = type_of::arg_type_of(ccx, t_out);
2079 // Convert the value to be cast into a ValueRef, either by-ref or
2080 // by-value as appropriate given its type:
2081 let mut datum = unpack_datum!(bcx, trans(bcx, expr));
2082
2083 let datum_ty = monomorphize_type(bcx, datum.ty);
2084
2085 if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
2086 datum.ty = t_out;
2087 return DatumBlock::new(bcx, datum);
2088 }
2089
2090 if type_is_fat_ptr(bcx.tcx(), t_in) {
2091 assert!(datum.kind.is_by_ref());
2092 if type_is_fat_ptr(bcx.tcx(), t_out) {
2093 return DatumBlock::new(bcx, Datum::new(
2094 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
2095 t_out,
2096 Rvalue::new(ByRef)
2097 )).to_expr_datumblock();
2098 } else {
2099 // Return the address
2100 return immediate_rvalue_bcx(bcx,
2101 PointerCast(bcx,
2102 Load(bcx, get_dataptr(bcx, datum.val)),
2103 ll_t_out),
2104 t_out).to_expr_datumblock();
2105 }
2106 }
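// Hedged example of the fat -> thin branch above (user level): casting
// away the slice metadata keeps only the data pointer:
//
//     let s: &[u8] = &[1, 2, 3];
//     let p = s as *const [u8] as *const u8; // loads the data pointer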
2107
2108 let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
2109 let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
2110
2111 let (llexpr, signed) = if let Int(CEnum) = r_t_in {
2112 let repr = adt::represent_type(ccx, t_in);
2113 let datum = unpack_datum!(
2114 bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
2115 let llexpr_ptr = datum.to_llref();
2116 let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
2117 Some(Type::i64(ccx)), true);
2118 ll_t_in = val_ty(discr);
2119 (discr, adt::is_discr_signed(&repr))
2120 } else {
2121 (datum.to_llscalarish(bcx), t_in.is_signed())
2122 };
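// E.g. (hypothetical C-like enum): casting loads the discriminant, not
// the enum value itself:
//
//     enum Color { Red = 10, Green = 20 }
//     let n = Color::Green as i32; // 20, via trans_get_discr above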
2123
2124 let newval = match (r_t_in, r_t_out) {
2125 (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
2126 PointerCast(bcx, llexpr, ll_t_out)
2127 }
2128 (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
2129 (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
2130
2131 (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
2132 (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
2133 (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
2134 (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
2135 (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
2136 (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
2137
2138 _ => ccx.sess().span_bug(expr.span,
2139 &format!("translating unsupported cast: \
2140 {:?} -> {:?}",
2141 t_in,
2142 t_out)
2143 )
2144 };
2145 return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
2146 }
2147
2148 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2149 expr: &hir::Expr,
2150 op: hir::BinOp,
2151 dst: &hir::Expr,
2152 src: &hir::Expr)
2153 -> Block<'blk, 'tcx> {
2154 let _icx = push_ctxt("trans_assign_op");
2155 let mut bcx = bcx;
2156
2157 debug!("trans_assign_op(expr={:?})", expr);
2158
2159 // User-defined operator methods cannot be used with `+=` etc. right now.
2160 assert!(!bcx.tcx().is_method_call(expr.id));
2161
2162 // Evaluate LHS (destination), which should be an lvalue
2163 let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
2164 assert!(!bcx.fcx.type_needs_drop(dst.ty));
2165 let lhs = load_ty(bcx, dst.val, dst.ty);
2166 let lhs = immediate_rvalue(lhs, dst.ty);
2167
2168 // Evaluate RHS - FIXME(#28160) this sucks
2169 let rhs = unpack_datum!(bcx, trans(bcx, &src));
2170 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
2171
2172 // Perform computation and store the result
2173 let result_datum = unpack_datum!(
2174 bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
2175 return result_datum.store_to(bcx, dst.val);
2176 }
2177
2178 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2179 datum: Datum<'tcx, Expr>,
2180 expr: &hir::Expr)
2181 -> DatumBlock<'blk, 'tcx, Expr> {
2182 let mut bcx = bcx;
2183
2184 // Ensure cleanup of `datum` if not already scheduled and obtain
2185 // a "by ref" pointer.
2186 let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2187
2188 // Compute final type. Note that we are loose with the region and
2189 // mutability, since those things don't matter in trans.
2190 let referent_ty = lv_datum.ty;
2191 let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2192
2193 // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
2194 // because there is cleanup scheduled and the datum doesn't own the data. For thin pointers,
2195 // however, we micro-optimize it to be an Rvalue datum, avoiding the extra alloca and level
2196 // of indirection; for thin pointers this has no ill effects.
2197 let kind = if type_is_sized(bcx.tcx(), referent_ty) {
2198 RvalueExpr(Rvalue::new(ByValue))
2199 } else {
2200 LvalueExpr(lv_datum.kind)
2201 };
2202
2203 // Get the pointer.
2204 let llref = lv_datum.to_llref();
2205 DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
2206 }
2207
2208 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2209 expr: &hir::Expr,
2210 datum: Datum<'tcx, Expr>,
2211 times: usize)
2212 -> DatumBlock<'blk, 'tcx, Expr> {
2213 let mut bcx = bcx;
2214 let mut datum = datum;
2215 for i in 0..times {
2216 let method_call = MethodCall::autoderef(expr.id, i as u32);
2217 datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2218 }
2219 DatumBlock { bcx: bcx, datum: datum }
2220 }
2221
2222 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2223 expr: &hir::Expr,
2224 datum: Datum<'tcx, Expr>,
2225 method_call: MethodCall)
2226 -> DatumBlock<'blk, 'tcx, Expr> {
2227 let ccx = bcx.ccx();
2228
2229 debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
2230 expr,
2231 datum.to_string(ccx),
2232 method_call);
2233
2234 let mut bcx = bcx;
2235
2236 // Check for overloaded deref.
2237 let method_ty = ccx.tcx()
2238 .tables
2239 .borrow()
2240 .method_map
2241 .get(&method_call).map(|method| method.ty);
2242
2243 let datum = match method_ty {
2244 Some(method_ty) => {
2245 let method_ty = monomorphize_type(bcx, method_ty);
2246
2247 // Overloaded. Evaluate `trans_overloaded_op`, which will
2248 // invoke the user's deref() method, which basically
2249 // converts from the `Smaht<T>` pointer that we have into
2250 // a `&T` pointer. We can then proceed down the normal
2251 // path (below) to dereference that `&T`.
2252 let datum = if method_call.autoderef == 0 {
2253 datum
2254 } else {
2255 // Always perform an AutoPtr when applying an overloaded auto-deref
2256 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
2257 };
2258
2259 let ref_ty = // invoked methods have their LB regions instantiated
2260 ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
2261 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2262
2263 unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
2264 datum, None, Some(SaveIn(scratch.val)),
2265 false));
2266 scratch.to_expr_datum()
2267 }
2268 None => {
2269 // Not overloaded. We already have a pointer we know how to deref.
2270 datum
2271 }
2272 };
2273
2274 let r = match datum.ty.sty {
2275 ty::TyBox(content_ty) => {
2276 // Make sure we have an lvalue datum here to get the
2277 // proper cleanups scheduled
2278 let datum = unpack_datum!(
2279 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2280
2281 if type_is_sized(bcx.tcx(), content_ty) {
2282 let ptr = load_ty(bcx, datum.val, datum.ty);
2283 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
2284 } else {
2285 // A fat pointer and a DST lvalue have the same representation,
2286 // just different types. Since there is no temporary for `*e`
2287 // here (because it is unsized), we cannot emulate the sized
2288 // object code path for running drop glue and free. Instead,
2289 // we schedule cleanup for `e`, turning it into an lvalue.
2290
2291 let lval = Lvalue::new("expr::deref_once ty_uniq");
2292 let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
2293 DatumBlock::new(bcx, datum)
2294 }
2295 }
2296
2297 ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
2298 ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
2299 let lval = Lvalue::new("expr::deref_once ptr");
2300 if type_is_sized(bcx.tcx(), content_ty) {
2301 let ptr = datum.to_llscalarish(bcx);
2302
2303 // Always generate an lvalue datum, even if datum.mode is
2304 // an rvalue. This is because datum.mode is only an
2305 // rvalue for non-owning pointers like &T or *T, in which
2306 // case cleanup *is* scheduled elsewhere, by the true
2307 // owner (or, in the case of *T, by the user).
2308 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
2309 } else {
2310 // A fat pointer and a DST lvalue have the same representation,
2311 // just different types.
2312 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
2313 }
2314 }
2315
2316 _ => {
2317 bcx.tcx().sess.span_bug(
2318 expr.span,
2319 &format!("deref invoked on expr of invalid type {:?}",
2320 datum.ty));
2321 }
2322 };
2323
2324 debug!("deref_once(expr={}, method_call={:?}, result={})",
2325 expr.id, method_call, r.datum.to_string(ccx));
2326
2327 return r;
2328 }
2329
2330 #[derive(Debug)]
2331 enum OverflowOp {
2332 Add,
2333 Sub,
2334 Mul,
2335 Shl,
2336 Shr,
2337 }
2338
2339 impl OverflowOp {
2340 fn codegen_strategy(&self) -> OverflowCodegen {
2341 use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
2342 match *self {
2343 OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2344 OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2345 OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
2346
2347 OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2348 OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
2349 }
2350 }
2351 }
2352
2353 enum OverflowCodegen {
2354 ViaIntrinsic(OverflowOpViaIntrinsic),
2355 ViaInputCheck(OverflowOpViaInputCheck),
2356 }
2357
2358 enum OverflowOpViaInputCheck { Shl, Shr, }
2359
2360 #[derive(Debug)]
2361 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
2362
2363 impl OverflowOpViaIntrinsic {
2364 fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2365 let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2366 bcx.ccx().get_intrinsic(&name)
2367 }
2368 fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
2369 use syntax::ast::IntTy::*;
2370 use syntax::ast::UintTy::*;
2371 use middle::ty::{TyInt, TyUint};
2372
2373 let new_sty = match ty.sty {
2374 TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
2375 "32" => TyInt(I32),
2376 "64" => TyInt(I64),
2377 _ => panic!("unsupported target word size")
2378 },
2379 TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
2380 "32" => TyUint(U32),
2381 "64" => TyUint(U64),
2382 _ => panic!("unsupported target word size")
2383 },
2384 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
2385 _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
2386 *self)
2387 };
2388
2389 match *self {
2390 OverflowOpViaIntrinsic::Add => match new_sty {
2391 TyInt(I8) => "llvm.sadd.with.overflow.i8",
2392 TyInt(I16) => "llvm.sadd.with.overflow.i16",
2393 TyInt(I32) => "llvm.sadd.with.overflow.i32",
2394 TyInt(I64) => "llvm.sadd.with.overflow.i64",
2395
2396 TyUint(U8) => "llvm.uadd.with.overflow.i8",
2397 TyUint(U16) => "llvm.uadd.with.overflow.i16",
2398 TyUint(U32) => "llvm.uadd.with.overflow.i32",
2399 TyUint(U64) => "llvm.uadd.with.overflow.i64",
2400
2401 _ => unreachable!(),
2402 },
2403 OverflowOpViaIntrinsic::Sub => match new_sty {
2404 TyInt(I8) => "llvm.ssub.with.overflow.i8",
2405 TyInt(I16) => "llvm.ssub.with.overflow.i16",
2406 TyInt(I32) => "llvm.ssub.with.overflow.i32",
2407 TyInt(I64) => "llvm.ssub.with.overflow.i64",
2408
2409 TyUint(U8) => "llvm.usub.with.overflow.i8",
2410 TyUint(U16) => "llvm.usub.with.overflow.i16",
2411 TyUint(U32) => "llvm.usub.with.overflow.i32",
2412 TyUint(U64) => "llvm.usub.with.overflow.i64",
2413
2414 _ => unreachable!(),
2415 },
2416 OverflowOpViaIntrinsic::Mul => match new_sty {
2417 TyInt(I8) => "llvm.smul.with.overflow.i8",
2418 TyInt(I16) => "llvm.smul.with.overflow.i16",
2419 TyInt(I32) => "llvm.smul.with.overflow.i32",
2420 TyInt(I64) => "llvm.smul.with.overflow.i64",
2421
2422 TyUint(U8) => "llvm.umul.with.overflow.i8",
2423 TyUint(U16) => "llvm.umul.with.overflow.i16",
2424 TyUint(U32) => "llvm.umul.with.overflow.i32",
2425 TyUint(U64) => "llvm.umul.with.overflow.i64",
2426
2427 _ => unreachable!(),
2428 },
2429 }
2430 }
2431
2432 fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2433 info: NodeIdAndSpan,
2434 lhs_t: Ty<'tcx>, lhs: ValueRef,
2435 rhs: ValueRef,
2436 binop_debug_loc: DebugLoc)
2437 -> (Block<'blk, 'tcx>, ValueRef) {
2438 let llfn = self.to_intrinsic(bcx, lhs_t);
2439
2440 let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
2441 let result = ExtractValue(bcx, val, 0); // iN operation result
2442 let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
2443
2444 let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
2445 binop_debug_loc);
2446
2447 let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2448 Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2449 None, binop_debug_loc);
2450
2451 let bcx =
2452 base::with_cond(bcx, cond, |bcx|
2453 controlflow::trans_fail(bcx, info,
2454 InternedString::new("arithmetic operation overflowed")));
2455
2456 (bcx, result)
2457 }
2458 }
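// Hedged user-level sketch of the checked path above, phrased with the
// stable checked API rather than the raw LLVM intrinsic:
//
//     fn add_or_panic(a: i32, b: i32) -> i32 {
//         a.checked_add(b)
//          .unwrap_or_else(|| panic!("arithmetic operation overflowed"))
//     }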
2459
2460 impl OverflowOpViaInputCheck {
2461 fn build_with_input_check<'blk, 'tcx>(&self,
2462 bcx: Block<'blk, 'tcx>,
2463 info: NodeIdAndSpan,
2464 lhs_t: Ty<'tcx>,
2465 lhs: ValueRef,
2466 rhs: ValueRef,
2467 binop_debug_loc: DebugLoc)
2468 -> (Block<'blk, 'tcx>, ValueRef)
2469 {
2470 let lhs_llty = val_ty(lhs);
2471 let rhs_llty = val_ty(rhs);
2472
2473 // Panic if any bits are set outside of the bits that we always
2474 // mask in.
2475 //
2476 // Note that the mask's value is derived from the LHS type
2477 // (since that is where the 32/64 distinction is relevant) but
2478 // the mask's type must match the RHS type (since they will
2479 // both be fed into an and-binop).
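// E.g. for a 32-bit LHS the legal shift amounts are 0 through 31, so
// the inverted mask is !31; any RHS bit surviving the AND below means
// the shift would overflow (illustrative):
//
//     let ok = 1u32 << 31; // rhs & !31 == 0: no failure
//     // 1u32 << 32 would trip "shift operation overflowed"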
2480 let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
2481
2482 let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2483 let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
2484 let result = match *self {
2485 OverflowOpViaInputCheck::Shl =>
2486 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2487 OverflowOpViaInputCheck::Shr =>
2488 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2489 };
2490 let bcx =
2491 base::with_cond(bcx, cond, |bcx|
2492 controlflow::trans_fail(bcx, info,
2493 InternedString::new("shift operation overflowed")));
2494
2495 (bcx, result)
2496 }
2497 }
2498
2499 // Check if an integer or vector contains a nonzero element.
2500 fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2501 value: ValueRef,
2502 binop_debug_loc: DebugLoc) -> ValueRef {
2503 let llty = val_ty(value);
2504 let kind = llty.kind();
2505 match kind {
2506 TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
2507 TypeKind::Vector => {
2508 // Check if any elements of the vector are nonzero by treating
2509 // it as a wide integer and checking if the integer is nonzero.
2510 let width = llty.vector_length() as u64 * llty.element_type().int_width();
2511 let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
2512 build_nonzero_check(bcx, int_value, binop_debug_loc)
2513 },
2514 _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
2515 }
2516 }
2517
2518 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2519 lhs_t: Ty<'tcx>, lhs: ValueRef,
2520 rhs: ValueRef,
2521 binop_debug_loc: DebugLoc)
2522 -> (Block<'blk, 'tcx>, ValueRef) {
2523 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2524 if bcx.ccx().check_overflow() {
2525
2526 match oop.codegen_strategy() {
2527 OverflowCodegen::ViaIntrinsic(oop) =>
2528 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2529 OverflowCodegen::ViaInputCheck(oop) =>
2530 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2531 }
2532 } else {
2533 let res = match oop {
2534 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2535 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2536 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2537
2538 OverflowOp::Shl =>
2539 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2540 OverflowOp::Shr =>
2541 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2542 };
2543 (bcx, res)
2544 }
2545 }
2546
2547 /// We categorize expressions into three kinds. The distinction between
2548 /// lvalue/rvalue is fundamental to the language. The distinction between the
2549 /// two kinds of rvalues is an artifact of trans which reflects how we will
2550 /// generate code for that kind of expression. See the module
2551 /// documentation at the top of this file for more information.
2552 #[derive(Copy, Clone)]
2553 enum ExprKind {
2554 Lvalue,
2555 RvalueDps,
2556 RvalueDatum,
2557 RvalueStmt
2558 }
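// Hedged, illustrative examples of each kind at the user level:
//
//     s.field    // Lvalue:      refers to a memory location
//     a < b      // RvalueDatum: an immediate scalar result
//     S { f: 0 } // RvalueDps:   written directly into a destination
//     break      // RvalueStmt:  produces no meaningful result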
2559
2560 fn expr_kind(tcx: &ty::ctxt, expr: &hir::Expr) -> ExprKind {
2561 if tcx.is_method_call(expr.id) {
2562 // Overloaded operations are generally calls, and hence they are
2563 // generated via DPS, but there are a few exceptions:
2564 return match expr.node {
2565 // `a += b` has a unit result.
2566 hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
2567
2568 // the deref method invoked for `*a` always yields an `&T`
2569 hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
2570
2571 // the index method invoked for `a[i]` always yields an `&T`
2572 hir::ExprIndex(..) => ExprKind::Lvalue,
2573
2574 // in the general case, result could be any type, use DPS
2575 _ => ExprKind::RvalueDps
2576 };
2577 }
2578
2579 match expr.node {
2580 hir::ExprPath(..) => {
2581 match tcx.resolve_expr(expr) {
2582 Def::Struct(..) | Def::Variant(..) => {
2583 if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
2584 // ctor function
2585 ExprKind::RvalueDatum
2586 } else {
2587 ExprKind::RvalueDps
2588 }
2589 }
2590
2591 // Fn pointers are just scalar values.
2592 Def::Fn(..) | Def::Method(..) => ExprKind::RvalueDatum,
2593
2594 // Note: there is actually a good case to be made that
2595 // `DefArg`s, particularly those of immediate type, ought to
2596 // be considered rvalues.
2597 Def::Static(..) |
2598 Def::Upvar(..) |
2599 Def::Local(..) => ExprKind::Lvalue,
2600
2601 Def::Const(..) |
2602 Def::AssociatedConst(..) => ExprKind::RvalueDatum,
2603
2604 def => {
2605 tcx.sess.span_bug(
2606 expr.span,
2607 &format!("uncategorized def for expr {}: {:?}",
2608 expr.id,
2609 def));
2610 }
2611 }
2612 }
2613
2614 hir::ExprType(ref expr, _) => {
2615 expr_kind(tcx, expr)
2616 }
2617
2618 hir::ExprUnary(hir::UnDeref, _) |
2619 hir::ExprField(..) |
2620 hir::ExprTupField(..) |
2621 hir::ExprIndex(..) => {
2622 ExprKind::Lvalue
2623 }
2624
2625 hir::ExprCall(..) |
2626 hir::ExprMethodCall(..) |
2627 hir::ExprStruct(..) |
2628 hir::ExprRange(..) |
2629 hir::ExprTup(..) |
2630 hir::ExprIf(..) |
2631 hir::ExprMatch(..) |
2632 hir::ExprClosure(..) |
2633 hir::ExprBlock(..) |
2634 hir::ExprRepeat(..) |
2635 hir::ExprVec(..) => {
2636 ExprKind::RvalueDps
2637 }
2638
2639 hir::ExprLit(ref lit) if lit.node.is_str() => {
2640 ExprKind::RvalueDps
2641 }
2642
2643 hir::ExprBreak(..) |
2644 hir::ExprAgain(..) |
2645 hir::ExprRet(..) |
2646 hir::ExprWhile(..) |
2647 hir::ExprLoop(..) |
2648 hir::ExprAssign(..) |
2649 hir::ExprInlineAsm(..) |
2650 hir::ExprAssignOp(..) => {
2651 ExprKind::RvalueStmt
2652 }
2653
2654 hir::ExprLit(_) | // Note: LitStr is carved out above
2655 hir::ExprUnary(..) |
2656 hir::ExprBox(_) |
2657 hir::ExprAddrOf(..) |
2658 hir::ExprBinary(..) |
2659 hir::ExprCast(..) => {
2660 ExprKind::RvalueDatum
2661 }
2662 }
2663 }