// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//!   result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//!   storing the result into `dest`. This is the preferred form, if you
//!   can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//!   a `Datum` with the result. You can then store the datum, inspect
//!   the value, etc. This may introduce temporaries if the datum is a
//!   structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//!   expression and ensures that the result has a cleanup associated with it,
//!   creating a temporary stack slot if necessary.
//!
//! - `trans_local_var -> Datum`: looks up a local variable or upvar.
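//!
//! As a rough illustration of this division of labor: a struct literal like
//! `Point { x: 3, y: 4 }` goes down the DPS path and writes its fields
//! directly into a `SaveIn` destination, whereas a scalar expression like
//! `a + b` goes down the datum path and yields an immediate value.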

#![allow(non_camel_case_types)]

pub use self::Dest::*;
use self::lazy_binop_ty::*;

use back::abi;
use llvm::{self, ValueRef, TypeKind};
use middle::check_const;
use middle::def;
use middle::lang_items::CoerceUnsizedTraitLangItem;
use middle::subst::{Substs, VecPerParamSpace};
use middle::traits;
use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
use trans::base::*;
use trans::build::*;
use trans::cleanup::{self, CleanupMethods, DropHintMethods};
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::declare;
use trans::glue;
use trans::machine;
use trans::meth;
use trans::tvec;
use trans::type_of;
use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
use middle::ty::adjustment::{AdjustUnsafeFnPointer, CustomCoerceUnsized};
use middle::ty::{self, Ty};
use middle::ty::MethodCall;
use middle::ty::cast::{CastKind, CastTy};
use util::common::indenter;
use trans::machine::{llsize_of, llsize_of_alloc};
use trans::type_::Type;

use rustc_front;
use rustc_front::hir;

use syntax::{ast, ast_util, codemap};
use syntax::parse::token::InternedString;
use syntax::ptr::P;
use syntax::parse::token;
use std::mem;

// Destinations

// These are passed around by the code generating functions to track the
// destination of a computation's value.

#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
    SaveIn(ValueRef),
    Ignore,
}

impl Dest {
    pub fn to_string(&self, ccx: &CrateContext) -> String {
        match *self {
            SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
            Ignore => "Ignore".to_string()
        }
    }
}

/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              expr: &hir::Expr,
                              dest: Dest)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    if adjustment_required(bcx, expr) {
        // use trans, which may be less efficient but
        // which will perform the adjustments:
        let datum = unpack_datum!(bcx, trans(bcx, expr));
        return datum.store_to_dest(bcx, dest, expr.id);
    }

    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    if !qualif.intersects(
        check_const::ConstQualif::NOT_CONST |
        check_const::ConstQualif::NEEDS_DROP
    ) {
        if !qualif.intersects(check_const::ConstQualif::PREFER_IN_PLACE) {
            if let SaveIn(lldest) = dest {
                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                                       bcx.fcx.param_substs,
                                                       consts::TrueConst::No) {
                    Ok(global) => {
                        // Cast pointer to destination, because constants
                        // have different types.
                        let lldest = PointerCast(bcx, lldest, val_ty(global));
                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
                        return bcx;
                    },
                    Err(consts::ConstEvalFailure::Runtime(_)) => {
                        // in case const evaluation errors, translate normally
                        // debug assertions catch the same errors
                        // see RFC 1229
                    },
                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
                        return bcx;
                    },
                }
            }
            // Even if we don't have a value to emit, and the expression
            // doesn't have any side-effects, we still have to translate the
            // body of any closures.
            // FIXME: Find a better way of handling this case.
        } else {
            // The only way we're going to see a `const` at this point is if
            // it prefers in-place instantiation, likely because it contains
            // `[x; N]` somewhere within.
            match expr.node {
                hir::ExprPath(..) => {
                    match bcx.def(expr.id) {
                        def::DefConst(did) => {
                            let const_expr = consts::get_const_expr(bcx.ccx(), did, expr);
                            // Temporarily get cleanup scopes out of the way,
                            // as they require sub-expressions to be contained
                            // inside the current AST scope.
                            // These should record no cleanups anyways, `const`
                            // can't have destructors.
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      vec![]);
                            // Lock emitted debug locations to the location of
                            // the constant reference expression.
                            debuginfo::with_source_location_override(bcx.fcx,
                                                                     expr.debug_loc(),
                                                                     || {
                                bcx = trans_into(bcx, const_expr, dest)
                            });
                            let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                      scopes);
                            assert!(scopes.is_empty());
                            return bcx;
                        }
                        _ => {}
                    }
                }
                _ => {}
            }
        }
    }

    debug!("trans_into() expr={:?}", expr);

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);

    let kind = expr_kind(bcx.tcx(), expr);
    bcx = match kind {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
        }
        ExprKind::RvalueDps => {
            trans_rvalue_dps_unadjusted(bcx, expr, dest)
        }
        ExprKind::RvalueStmt => {
            trans_rvalue_stmt_unadjusted(bcx, expr)
        }
    };

    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}

/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         expr: &hir::Expr)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    debug!("trans(expr={:?})", expr);

    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS);
    let global = if !qualif.intersects(
        check_const::ConstQualif::NOT_CONST |
        check_const::ConstQualif::NEEDS_DROP
    ) {
        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                               bcx.fcx.param_substs,
                                               consts::TrueConst::No) {
            Ok(global) => {
                if qualif.intersects(check_const::ConstQualif::HAS_STATIC_BORROWS) {
                    // Is borrowed as 'static, must return lvalue.

                    // Cast pointer to global, because constants have different types.
                    let const_ty = expr_ty_adjusted(bcx, expr);
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
                    return DatumBlock::new(bcx, datum.to_expr_datum());
                }

                // Otherwise, keep around and perform adjustments, if needed.
                let const_ty = if adjusted_global {
                    expr_ty_adjusted(bcx, expr)
                } else {
                    expr_ty(bcx, expr)
                };

                // This could use a better heuristic.
                Some(if type_is_immediate(bcx.ccx(), const_ty) {
                    // Cast pointer to global, because constants have different types.
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    // Maybe just get the value directly, instead of loading it?
                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
                } else {
                    let scratch = alloc_ty(bcx, const_ty, "const");
                    call_lifetime_start(bcx, scratch);
                    let lldest = if !const_ty.is_structural() {
                        // Cast pointer to slot, because constants have different types.
                        PointerCast(bcx, scratch, val_ty(global))
                    } else {
                        // In this case, memcpy_ty calls llvm.memcpy after casting both
                        // source and destination to i8*, so we don't need any casts.
                        scratch
                    };
                    memcpy_ty(bcx, lldest, global, const_ty);
                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
                })
            },
            Err(consts::ConstEvalFailure::Runtime(_)) => {
                // in case const evaluation errors, translate normally
                // debug assertions catch the same errors
                // see RFC 1229
                None
            },
            Err(consts::ConstEvalFailure::Compiletime(_)) => {
                // generate a dummy llvm value
                let const_ty = expr_ty(bcx, expr);
                let llty = type_of::type_of(bcx.ccx(), const_ty);
                let dummy = C_undef(llty.ptr_to());
                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
            },
        }
    } else {
        None
    };

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
    let datum = match global {
        Some(rvalue) => rvalue.to_expr_datum(),
        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
    };
    let datum = if adjusted_global {
        datum // trans::consts already performed adjustments.
    } else {
        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
    };
    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
    return DatumBlock::new(bcx, datum);
}

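// The two words of a fat pointer live at fixed offsets: `abi::FAT_PTR_ADDR`
// holds the data pointer and `abi::FAT_PTR_EXTRA` holds the extra word (the
// length of a slice, or the vtable of a trait object). The helpers below
// compute pointers to those fields.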
pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
}

pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}

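/// Copies both words (data pointer and extra word) of the fat pointer at
/// `src_ptr` into `dst_ptr`.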
pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
}

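/// Returns whether translating `expr` requires applying any adjustments
/// (autoderefs, autorefs, unsizing); if it does, `trans_into` cannot take
/// its fast path and falls back to the general `trans`.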
fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr) -> bool {
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => { return false; }
        Some(adj) => adj
    };

    // Don't skip a conversion from Box<T> to &T, etc.
    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
        return true;
    }

    match adjustment {
        AdjustReifyFnPointer => {
            // FIXME(#19925) once fn item types are
            // zero-sized, we'll need to return true here
            false
        }
        AdjustUnsafeFnPointer => {
            // purely a type-level thing
            false
        }
        AdjustDerefRef(ref adj) => {
            // We are a bit paranoid about adjustments and thus might have a re-
            // borrow here which merely derefs and then refs again (it might have
            // a different region or mutability, but we don't care here).
            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
        }
    }
}

/// Helper for `trans` that applies adjustments from `expr` to `datum`, which should be the
/// unadjusted translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 expr: &hir::Expr,
                                 datum: Datum<'tcx, Expr>)
                                 -> DatumBlock<'blk, 'tcx, Expr>
{
    let mut bcx = bcx;
    let mut datum = datum;
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => {
            return DatumBlock::new(bcx, datum);
        }
        Some(adj) => { adj }
    };
    debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
           expr,
           datum.to_string(bcx.ccx()),
           adjustment);
    match adjustment {
        AdjustReifyFnPointer => {
            // FIXME(#19925) once fn item types are
            // zero-sized, we'll need to do something here
        }
        AdjustUnsafeFnPointer => {
            // purely a type-level thing
        }
        AdjustDerefRef(ref adj) => {
            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
                // We are a bit paranoid about adjustments and thus might have a re-
                // borrow here which merely derefs and then refs again (it might have
                // a different region or mutability, but we don't care here).
                match datum.ty.sty {
                    // Don't skip a conversion from Box<T> to &T, etc.
                    ty::TyRef(..) => {
                        if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
                            // Don't skip an overloaded deref.
                            0
                        } else {
                            1
                        }
                    }
                    _ => 0
                }
            } else {
                0
            };

            if adj.autoderefs > skip_reborrows {
                // Schedule cleanup.
                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
                                                          lval.to_expr_datum(),
                                                          adj.autoderefs - skip_reborrows));
            }

            // (You might think there is a more elegant way to do this than a
            // skip_reborrows bool, but then you remember that the borrow checker exists).
            if skip_reborrows == 0 && adj.autoref.is_some() {
                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
            }

            if let Some(target) = adj.unsize {
                // We do not arrange cleanup ourselves; if we already are an
                // L-value, then cleanup will have already been scheduled (and
                // the `datum.to_rvalue_datum` call below will emit code to zero
                // the drop flag when moving out of the L-value). If we are an
                // R-value, then we do not need to schedule cleanup.
                let source_datum = unpack_datum!(bcx,
                    datum.to_rvalue_datum(bcx, "__coerce_source"));

                let target = bcx.monomorphize(&target);

                let scratch = alloc_ty(bcx, target, "__coerce_target");
                call_lifetime_start(bcx, scratch);
                let target_datum = Datum::new(scratch, target,
                                              Rvalue::new(ByRef));
                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
                datum = Datum::new(scratch, target,
                                   RvalueExpr(Rvalue::new(ByRef)));
            }
        }
    }
    debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
    DatumBlock::new(bcx, datum)
}

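/// Performs an unsizing coercion from `source` into the by-ref `target`,
/// e.g. `&[T; N]` to `&[T]` or `Box<T>` to `Box<Trait>`. For pointer types
/// this fills in the (data, extra) pair of the target fat pointer; for
/// structs implementing `CoerceUnsized` it copies the fields and recurses
/// on the single field whose type changes.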
fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              span: codemap::Span,
                              source: Datum<'tcx, Rvalue>,
                              target: Datum<'tcx, Rvalue>)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    debug!("coerce_unsized({} -> {})",
           source.to_string(bcx.ccx()),
           target.to_string(bcx.ccx()));

    match (&source.ty.sty, &target.ty.sty) {
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            let (inner_source, inner_target) = (a, b);

            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
                // Normally, the source is a thin pointer and we are
                // adding extra info to make a fat pointer. The exception
                // is when we are upcasting an existing object fat pointer
                // to use a different vtable. In that case, we want to
                // load out the original data pointer so we can repackage
                // it.
                (Load(bcx, get_dataptr(bcx, source.val)),
                 Some(Load(bcx, get_meta(bcx, source.val))))
            } else {
                let val = if source.kind.is_by_ref() {
                    load_ty(bcx, source.val, source.ty)
                } else {
                    source.val
                };
                (val, None)
            };

            let info = unsized_info(bcx.ccx(), inner_source, inner_target,
                                    old_info, bcx.fcx.param_substs);

            // Compute the base pointer. This doesn't change the pointer value,
            // but merely its type.
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
            let base = PointerCast(bcx, base, ptr_ty);

            Store(bcx, base, get_dataptr(bcx, target.val));
            Store(bcx, info, get_meta(bcx, target.val));
        }

        // This can be extended to enums and tuples in the future.
        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
            assert_eq!(def_id_a, def_id_b);

            // The target is already by-ref because it's to be written to.
            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
            assert!(target.kind.is_by_ref());

            let trait_substs = Substs::erased(VecPerParamSpace::new(vec![target.ty],
                                                                    vec![source.ty],
                                                                    Vec::new()));
            let trait_ref = ty::Binder(ty::TraitRef {
                def_id: langcall(bcx, Some(span), "coercion",
                                 CoerceUnsizedTraitLangItem),
                substs: bcx.tcx().mk_substs(trait_substs)
            });

            let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) {
                traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
                    bcx.tcx().custom_coerce_unsized_kind(impl_def_id)
                }
                vtable => {
                    bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {:?}",
                                                       vtable));
                }
            };

            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
            let src_fields = match &*repr_source {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bcx.sess().span_bug(span,
                                         &format!("Non univariant struct? (repr_source: {:?})",
                                                  repr_source)),
            };
            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
            let target_fields = match &*repr_target {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bcx.sess().span_bug(span,
                                         &format!("Non univariant struct? (repr_target: {:?})",
                                                  repr_target)),
            };

            let coerce_index = match kind {
                CustomCoerceUnsized::Struct(i) => i
            };
            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());

            let source_val = adt::MaybeSizedValue::sized(source.val);
            let target_val = adt::MaybeSizedValue::sized(target.val);

            let iter = src_fields.iter().zip(target_fields).enumerate();
            for (i, (src_ty, target_ty)) in iter {
                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, 0, i);
                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, 0, i);

                // If this is the field we need to coerce, recurse on it.
                if i == coerce_index {
                    coerce_unsized(bcx, span,
                                   Datum::new(ll_source, src_ty,
                                              Rvalue::new(ByRef)),
                                   Datum::new(ll_target, target_ty,
                                              Rvalue::new(ByRef)));
                } else {
                    // Otherwise, simply copy the data from the source.
                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
                }
            }
        }
        _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
                                     source.ty,
                                     target.ty))
    }
    bcx
}

/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
/// that the expr represents.
///
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
/// something like `x().f` is translated into roughly the equivalent of
///
///   { tmp = x(); tmp.f }
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   name: &str)
                                   -> DatumBlock<'blk, 'tcx, Lvalue> {
    let mut bcx = bcx;
    let datum = unpack_datum!(bcx, trans(bcx, expr));
    return datum.to_lvalue_datum(bcx, name, expr.id);
}

/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
/// directly.
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                expr: &hir::Expr)
                                -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;

    debug!("trans_unadjusted(expr={:?})", expr);
    let _indenter = indenter();

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    return match expr_kind(bcx.tcx(), expr) {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            let datum = unpack_datum!(bcx, {
                trans_datum_unadjusted(bcx, expr)
            });

            DatumBlock {bcx: bcx, datum: datum}
        }

        ExprKind::RvalueStmt => {
            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
            nil(bcx, expr_ty(bcx, expr))
        }

        ExprKind::RvalueDps => {
            let ty = expr_ty(bcx, expr);
            if type_is_zero_size(bcx.ccx(), ty) {
                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
                nil(bcx, ty)
            } else {
                let scratch = rvalue_scratch_datum(bcx, ty, "");
                bcx = trans_rvalue_dps_unadjusted(
                    bcx, expr, SaveIn(scratch.val));

                // Note: this is not obviously a good idea. It causes
                // immediate values to be loaded immediately after a
                // return from a call or other similar expression,
                // which in turn leads to alloca's having shorter
                // lifetimes and hence larger stack frames. However,
                // in turn it can lead to more register pressure.
                // Still, in practice it seems to increase
                // performance, since we have fewer problems with
                // morestack churn.
                let scratch = unpack_datum!(
                    bcx, scratch.to_appropriate_datum(bcx));

                DatumBlock::new(bcx, scratch.to_expr_datum())
            }
        }
    };

    fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
                       -> DatumBlock<'blk, 'tcx, Expr> {
        let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
        let datum = immediate_rvalue(llval, ty);
        DatumBlock::new(bcx, datum.to_expr_datum())
    }
}

fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      expr: &hir::Expr)
                                      -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let _icx = push_ctxt("trans_datum_unadjusted");

    match expr.node {
        hir::ExprPath(..) => {
            trans_def(bcx, expr, bcx.def(expr.id))
        }
        hir::ExprField(ref base, name) => {
            trans_rec_field(bcx, &**base, name.node)
        }
        hir::ExprTupField(ref base, idx) => {
            trans_rec_tup_field(bcx, &**base, idx.node)
        }
        hir::ExprIndex(ref base, ref idx) => {
            trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
        }
        hir::ExprBox(ref contents) => {
            // Special case for `Box<T>`
            let box_ty = expr_ty(bcx, expr);
            let contents_ty = expr_ty(bcx, &**contents);
            match box_ty.sty {
                ty::TyBox(..) => {
                    trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty)
                }
                _ => bcx.sess().span_bug(expr.span,
                                         "expected unique box")
            }

        }
        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit),
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            trans_binary(bcx, expr, op, &**lhs, &**rhs)
        }
        hir::ExprUnary(op, ref x) => {
            trans_unary(bcx, expr, op, &**x)
        }
        hir::ExprAddrOf(_, ref x) => {
            match x.node {
                hir::ExprRepeat(..) | hir::ExprVec(..) => {
                    // Special case for slices.
                    let cleanup_debug_loc =
                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                      x.id,
                                                                      x.span,
                                                                      false);
                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
                    let datum = unpack_datum!(
                        bcx, tvec::trans_slice_vec(bcx, expr, &**x));
                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
                    DatumBlock::new(bcx, datum)
                }
                _ => {
                    trans_addr_of(bcx, expr, &**x)
                }
            }
        }
        hir::ExprCast(ref val, _) => {
            // Datum output mode means this is a scalar cast:
            trans_imm_cast(bcx, &**val, expr.id)
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_datum_unadjusted reached \
                          fall-through case: {:?}",
                         expr.node));
        }
    }
}

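/// Common code for `base.field` and `base.<idx>` accesses: translates `base`
/// to an lvalue and computes a pointer to the field chosen by `get_idx`,
/// building a fat-pointer temporary when the field itself is unsized.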
fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                              base: &hir::Expr,
                              get_idx: F)
                              -> DatumBlock<'blk, 'tcx, Expr> where
    F: FnOnce(&'blk ty::ctxt<'tcx>, &VariantInfo<'tcx>) -> usize,
{
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rec_field");

    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
    let bare_ty = base_datum.ty;
    let repr = adt::represent_type(bcx.ccx(), bare_ty);
    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);

    let ix = get_idx(bcx.tcx(), &vinfo);
    let d = base_datum.get_element(
        bcx,
        vinfo.fields[ix].1,
        |srcval| {
            adt::trans_field_ptr(bcx, &*repr, srcval, vinfo.discr, ix)
        });

    if type_is_sized(bcx.tcx(), d.ty) {
        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
    } else {
        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
        let info = Load(bcx, get_meta(bcx, base_datum.val));
        Store(bcx, info, get_meta(bcx, scratch.val));

        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
    }
}

/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               base: &hir::Expr,
                               field: ast::Name)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
}

/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   base: &hir::Expr,
                                   idx: usize)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, _| idx)
}

fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           index_expr: &hir::Expr,
                           base: &hir::Expr,
                           idx: &hir::Expr,
                           method_call: MethodCall)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates `base[idx]`.

    let _icx = push_ctxt("trans_index");
    let ccx = bcx.ccx();
    let mut bcx = bcx;

    let index_expr_debug_loc = index_expr.debug_loc();

    // Check for overloaded index.
    let method_ty = ccx.tcx()
                       .tables
                       .borrow()
                       .method_map
                       .get(&method_call)
                       .map(|method| method.ty);
    let elt_datum = match method_ty {
        Some(method_ty) => {
            let method_ty = monomorphize_type(bcx, method_ty);

            let base_datum = unpack_datum!(bcx, trans(bcx, base));

            // Translate index expression.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));

            let ref_ty = // invoked methods have LB regions instantiated:
                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
                None => {
                    bcx.tcx().sess.span_bug(index_expr.span,
                                            "index method didn't return a \
                                             dereferenceable type?!")
                }
                Some(elt_tm) => elt_tm.ty,
            };

            // Overloaded. Evaluate `trans_overloaded_op`, which will
            // invoke the user's index() method, which basically yields
            // a `&T` pointer. We can then proceed down the normal
            // path (below) to dereference that `&T`.
            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
            unpack_result!(bcx,
                           trans_overloaded_op(bcx,
                                               index_expr,
                                               method_call,
                                               base_datum,
                                               Some((ix_datum, idx.id)),
                                               Some(SaveIn(scratch.val)),
                                               false));
            let datum = scratch.to_expr_datum();
            let lval = Lvalue::new("expr::trans_index overload");
            if type_is_sized(bcx.tcx(), elt_ty) {
                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
            } else {
                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
            }
        }
        None => {
            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
                                                                base,
                                                                "index"));

            // Translate index expression and cast to a suitable LLVM integer.
            // Rust is less strict than LLVM in this regard.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
            let ix_val = ix_datum.to_llscalarish(bcx);
            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
                                                     val_ty(ix_val));
            let int_size = machine::llbitsize_of_real(bcx.ccx(),
                                                      ccx.int_type());
            let ix_val = {
                if ix_size < int_size {
                    if expr_ty(bcx, idx).is_signed() {
                        SExt(bcx, ix_val, ccx.int_type())
                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
                } else if ix_size > int_size {
                    Trunc(bcx, ix_val, ccx.int_type())
                } else {
                    ix_val
                }
            };

            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());

            let (base, len) = base_datum.get_vec_base_and_len(bcx);

            debug!("trans_index: base {}", bcx.val_to_string(base));
            debug!("trans_index: len {}", bcx.val_to_string(len));

            let bounds_check = ICmp(bcx,
                                    llvm::IntUGE,
                                    ix_val,
                                    len,
                                    index_expr_debug_loc);
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            let expected = Call(bcx,
                                expect,
                                &[bounds_check, C_bool(ccx, false)],
                                None,
                                index_expr_debug_loc);
            bcx = with_cond(bcx, expected, |bcx| {
                controlflow::trans_fail_bounds_check(bcx,
                                                     expr_info(index_expr),
                                                     ix_val,
                                                     len)
            });
            let elt = InBoundsGEP(bcx, base, &[ix_val]);
            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
            let lval = Lvalue::new("expr::trans_index fallback");
            Datum::new(elt, unit_ty, LvalueExpr(lval))
        }
    };

    DatumBlock::new(bcx, elt_datum)
}

fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         ref_expr: &hir::Expr,
                         def: def::Def)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates a reference to a path.

    let _icx = push_ctxt("trans_def_lvalue");
    match def {
        def::DefFn(..) | def::DefMethod(..) |
        def::DefStruct(_) | def::DefVariant(..) => {
            let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
                                                bcx.fcx.param_substs);
            DatumBlock::new(bcx, datum.to_expr_datum())
        }
        def::DefStatic(did, _) => {
            let const_ty = expr_ty(bcx, ref_expr);
            let val = get_static_val(bcx.ccx(), did, const_ty);
            let lval = Lvalue::new("expr::trans_def");
            DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
        }
        def::DefConst(_) => {
            bcx.sess().span_bug(ref_expr.span,
                                "constant expression should not reach expr::trans_def")
        }
        _ => {
            DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
        }
    }
}

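/// Translates statement-kind expressions (`break`, `continue`, `return`,
/// loops, assignments, inline asm); these produce no meaningful value, so
/// only a new block is returned.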
fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            expr: &hir::Expr)
                                            -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rvalue_stmt");

    if bcx.unreachable.get() {
        return bcx;
    }

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprBreak(label_opt) => {
            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprAgain(label_opt) => {
            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprRet(ref ex) => {
            // Check to see if the return expression itself is reachable;
            // it may not be, e.g. when the inner expression itself contains a return.
            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
                cfg.node_is_reachable(expr.id)
            } else {
                true
            };

            if reachable {
                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
            } else {
                // If it's not reachable, just translate the inner expression
                // directly. This avoids having to manage a return slot when
                // it won't actually be used anyway.
                if let &Some(ref x) = ex {
                    bcx = trans_into(bcx, &**x, Ignore);
                }
                // Mark the end of the block as unreachable. Once we get to
                // a return expression, there's no more we should be doing
                // after this.
                Unreachable(bcx);
                bcx
            }
        }
        hir::ExprWhile(ref cond, ref body, _) => {
            controlflow::trans_while(bcx, expr, &**cond, &**body)
        }
        hir::ExprLoop(ref body, _) => {
            controlflow::trans_loop(bcx, expr, &**body)
        }
        hir::ExprAssign(ref dst, ref src) => {
            let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));

            if bcx.fcx.type_needs_drop(dst_datum.ty) {
                // If there are destructors involved, make sure we
                // are copying from an rvalue, since that cannot possibly
                // alias an lvalue. We are concerned about code like:
                //
                //   a = a
                //
                // but also
                //
                //   a = a.b
                //
                // where e.g. a : Option<Foo> and a.b :
                // Option<Foo>. In that case, freeing `a` before the
                // assignment may also free `a.b`!
                //
                // We could avoid this intermediary with some analysis
                // to determine whether `dst` may possibly own `src`.
                debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
                let src_datum = unpack_datum!(
                    bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
                let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
                let opt_hint_val = opt_hint_datum.map(|d| d.to_value());

                // 1. Drop the data at the destination, passing the
                //    drop-hint in case the lvalue has already been
                //    dropped or moved.
                bcx = glue::drop_ty_core(bcx,
                                         dst_datum.val,
                                         dst_datum.ty,
                                         expr.debug_loc(),
                                         false,
                                         opt_hint_val);

                // 2. We are overwriting the destination; ensure that
                //    its drop-hint (if any) says "initialized."
                if let Some(hint_val) = opt_hint_val {
                    let hint_llval = hint_val.value();
                    let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
                    Store(bcx, drop_needed, hint_llval);
                }
                src_datum.store_to(bcx, dst_datum.val)
            } else {
                src_datum.store_to(bcx, dst_datum.val)
            }
        }
        hir::ExprAssignOp(op, ref dst, ref src) => {
            let has_method_map = bcx.tcx()
                                    .tables
                                    .borrow()
                                    .method_map
                                    .contains_key(&MethodCall::expr(expr.id));

            if has_method_map {
                let dst = unpack_datum!(bcx, trans(bcx, &**dst));
                let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
                trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), dst,
                                    Some((src_datum, src.id)), None, false).bcx
            } else {
                trans_assign_op(bcx, expr, op, &**dst, &**src)
            }
        }
        hir::ExprInlineAsm(ref a) => {
            asm::trans_inline_asm(bcx, a)
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_stmt_unadjusted reached \
                          fall-through case: {:?}",
                         expr.node));
        }
    }
}

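/// Translates DPS-kind rvalue expressions (calls, struct/vec literals,
/// closures, `if`/`match`/blocks, overloaded operators, ...), writing the
/// result directly into `dest` rather than returning a datum.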
fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                           expr: &hir::Expr,
                                           dest: Dest)
                                           -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
    let mut bcx = bcx;
    let tcx = bcx.tcx();

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprPath(..) => {
            trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
        }
        hir::ExprIf(ref cond, ref thn, ref els) => {
            controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest)
        }
        hir::ExprMatch(ref discr, ref arms, _) => {
            _match::trans_match(bcx, expr, &**discr, &arms[..], dest)
        }
        hir::ExprBlock(ref blk) => {
            controlflow::trans_block(bcx, &**blk, dest)
        }
        hir::ExprStruct(_, ref fields, ref base) => {
            trans_struct(bcx,
                         &fields[..],
                         base.as_ref().map(|e| &**e),
                         expr.span,
                         expr.id,
                         node_id_type(bcx, expr.id),
                         dest)
        }
        hir::ExprRange(ref start, ref end) => {
            // FIXME it is just not right that we are synthesising ast nodes in
            // trans. Shudder.
            fn make_field(field_name: &str, expr: P<hir::Expr>) -> hir::Field {
                hir::Field {
                    name: codemap::dummy_spanned(token::intern(field_name)),
                    expr: expr,
                    span: codemap::DUMMY_SP,
                }
            }

            // A range just desugars into a struct.
            // Note that the type of the start and end may not be the same, but
            // they should only differ in their lifetime, which should not matter
            // in trans.
            let (did, fields, ty_params) = match (start, end) {
                (&Some(ref start), &Some(ref end)) => {
                    // Desugar to Range
                    let fields = vec![make_field("start", start.clone()),
                                      make_field("end", end.clone())];
                    (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
                }
                (&Some(ref start), &None) => {
                    // Desugar to RangeFrom
                    let fields = vec![make_field("start", start.clone())];
                    (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
                }
                (&None, &Some(ref end)) => {
                    // Desugar to RangeTo
                    let fields = vec![make_field("end", end.clone())];
                    (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
                }
                _ => {
                    // Desugar to RangeFull
                    (tcx.lang_items.range_full_struct(), vec![], vec![])
                }
            };

            if let Some(did) = did {
                let substs = Substs::new_type(ty_params, vec![]);
                trans_struct(bcx,
                             &fields,
                             None,
                             expr.span,
                             expr.id,
                             tcx.mk_struct(tcx.lookup_adt_def(did),
                                           tcx.mk_substs(substs)),
                             dest)
            } else {
                tcx.sess.span_bug(expr.span,
                                  "No lang item for ranges (how did we get this far?)")
            }
        }
        hir::ExprTup(ref args) => {
            let numbered_fields: Vec<(usize, &hir::Expr)> =
                args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
            trans_adt(bcx,
                      expr_ty(bcx, expr),
                      0,
                      &numbered_fields[..],
                      None,
                      dest,
                      expr.debug_loc())
        }
        hir::ExprLit(ref lit) => {
            match lit.node {
                ast::LitStr(ref s, _) => {
                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
                }
                _ => {
                    bcx.tcx()
                       .sess
                       .span_bug(expr.span,
                                 "trans_rvalue_dps_unadjusted shouldn't be \
                                  translating this type of literal")
                }
            }
        }
        hir::ExprVec(..) | hir::ExprRepeat(..) => {
            tvec::trans_fixed_vstore(bcx, expr, dest)
        }
        hir::ExprClosure(_, ref decl, ref body) => {
            let dest = match dest {
                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
                Ignore => closure::Dest::Ignore(bcx.ccx())
            };

            // NB. To get the id of the closure, we don't use
            // `local_def_id(id)`, but rather we extract the closure
            // def-id from the expr's type. This is because this may
            // be an inlined expression from another crate, and we
            // want to get the ORIGINAL closure def-id, since that is
            // the key we need to find the closure-kind and
            // closure-type etc.
            let (def_id, substs) = match expr_ty(bcx, expr).sty {
                ty::TyClosure(def_id, ref substs) => (def_id, substs),
                ref t =>
                    bcx.tcx().sess.span_bug(
                        expr.span,
                        &format!("closure expr without closure type: {:?}", t)),
            };

            closure::trans_closure_expr(dest, decl, body, expr.id, def_id, substs).unwrap_or(bcx)
        }
        hir::ExprCall(ref f, ref args) => {
            if bcx.tcx().is_method_call(expr.id) {
                trans_overloaded_call(bcx,
                                      expr,
                                      &**f,
                                      &args[..],
                                      Some(dest))
            } else {
                callee::trans_call(bcx,
                                   expr,
                                   &**f,
                                   callee::ArgExprs(&args[..]),
                                   dest)
            }
        }
        hir::ExprMethodCall(_, _, ref args) => {
            callee::trans_method_call(bcx,
                                      expr,
                                      &*args[0],
                                      callee::ArgExprs(&args[..]),
                                      dest)
        }
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            // if not overloaded, would be RvalueDatumExpr
            let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
            let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
                                Some((rhs_datum, rhs.id)), Some(dest),
                                !rustc_front::util::is_by_value_binop(op.node)).bcx
        }
        hir::ExprUnary(op, ref subexpr) => {
            // if not overloaded, would be RvalueDatumExpr
            let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
                                arg, None, Some(dest), !rustc_front::util::is_by_value_unop(op)).bcx
        }
        hir::ExprIndex(ref base, ref idx) => {
            // if not overloaded, would be RvalueDatumExpr
            let base = unpack_datum!(bcx, trans(bcx, &**base));
            let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
            trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
                                Some((idx_datum, idx.id)), Some(dest), true).bcx
        }
        hir::ExprCast(..) => {
            // Trait casts used to come this way, now they should be coercions.
            bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
        }
        hir::ExprAssignOp(op, _, _) => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("augmented assignment `{}=` should always be a rvalue_stmt",
                         rustc_front::util::binop_to_string(op.node)))
        }
        _ => {
            bcx.tcx().sess.span_bug(
                expr.span,
                &format!("trans_rvalue_dps_unadjusted reached fall-through \
                          case: {:?}",
                         expr.node));
        }
    }
}

fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        ref_expr: &hir::Expr,
                                        def: def::Def,
                                        dest: Dest)
                                        -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_def_dps_unadjusted");

    let lldest = match dest {
        SaveIn(lldest) => lldest,
        Ignore => { return bcx; }
    };

    match def {
        def::DefVariant(tid, vid, _) => {
            let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
            if let ty::VariantKind::Tuple = variant.kind() {
                // N-ary variant.
                let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
                                                ExprId(ref_expr.id),
                                                bcx.fcx.param_substs).val;
                Store(bcx, llfn, lldest);
                return bcx;
            } else {
                // Nullary variant.
                let ty = expr_ty(bcx, ref_expr);
                let repr = adt::represent_type(bcx.ccx(), ty);
                adt::trans_set_discr(bcx, &*repr, lldest, variant.disr_val);
                return bcx;
            }
        }
        def::DefStruct(_) => {
            let ty = expr_ty(bcx, ref_expr);
            match ty.sty {
                ty::TyStruct(def, _) if def.has_dtor() => {
                    let repr = adt::represent_type(bcx.ccx(), ty);
                    adt::trans_set_discr(bcx, &*repr, lldest, 0);
                }
                _ => {}
            }
            bcx
        }
        _ => {
            bcx.tcx().sess.span_bug(ref_expr.span, &format!(
1298 "Non-DPS def {:?} referened by {}",
                def, bcx.node_id_to_string(ref_expr.id)));
        }
    }
}

pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                         ref_expr: &hir::Expr,
                                         def: def::Def,
                                         param_substs: &'tcx Substs<'tcx>)
                                         -> Datum<'tcx, Rvalue> {
    let _icx = push_ctxt("trans_def_datum_unadjusted");

    match def {
        def::DefFn(did, _) |
        def::DefStruct(did) | def::DefVariant(_, did, _) => {
            callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
        }
        def::DefMethod(method_did) => {
            match ccx.tcx().impl_or_trait_item(method_did).container() {
                ty::ImplContainer(_) => {
                    callee::trans_fn_ref(ccx, method_did,
                                         ExprId(ref_expr.id),
                                         param_substs)
                }
                ty::TraitContainer(trait_did) => {
                    meth::trans_static_method_callee(ccx, method_did,
                                                     trait_did, ref_expr.id,
                                                     param_substs)
                }
            }
        }
        _ => {
            ccx.tcx().sess.span_bug(ref_expr.span, &format!(
                "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
                def,
                ref_expr));
        }
    }
}

/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   def: def::Def)
                                   -> Datum<'tcx, Lvalue> {
    let _icx = push_ctxt("trans_local_var");

    match def {
        def::DefUpvar(_, nid, _, _) => {
            // Can't move upvars, so this is never a ZeroMemLastUse.
            let local_ty = node_id_type(bcx, nid);
            let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
                                             bcx, nid, HintKind::ZeroAndMaintain);
            match bcx.fcx.llupvars.borrow().get(&nid) {
                Some(&val) => Datum::new(val, local_ty, lval),
                None => {
                    bcx.sess().bug(&format!(
                        "trans_local_var: no llval for upvar {} found",
                        nid));
                }
            }
        }
        def::DefLocal(_, nid) => {
            let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
                Some(&v) => v,
                None => {
                    bcx.sess().bug(&format!(
                        "trans_local_var: no datum for local/arg {} found",
                        nid));
                }
            };
            debug!("take_local(nid={}, v={}, ty={})",
                   nid, bcx.val_to_string(datum.val), datum.ty);
            datum
        }
        _ => {
            bcx.sess().unimpl(&format!(
                "unsupported def type in trans_local_var: {:?}",
                def));
        }
    }
}

fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                            fields: &[hir::Field],
                            base: Option<&hir::Expr>,
                            expr_span: codemap::Span,
                            expr_id: ast::NodeId,
                            ty: Ty<'tcx>,
                            dest: Dest) -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rec");

    let tcx = bcx.tcx();
    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);

    let mut need_base = vec![true; vinfo.fields.len()];

    let numbered_fields = fields.iter().map(|field| {
        let pos = vinfo.field_index(field.name.node);
        need_base[pos] = false;
        (pos, &*field.expr)
    }).collect::<Vec<_>>();

    let optbase = match base {
        Some(base_expr) => {
            let mut leftovers = Vec::new();
            for (i, b) in need_base.iter().enumerate() {
                if *b {
                    leftovers.push((i, vinfo.fields[i].1));
                }
            }
            Some(StructBaseInfo {expr: base_expr,
                                 fields: leftovers })
        }
        None => {
            if need_base.iter().any(|b| *b) {
                tcx.sess.span_bug(expr_span, "missing fields and no base expr")
            }
            None
        }
    };

    trans_adt(bcx,
              ty,
              vinfo.discr,
              &numbered_fields,
              optbase,
              dest,
              DebugLoc::At(expr_id, expr_span))
}

/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`).
///
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
pub struct StructBaseInfo<'a, 'tcx> {
    /// The base expression; will be evaluated after all explicit fields.
    expr: &'a hir::Expr,
    /// The indices of fields to copy paired with their types.
    fields: Vec<(usize, Ty<'tcx>)>
}

/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
///   expression to store into that field. The initializers will be
///   evaluated in the order specified by `fields`.
///
/// - `optbase` contains information on the base struct (if any) from
///   which remaining fields are copied; see comments on `StructBaseInfo`.
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                 ty: Ty<'tcx>,
                                 discr: ty::Disr,
                                 fields: &[(usize, &hir::Expr)],
                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
                                 dest: Dest,
                                 debug_location: DebugLoc)
                                 -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_adt");
    let fcx = bcx.fcx;
    let repr = adt::represent_type(bcx.ccx(), ty);

    debug_location.apply(bcx.fcx);

    // If we don't care about the result, just make a
    // temporary stack slot
    let addr = match dest {
        SaveIn(pos) => pos,
        Ignore => {
            let llresult = alloc_ty(bcx, ty, "temp");
            call_lifetime_start(bcx, llresult);
            llresult
        }
    };

    // This scope holds intermediates that must be cleaned up should a
    // panic occur before the ADT as a whole is ready.
    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();

    if ty.is_simd() {
        // Issue 23112: The original logic appeared vulnerable to the same
        // order-of-eval bug. But, SIMD values are tuple-structs;
        // i.e. functional record update (FRU) syntax is unavailable.
        //
        // To be safe, double-check that we did not get here via FRU.
        assert!(optbase.is_none());

        // This is the constructor of a SIMD type, such types are
        // always primitive machine types and so do not have a
        // destructor or require any clean-up.
        let llty = type_of::type_of(bcx.ccx(), ty);

        // Keep the vector in a register, running through the fields and
        // `insertelement`ing them directly into that register
        // (i.e. avoiding GEPi and `store`s to an alloca).
        let mut vec_val = C_undef(llty);

        for &(i, ref e) in fields {
            let block_datum = trans(bcx, &**e);
            bcx = block_datum.bcx;
            let position = C_uint(bcx.ccx(), i);
            let value = block_datum.datum.to_llscalarish(bcx);
            vec_val = InsertElement(bcx, vec_val, value, position);
        }
        Store(bcx, vec_val, addr);
    } else if let Some(base) = optbase {
        // Issue 23112: If there is a base, then order-of-eval
        // requires field expressions eval'ed before base expression.

        // First, trans field expressions to temporary scratch values.
        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
            let datum = unpack_datum!(bcx, trans(bcx, &**e));
            (i, datum)
        }).collect();

        debug_location.apply(bcx.fcx);

        // Second, trans the base to the dest.
        assert_eq!(discr, 0);

        let addr = adt::MaybeSizedValue::sized(addr);
        match expr_kind(bcx.tcx(), &*base.expr) {
            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
                bcx = trans_into(bcx, &*base.expr, SaveIn(addr.value));
            },
            ExprKind::RvalueStmt => {
                bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
            }
            _ => {
                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
                for &(i, t) in &base.fields {
                    let datum = base_datum.get_element(
                        bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
                    assert!(type_is_sized(bcx.tcx(), datum.ty));
                    let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
                    bcx = datum.store_to(bcx, dest);
                }
            }
        }

        // Finally, move scratch field values into actual field locations
        for (i, datum) in scratch_vals {
            let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
            bcx = datum.store_to(bcx, dest);
        }
    } else {
        // No base means we can write all fields directly in place.
        let addr = adt::MaybeSizedValue::sized(addr);
        for &(i, ref e) in fields {
            let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
            let e_ty = expr_ty_adjusted(bcx, &**e);
            bcx = trans_into(bcx, &**e, SaveIn(dest));
            let scope = cleanup::CustomScope(custom_cleanup_scope);
            fcx.schedule_lifetime_end(scope, dest);
            // FIXME: nonzeroing move should generalize to fields
            fcx.schedule_drop_mem(scope, dest, e_ty, None);
        }
    }
    adt::trans_set_discr(bcx, &*repr, addr, discr);

    fcx.pop_custom_cleanup_scope(custom_cleanup_scope);

    // If we don't care about the result, drop the temporary we made
    match dest {
        SaveIn(_) => bcx,
        Ignore => {
            bcx = glue::drop_ty(bcx, addr, ty, debug_location);
            base::call_lifetime_end(bcx, addr);
            bcx
        }
    }
}


fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   lit: &ast::Lit)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    // must not be a string constant; that is an RvalueDpsExpr
    let _icx = push_ctxt("trans_immediate_lit");
    let ty = expr_ty(bcx, expr);
    let v = consts::const_lit(bcx.ccx(), expr, lit);
    immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}

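/// Translates the built-in unary operators `!`, unary `-`, and `*`;
/// overloaded deref is the one overloaded operator handled here, since it
/// yields a datum (the others go through the DPS path).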
fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           expr: &hir::Expr,
                           op: hir::UnOp,
                           sub_expr: &hir::Expr)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    let ccx = bcx.ccx();
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_unary_datum");

    let method_call = MethodCall::expr(expr.id);

    // The only overloaded operator that is translated to a datum
    // is an overloaded deref, since it always yields a `&T`.
    // Otherwise, we should be in the RvalueDpsExpr path.
    assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));

    let un_ty = expr_ty(bcx, expr);

    let debug_loc = expr.debug_loc();

    match op {
        hir::UnNot => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
            immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
        }
        hir::UnNeg => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let val = datum.to_llscalarish(bcx);
            let (bcx, llneg) = {
                if un_ty.is_fp() {
                    let result = FNeg(bcx, val, debug_loc);
                    (bcx, result)
                } else {
                    let is_signed = un_ty.is_signed();
                    let result = Neg(bcx, val, debug_loc);
                    let bcx = if bcx.ccx().check_overflow() && is_signed {
                        let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
                        let is_min = ICmp(bcx, llvm::IntEQ, val,
                                          C_integral(llty, min, true), debug_loc);
                        with_cond(bcx, is_min, |bcx| {
                            let msg = InternedString::new(
                                "attempted to negate with overflow");
                            controlflow::trans_fail(bcx, expr_info(expr), msg)
                        })
                    } else {
                        bcx
                    };
                    (bcx, result)
                }
            };
            immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
        }
        hir::UnDeref => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            deref_once(bcx, expr, datum, method_call)
        }
    }
}

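/// Translates `box contents`: allocates space on the exchange heap and
/// translates `contents` directly into it, scheduling a temporary cleanup
/// so the allocation is freed if translating `contents` unwinds.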
fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               box_expr: &hir::Expr,
                               box_ty: Ty<'tcx>,
                               contents: &hir::Expr,
                               contents_ty: Ty<'tcx>)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_uniq_expr");
    let fcx = bcx.fcx;
    assert!(type_is_sized(bcx.tcx(), contents_ty));
    let llty = type_of::type_of(bcx.ccx(), contents_ty);
    let size = llsize_of(bcx.ccx(), llty);
    let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
    let llty_ptr = llty.ptr_to();
    let Result { bcx, val } = malloc_raw_dyn(bcx,
                                             llty_ptr,
                                             box_ty,
                                             size,
                                             align,
                                             box_expr.debug_loc());
    // Unique boxes do not allocate for zero-size types. The standard library
    // may assume that `free` is never called on the pointer returned for
    // `Box<ZeroSizeType>`.
    let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
        trans_into(bcx, contents, SaveIn(val))
    } else {
        let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
        fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
                                val, cleanup::HeapExchange, contents_ty);
        let bcx = trans_into(bcx, contents, SaveIn(val));
        fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
        bcx
    };
    immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}

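/// Translates `&subexpr`: for sized values this is simply the address of
/// the lvalue, while for unsized values the existing fat pointer is reused.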
1681 fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1682 expr: &hir::Expr,
1683 subexpr: &hir::Expr)
1684 -> DatumBlock<'blk, 'tcx, Expr> {
1685 let _icx = push_ctxt("trans_addr_of");
1686 let mut bcx = bcx;
1687 let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
1688 let ty = expr_ty(bcx, expr);
1689 if !type_is_sized(bcx.tcx(), sub_datum.ty) {
1690 // Always generate an lvalue datum, because this pointer doesn't own
1691 // the data and cleanup is scheduled elsewhere.
1692 DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
1693 } else {
1694 // Sized value, ref to a thin pointer
1695 immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
1696 }
1697 }
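// For illustration, the two branches above (a sketch):
//
//     let x = 1i32;
//     let boxed: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
//     let thin: &i32 = &x;         // sized referent: thin-pointer rvalue
//     let fat: &[i32] = &*boxed;   // unsized referent: fat-pointer lvalue datum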
1698
1699 fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1700 binop_expr: &hir::Expr,
1701 binop_ty: Ty<'tcx>,
1702 op: hir::BinOp,
1703 lhs: Datum<'tcx, Rvalue>,
1704 rhs: Datum<'tcx, Rvalue>)
1705 -> DatumBlock<'blk, 'tcx, Expr>
1706 {
1707 let _icx = push_ctxt("trans_scalar_binop");
1708
1709 let tcx = bcx.tcx();
1710 let lhs_t = lhs.ty;
1711 assert!(!lhs_t.is_simd());
1712 let is_float = lhs_t.is_fp();
1713 let is_signed = lhs_t.is_signed();
1714 let info = expr_info(binop_expr);
1715
1716 let binop_debug_loc = binop_expr.debug_loc();
1717
1718 let mut bcx = bcx;
1719 let lhs = lhs.to_llscalarish(bcx);
1720 let rhs = rhs.to_llscalarish(bcx);
1721 let val = match op.node {
1722 hir::BiAdd => {
1723 if is_float {
1724 FAdd(bcx, lhs, rhs, binop_debug_loc)
1725 } else {
1726 let (newbcx, res) = with_overflow_check(
1727 bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
1728 bcx = newbcx;
1729 res
1730 }
1731 }
1732 hir::BiSub => {
1733 if is_float {
1734 FSub(bcx, lhs, rhs, binop_debug_loc)
1735 } else {
1736 let (newbcx, res) = with_overflow_check(
1737 bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
1738 bcx = newbcx;
1739 res
1740 }
1741 }
1742 hir::BiMul => {
1743 if is_float {
1744 FMul(bcx, lhs, rhs, binop_debug_loc)
1745 } else {
1746 let (newbcx, res) = with_overflow_check(
1747 bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
1748 bcx = newbcx;
1749 res
1750 }
1751 }
1752 hir::BiDiv => {
1753 if is_float {
1754 FDiv(bcx, lhs, rhs, binop_debug_loc)
1755 } else {
1756 // Only zero-check integers; fp / 0.0 yields inf or NaN
1757 bcx = base::fail_if_zero_or_overflows(bcx,
1758 expr_info(binop_expr),
1759 op,
1760 lhs,
1761 rhs,
1762 lhs_t);
1763 if is_signed {
1764 SDiv(bcx, lhs, rhs, binop_debug_loc)
1765 } else {
1766 UDiv(bcx, lhs, rhs, binop_debug_loc)
1767 }
1768 }
1769 }
1770 hir::BiRem => {
1771 if is_float {
1772 // LLVM currently always lowers the `frem` instruction to the
1773 // appropriate library calls typically found in libm. Notably, f64
1774 // gets wired up to `fmod` and f32 to `fmodf`. Inconveniently for
1775 // us, 32-bit MSVC does not actually have an `fmodf` symbol; it is
1776 // instead just an inline function in a header that widens to an
1777 // f64, uses `fmod`, and then truncates back down to an f32.
1778 //
1779 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
1780 // still unconditionally lower frem instructions over 32-bit floats
1781 // to a call to `fmodf`. To work around this we special case MSVC
1782 // 32-bit float rem instructions and instead do the call out to
1783 // `fmod` ourselves.
1784 //
1785 // Note that this is currently duplicated with src/libcore/ops.rs
1786 // which does the same thing, and it would be nice to perhaps unify
1787 // these two implementations one day! Also note that we call `fmod`
1788 // for both 32- and 64-bit floats because if we emit any FRem
1789 // instruction at all then LLVM is capable of optimizing it into a
1790 // 32-bit FRem (which we're trying to avoid).
1791 let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
1792 tcx.sess.target.target.arch == "x86";
1793 if use_fmod {
1794 let f64t = Type::f64(bcx.ccx());
1795 let fty = Type::func(&[f64t, f64t], &f64t);
1796 let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
1797 tcx.types.f64);
1798 if lhs_t == tcx.types.f32 {
1799 let lhs = FPExt(bcx, lhs, f64t);
1800 let rhs = FPExt(bcx, rhs, f64t);
1801 let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
1802 FPTrunc(bcx, res, Type::f32(bcx.ccx()))
1803 } else {
1804 Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc)
1805 }
1806 } else {
1807 FRem(bcx, lhs, rhs, binop_debug_loc)
1808 }
1809 } else {
1810 // Only zero-check integers; fp % 0.0 yields NaN
1811 bcx = base::fail_if_zero_or_overflows(bcx,
1812 expr_info(binop_expr),
1813 op, lhs, rhs, lhs_t);
1814 if is_signed {
1815 SRem(bcx, lhs, rhs, binop_debug_loc)
1816 } else {
1817 URem(bcx, lhs, rhs, binop_debug_loc)
1818 }
1819 }
1820 }
1821 hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
1822 hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
1823 hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
1824 hir::BiShl => {
1825 let (newbcx, res) = with_overflow_check(
1826 bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
1827 bcx = newbcx;
1828 res
1829 }
1830 hir::BiShr => {
1831 let (newbcx, res) = with_overflow_check(
1832 bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
1833 bcx = newbcx;
1834 res
1835 }
1836 hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
1837 base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
1838 }
1839 _ => {
1840 bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1841 }
1842 };
1843
1844 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1845 }
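// For illustration of the dispatch above, assuming overflow checks are
// enabled so the checked paths are taken (a sketch):
//
//     let s = a + b;    // BiAdd on i32 -> llvm.sadd.with.overflow.i32
//     let q = x / y;    // BiDiv on u32 -> zero check, then UDiv
//     let r = f % g;    // BiRem on f64 -> FRem (or `fmod` on 32-bit MSVC)
//     let t = n << k;   // BiShl        -> shift-amount check, then Shl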
1846
1847 // refinement types would obviate the need for this
1848 enum lazy_binop_ty {
1849 lazy_and,
1850 lazy_or,
1851 }
1852
1853 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1854 binop_expr: &hir::Expr,
1855 op: lazy_binop_ty,
1856 a: &hir::Expr,
1857 b: &hir::Expr)
1858 -> DatumBlock<'blk, 'tcx, Expr> {
1859 let _icx = push_ctxt("trans_lazy_binop");
1860 let binop_ty = expr_ty(bcx, binop_expr);
1861 let fcx = bcx.fcx;
1862
1863 let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1864 let lhs = lhs.to_llscalarish(past_lhs);
1865
1866 if past_lhs.unreachable.get() {
1867 return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1868 }
1869
1870 let join = fcx.new_id_block("join", binop_expr.id);
1871 let before_rhs = fcx.new_id_block("before_rhs", b.id);
1872
1873 match op {
1874 lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1875 lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1876 }
1877
1878 let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1879 let rhs = rhs.to_llscalarish(past_rhs);
1880
1881 if past_rhs.unreachable.get() {
1882 return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1883 }
1884
1885 Br(past_rhs, join.llbb, DebugLoc::None);
1886 let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1887 &[past_lhs.llbb, past_rhs.llbb]);
1888
1889 return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
1890 }
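// For illustration, the control flow constructed above for `a && b`
// (a sketch of the block structure; `||` swaps the two branch targets):
//
//     past_lhs:   %lhs = ...
//                 br i1 %lhs, label %before_rhs, label %join
//     before_rhs: %rhs = ...
//                 br label %join            ; reaches %join as past_rhs
//     join:       %res = phi i1 [ %lhs, %past_lhs ], [ %rhs, %past_rhs ]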
1891
1892 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1893 expr: &hir::Expr,
1894 op: hir::BinOp,
1895 lhs: &hir::Expr,
1896 rhs: &hir::Expr)
1897 -> DatumBlock<'blk, 'tcx, Expr> {
1898 let _icx = push_ctxt("trans_binary");
1899 let ccx = bcx.ccx();
1900
1901 // if overloaded, would be RvalueDpsExpr
1902 assert!(!ccx.tcx().is_method_call(expr.id));
1903
1904 match op.node {
1905 hir::BiAnd => {
1906 trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1907 }
1908 hir::BiOr => {
1909 trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
1910 }
1911 _ => {
1912 let mut bcx = bcx;
1913 let binop_ty = expr_ty(bcx, expr);
1914
1915 let lhs = unpack_datum!(bcx, trans(bcx, lhs));
1916 let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
1917 debug!("trans_binary (expr {}): lhs={}",
1918 expr.id, lhs.to_string(ccx));
1919 let rhs = unpack_datum!(bcx, trans(bcx, rhs));
1920 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
1921 debug!("trans_binary (expr {}): rhs={}",
1922 expr.id, rhs.to_string(ccx));
1923
1924 if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
1925 assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
1926 "built-in binary operators on fat pointers are homogeneous");
1927 assert_eq!(binop_ty, bcx.tcx().types.bool);
1928 let val = base::compare_scalar_types(
1929 bcx,
1930 lhs.val,
1931 rhs.val,
1932 lhs.ty,
1933 op.node,
1934 expr.debug_loc());
1935 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1936 } else {
1937 assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
1938 "built-in binary operators on fat pointers are homogeneous");
1939 trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
1940 }
1941 }
1942 }
1943 }
1944
1945 fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1946 expr: &hir::Expr,
1947 method_call: MethodCall,
1948 lhs: Datum<'tcx, Expr>,
1949 rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
1950 dest: Option<Dest>,
1951 autoref: bool)
1952 -> Result<'blk, 'tcx> {
1953 callee::trans_call_inner(bcx,
1954 expr.debug_loc(),
1955 |bcx, arg_cleanup_scope| {
1956 meth::trans_method_callee(bcx,
1957 method_call,
1958 None,
1959 arg_cleanup_scope)
1960 },
1961 callee::ArgOverloadedOp(lhs, rhs, autoref),
1962 dest)
1963 }
1964
1965 fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1966 expr: &hir::Expr,
1967 callee: &'a hir::Expr,
1968 args: &'a [P<hir::Expr>],
1969 dest: Option<Dest>)
1970 -> Block<'blk, 'tcx> {
1971 debug!("trans_overloaded_call {}", expr.id);
1972 let method_call = MethodCall::expr(expr.id);
1973 let mut all_args = vec!(callee);
1974 all_args.extend(args.iter().map(|e| &**e));
1975 unpack_result!(bcx,
1976 callee::trans_call_inner(bcx,
1977 expr.debug_loc(),
1978 |bcx, arg_cleanup_scope| {
1979 meth::trans_method_callee(
1980 bcx,
1981 method_call,
1982 None,
1983 arg_cleanup_scope)
1984 },
1985 callee::ArgOverloadedCall(all_args),
1986 dest));
1987 bcx
1988 }
1989
1990 pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
1991 expr: &hir::Expr,
1992 t_in: Ty<'tcx>,
1993 t_out: Ty<'tcx>)
1994 -> bool {
1995 if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
1996 return true;
1997 }
1998
1999 match (t_in.builtin_deref(true, ty::NoPreference),
2000 t_out.builtin_deref(true, ty::NoPreference)) {
2001 (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
2002 t_in == t_out
2003 }
2004 _ => {
2005 // This condition isn't redundant with the check for CoercionCast:
2006 // different types can be substituted into the same type, and
2007 // `==` equality can be overly conservative when regions are involved.
2008 t_in == t_out
2009 }
2010 }
2011 }
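// For illustration, casts the predicate above treats as noops (a sketch):
//
//     let p: &i32 = &1;
//     let q = p as *const i32;   // recorded as a CoercionCast
//     let r = q as *mut i32;     // pointee type unchanged: also a noop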
2012
2013 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2014 expr: &hir::Expr,
2015 id: ast::NodeId)
2016 -> DatumBlock<'blk, 'tcx, Expr>
2017 {
2018 use middle::ty::cast::CastTy::*;
2019 use middle::ty::cast::IntTy::*;
2020
2021 fn int_cast(bcx: Block,
2022 lldsttype: Type,
2023 llsrctype: Type,
2024 llsrc: ValueRef,
2025 signed: bool)
2026 -> ValueRef
2027 {
2028 let _icx = push_ctxt("int_cast");
2029 let srcsz = llsrctype.int_width();
2030 let dstsz = lldsttype.int_width();
2031 return if dstsz == srcsz {
2032 BitCast(bcx, llsrc, lldsttype)
2033 } else if srcsz > dstsz {
2034 TruncOrBitCast(bcx, llsrc, lldsttype)
2035 } else if signed {
2036 SExtOrBitCast(bcx, llsrc, lldsttype)
2037 } else {
2038 ZExtOrBitCast(bcx, llsrc, lldsttype)
2039 }
2040 }
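// For illustration, how the helper above maps onto source casts (a sketch):
//
//     let a = 300i32 as i8;   // narrowing: Trunc        (a == 44)
//     let b = -1i8 as i32;    // signed widening: SExt   (b == -1)
//     let c = 200u8 as u32;   // unsigned widening: ZExt (c == 200)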
2041
2042 fn float_cast(bcx: Block,
2043 lldsttype: Type,
2044 llsrctype: Type,
2045 llsrc: ValueRef)
2046 -> ValueRef
2047 {
2048 let _icx = push_ctxt("float_cast");
2049 let srcsz = llsrctype.float_width();
2050 let dstsz = lldsttype.float_width();
2051 return if dstsz > srcsz {
2052 FPExt(bcx, llsrc, lldsttype)
2053 } else if srcsz > dstsz {
2054 FPTrunc(bcx, llsrc, lldsttype)
2055 } else { llsrc };
2056 }
2057
2058 let _icx = push_ctxt("trans_cast");
2059 let mut bcx = bcx;
2060 let ccx = bcx.ccx();
2061
2062 let t_in = expr_ty_adjusted(bcx, expr);
2063 let t_out = node_id_type(bcx, id);
2064
2065 debug!("trans_cast({:?} as {:?})", t_in, t_out);
2066 let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
2067 let ll_t_out = type_of::arg_type_of(ccx, t_out);
2068 // Convert the value to be cast into a ValueRef, either by-ref or
2069 // by-value as appropriate given its type:
2070 let mut datum = unpack_datum!(bcx, trans(bcx, expr));
2071
2072 let datum_ty = monomorphize_type(bcx, datum.ty);
2073
2074 if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
2075 datum.ty = t_out;
2076 return DatumBlock::new(bcx, datum);
2077 }
2078
2079 if type_is_fat_ptr(bcx.tcx(), t_in) {
2080 assert!(datum.kind.is_by_ref());
2081 if type_is_fat_ptr(bcx.tcx(), t_out) {
2082 return DatumBlock::new(bcx, Datum::new(
2083 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
2084 t_out,
2085 Rvalue::new(ByRef)
2086 )).to_expr_datumblock();
2087 } else {
2088 // Return the address
2089 return immediate_rvalue_bcx(bcx,
2090 PointerCast(bcx,
2091 Load(bcx, get_dataptr(bcx, datum.val)),
2092 ll_t_out),
2093 t_out).to_expr_datumblock();
2094 }
2095 }
2096
2097 let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
2098 let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
2099
2100 let (llexpr, signed) = if let Int(CEnum) = r_t_in {
2101 let repr = adt::represent_type(ccx, t_in);
2102 let datum = unpack_datum!(
2103 bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
2104 let llexpr_ptr = datum.to_llref();
2105 let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
2106 ll_t_in = val_ty(discr);
2107 (discr, adt::is_discr_signed(&*repr))
2108 } else {
2109 (datum.to_llscalarish(bcx), t_in.is_signed())
2110 };
2111
2112 let newval = match (r_t_in, r_t_out) {
2113 (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
2114 PointerCast(bcx, llexpr, ll_t_out)
2115 }
2116 (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
2117 (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
2118
2119 (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
2120 (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
2121 (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
2122 (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
2123 (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
2124 (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
2125
2126 _ => ccx.sess().span_bug(expr.span,
2127 &format!("translating unsupported cast: \
2128 {:?} -> {:?}",
2129 t_in,
2130 t_out)
2131 )
2132 };
2133 return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
2134 }
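// For illustration, a C-like enum cast taking the `Int(CEnum)` path above
// (a sketch):
//
//     enum Flag { Off = 0, On = 1 }
//     let n = Flag::On as u8;   // loads the discriminant, then int_cast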
2135
2136 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2137 expr: &hir::Expr,
2138 op: hir::BinOp,
2139 dst: &hir::Expr,
2140 src: &hir::Expr)
2141 -> Block<'blk, 'tcx> {
2142 let _icx = push_ctxt("trans_assign_op");
2143 let mut bcx = bcx;
2144
2145 debug!("trans_assign_op(expr={:?})", expr);
2146
2147 // User-defined operator methods cannot be used with `+=` etc. right now
2148 assert!(!bcx.tcx().is_method_call(expr.id));
2149
2150 // Evaluate LHS (destination), which should be an lvalue
2151 let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
2152 assert!(!bcx.fcx.type_needs_drop(dst.ty));
2153 let lhs = load_ty(bcx, dst.val, dst.ty);
2154 let lhs = immediate_rvalue(lhs, dst.ty);
2155
2156 // Evaluate RHS - FIXME(#28160) this sucks
2157 let rhs = unpack_datum!(bcx, trans(bcx, &*src));
2158 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
2159
2160 // Perform computation and store the result
2161 let result_datum = unpack_datum!(
2162 bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
2163 return result_datum.store_to(bcx, dst.val);
2164 }
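// For illustration (a sketch): for `x += 1` with `x: i32`, the code above
// evaluates `x` once as an lvalue, loads it, reuses the scalar `+`
// translation (including its overflow check), and stores the result back:
//
//     let mut x = 1i32;
//     x += 1;   // load x; checked add; store into x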
2165
2166 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2167 datum: Datum<'tcx, Expr>,
2168 expr: &hir::Expr)
2169 -> DatumBlock<'blk, 'tcx, Expr> {
2170 let mut bcx = bcx;
2171
2172 // Ensure cleanup of `datum` if not already scheduled and obtain
2173 // a "by ref" pointer.
2174 let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2175
2176 // Compute final type. Note that we are loose with the region and
2177 // mutability, since those things don't matter in trans.
2178 let referent_ty = lv_datum.ty;
2179 let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2180
2181 // Get the pointer.
2182 let llref = lv_datum.to_llref();
2183
2184 // Construct the resulting datum, using what was the "by ref"
2185 // ValueRef of type `referent_ty` to be the "by value" ValueRef
2186 // of type `&referent_ty`.
2187 // Pointers to DST types are non-immediate, and therefore still use ByRef.
2188 let kind = if type_is_sized(bcx.tcx(), referent_ty) { ByValue } else { ByRef };
2189 DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(kind))))
2190 }
2191
2192 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2193 expr: &hir::Expr,
2194 datum: Datum<'tcx, Expr>,
2195 times: usize)
2196 -> DatumBlock<'blk, 'tcx, Expr> {
2197 let mut bcx = bcx;
2198 let mut datum = datum;
2199 for i in 0..times {
2200 let method_call = MethodCall::autoderef(expr.id, i as u32);
2201 datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2202 }
2203 DatumBlock { bcx: bcx, datum: datum }
2204 }
2205
2206 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2207 expr: &hir::Expr,
2208 datum: Datum<'tcx, Expr>,
2209 method_call: MethodCall)
2210 -> DatumBlock<'blk, 'tcx, Expr> {
2211 let ccx = bcx.ccx();
2212
2213 debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
2214 expr,
2215 datum.to_string(ccx),
2216 method_call);
2217
2218 let mut bcx = bcx;
2219
2220 // Check for overloaded deref.
2221 let method_ty = ccx.tcx()
2222 .tables
2223 .borrow()
2224 .method_map
2225 .get(&method_call).map(|method| method.ty);
2226
2227 let datum = match method_ty {
2228 Some(method_ty) => {
2229 let method_ty = monomorphize_type(bcx, method_ty);
2230
2231 // Overloaded. Evaluate `trans_overloaded_op`, which will
2232 // invoke the user's deref() method, which basically
2233 // converts from the `Smaht<T>` pointer that we have into
2234 // a `&T` pointer. We can then proceed down the normal
2235 // path (below) to dereference that `&T`.
2236 let datum = if method_call.autoderef == 0 {
2237 datum
2238 } else {
2239 // Always perform an AutoPtr when applying an overloaded auto-deref
2240 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
2241 };
2242
2243 let ref_ty = // invoked methods have their LB regions instantiated
2244 ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
2245 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2246
2247 unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
2248 datum, None, Some(SaveIn(scratch.val)),
2249 false));
2250 scratch.to_expr_datum()
2251 }
2252 None => {
2253 // Not overloaded. We already have a pointer we know how to deref.
2254 datum
2255 }
2256 };
2257
2258 let r = match datum.ty.sty {
2259 ty::TyBox(content_ty) => {
2260 // Make sure we have an lvalue datum here to get the
2261 // proper cleanups scheduled
2262 let datum = unpack_datum!(
2263 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2264
2265 if type_is_sized(bcx.tcx(), content_ty) {
2266 let ptr = load_ty(bcx, datum.val, datum.ty);
2267 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
2268 } else {
2269 // A fat pointer and a DST lvalue have the same representation,
2270 // just different types. Since there is no temporary for `*e`
2271 // here (because it is unsized), we cannot emulate the sized
2272 // object code path for running drop glue and free. Instead,
2273 // we schedule cleanup for `e`, turning it into an lvalue.
2274
2275 let lval = Lvalue::new("expr::deref_once ty_uniq");
2276 let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
2277 DatumBlock::new(bcx, datum)
2278 }
2279 }
2280
2281 ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
2282 ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
2283 let lval = Lvalue::new("expr::deref_once ptr");
2284 if type_is_sized(bcx.tcx(), content_ty) {
2285 let ptr = datum.to_llscalarish(bcx);
2286
2287 // Always generate an lvalue datum, even if datum.mode is
2288 // an rvalue. This is because datum.mode is only an
2289 // rvalue for non-owning pointers like &T or *T, in which
2290 // case cleanup *is* scheduled elsewhere, by the true
2291 // owner (or, in the case of *T, by the user).
2292 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
2293 } else {
2294 // A fat pointer and a DST lvalue have the same representation,
2295 // just different types.
2296 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
2297 }
2298 }
2299
2300 _ => {
2301 bcx.tcx().sess.span_bug(
2302 expr.span,
2303 &format!("deref invoked on expr of invalid type {:?}",
2304 datum.ty));
2305 }
2306 };
2307
2308 debug!("deref_once(expr={}, method_call={:?}, result={})",
2309 expr.id, method_call, r.datum.to_string(ccx));
2310
2311 return r;
2312 }
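// For illustration, an overloaded deref handled above (a sketch):
//
//     use std::rc::Rc;
//     let rc = Rc::new(5i32);
//     let v = *rc;   // invokes Deref::deref(&rc) -> &i32, then derefs that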
2313
2314 #[derive(Debug)]
2315 enum OverflowOp {
2316 Add,
2317 Sub,
2318 Mul,
2319 Shl,
2320 Shr,
2321 }
2322
2323 impl OverflowOp {
2324 fn codegen_strategy(&self) -> OverflowCodegen {
2325 use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
2326 match *self {
2327 OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2328 OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2329 OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
2330
2331 OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2332 OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
2333 }
2334 }
2335 }
2336
2337 enum OverflowCodegen {
2338 ViaIntrinsic(OverflowOpViaIntrinsic),
2339 ViaInputCheck(OverflowOpViaInputCheck),
2340 }
2341
2342 enum OverflowOpViaInputCheck { Shl, Shr, }
2343
2344 #[derive(Debug)]
2345 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
2346
2347 impl OverflowOpViaIntrinsic {
2348 fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2349 let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2350 bcx.ccx().get_intrinsic(&name)
2351 }
2352 fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
2353 use syntax::ast::IntTy::*;
2354 use syntax::ast::UintTy::*;
2355 use middle::ty::{TyInt, TyUint};
2356
2357 let new_sty = match ty.sty {
2358 TyInt(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] {
2359 "32" => TyInt(TyI32),
2360 "64" => TyInt(TyI64),
2361 _ => panic!("unsupported target word size")
2362 },
2363 TyUint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] {
2364 "32" => TyUint(TyU32),
2365 "64" => TyUint(TyU64),
2366 _ => panic!("unsupported target word size")
2367 },
2368 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
2369 _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
2370 *self)
2371 };
2372
2373 match *self {
2374 OverflowOpViaIntrinsic::Add => match new_sty {
2375 TyInt(TyI8) => "llvm.sadd.with.overflow.i8",
2376 TyInt(TyI16) => "llvm.sadd.with.overflow.i16",
2377 TyInt(TyI32) => "llvm.sadd.with.overflow.i32",
2378 TyInt(TyI64) => "llvm.sadd.with.overflow.i64",
2379
2380 TyUint(TyU8) => "llvm.uadd.with.overflow.i8",
2381 TyUint(TyU16) => "llvm.uadd.with.overflow.i16",
2382 TyUint(TyU32) => "llvm.uadd.with.overflow.i32",
2383 TyUint(TyU64) => "llvm.uadd.with.overflow.i64",
2384
2385 _ => unreachable!(),
2386 },
2387 OverflowOpViaIntrinsic::Sub => match new_sty {
2388 TyInt(TyI8) => "llvm.ssub.with.overflow.i8",
2389 TyInt(TyI16) => "llvm.ssub.with.overflow.i16",
2390 TyInt(TyI32) => "llvm.ssub.with.overflow.i32",
2391 TyInt(TyI64) => "llvm.ssub.with.overflow.i64",
2392
2393 TyUint(TyU8) => "llvm.usub.with.overflow.i8",
2394 TyUint(TyU16) => "llvm.usub.with.overflow.i16",
2395 TyUint(TyU32) => "llvm.usub.with.overflow.i32",
2396 TyUint(TyU64) => "llvm.usub.with.overflow.i64",
2397
2398 _ => unreachable!(),
2399 },
2400 OverflowOpViaIntrinsic::Mul => match new_sty {
2401 TyInt(TyI8) => "llvm.smul.with.overflow.i8",
2402 TyInt(TyI16) => "llvm.smul.with.overflow.i16",
2403 TyInt(TyI32) => "llvm.smul.with.overflow.i32",
2404 TyInt(TyI64) => "llvm.smul.with.overflow.i64",
2405
2406 TyUint(TyU8) => "llvm.umul.with.overflow.i8",
2407 TyUint(TyU16) => "llvm.umul.with.overflow.i16",
2408 TyUint(TyU32) => "llvm.umul.with.overflow.i32",
2409 TyUint(TyU64) => "llvm.umul.with.overflow.i64",
2410
2411 _ => unreachable!(),
2412 },
2413 }
2414 }
2415
2416 fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2417 info: NodeIdAndSpan,
2418 lhs_t: Ty<'tcx>, lhs: ValueRef,
2419 rhs: ValueRef,
2420 binop_debug_loc: DebugLoc)
2421 -> (Block<'blk, 'tcx>, ValueRef) {
2422 let llfn = self.to_intrinsic(bcx, lhs_t);
2423
2424 let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
2425 let result = ExtractValue(bcx, val, 0); // iN operation result
2426 let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
2427
2428 let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
2429 binop_debug_loc);
2430
2431 let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2432 Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2433 None, binop_debug_loc);
2434
2435 let bcx =
2436 base::with_cond(bcx, cond, |bcx|
2437 controlflow::trans_fail(bcx, info,
2438 InternedString::new("arithmetic operation overflowed")));
2439
2440 (bcx, result)
2441 }
2442 }
2443
2444 impl OverflowOpViaInputCheck {
2445 fn build_with_input_check<'blk, 'tcx>(&self,
2446 bcx: Block<'blk, 'tcx>,
2447 info: NodeIdAndSpan,
2448 lhs_t: Ty<'tcx>,
2449 lhs: ValueRef,
2450 rhs: ValueRef,
2451 binop_debug_loc: DebugLoc)
2452 -> (Block<'blk, 'tcx>, ValueRef)
2453 {
2454 let lhs_llty = val_ty(lhs);
2455 let rhs_llty = val_ty(rhs);
2456
2457 // Panic if any bits are set outside of the bits that we always
2458 // mask in.
2459 //
2460 // Note that the mask's value is derived from the LHS type
2461 // (since that is where the 32/64 distinction is relevant) but
2462 // the mask's type must match the RHS type (since they will
2463 // both be fed into an and-binop).
2464 let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
2465
2466 let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2467 let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
2468 let result = match *self {
2469 OverflowOpViaInputCheck::Shl =>
2470 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2471 OverflowOpViaInputCheck::Shr =>
2472 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2473 };
2474 let bcx =
2475 base::with_cond(bcx, cond, |bcx|
2476 controlflow::trans_fail(bcx, info,
2477 InternedString::new("shift operation overflowed")));
2478
2479 (bcx, result)
2480 }
2481 }
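// For illustration, the input check above applied to `i32` shifts
// (a sketch): valid shift amounts are 0..31, so the inverted mask is !31;
// any RHS with bits set outside the low five takes the panic path.
//
//     let n = 31u32; let a = 1i32 << n;   // ok: 31 & !31 == 0
//     // with n = 32, this panics: "shift operation overflowed"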
2482
2483 // Check whether an integer is nonzero, or whether any element of a vector is nonzero.
2484 fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2485 value: ValueRef,
2486 binop_debug_loc: DebugLoc) -> ValueRef {
2487 let llty = val_ty(value);
2488 let kind = llty.kind();
2489 match kind {
2490 TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
2491 TypeKind::Vector => {
2492 // Check if any elements of the vector are nonzero by treating
2493 // it as a wide integer and checking if the integer is nonzero.
2494 let width = llty.vector_length() as u64 * llty.element_type().int_width();
2495 let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
2496 build_nonzero_check(bcx, int_value, binop_debug_loc)
2497 },
2498 _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
2499 }
2500 }
2501
2502 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2503 lhs_t: Ty<'tcx>, lhs: ValueRef,
2504 rhs: ValueRef,
2505 binop_debug_loc: DebugLoc)
2506 -> (Block<'blk, 'tcx>, ValueRef) {
2507 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2508 if bcx.ccx().check_overflow() {
2509
2510 match oop.codegen_strategy() {
2511 OverflowCodegen::ViaIntrinsic(oop) =>
2512 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2513 OverflowCodegen::ViaInputCheck(oop) =>
2514 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2515 }
2516 } else {
2517 let res = match oop {
2518 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2519 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2520 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2521
2522 OverflowOp::Shl =>
2523 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2524 OverflowOp::Shr =>
2525 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2526 };
2527 (bcx, res)
2528 }
2529 }
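// For illustration of the two modes above (a sketch):
//
//     let a = 200u8;
//     let b = 100u8;
//     let c = a + b;   // checks on:  panic "arithmetic operation overflowed"
//                      // checks off: plain Add wraps to 44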
2530
2531 /// We categorize expressions into three kinds. The distinction between
2532 /// lvalue/rvalue is fundamental to the language. The distinction between the
2533 /// two kinds of rvalues is an artifact of trans which reflects how we will
2534 /// generate code for that kind of expression. See trans/expr.rs for more
2535 /// information.
2536 #[derive(Copy, Clone)]
2537 enum ExprKind {
2538 Lvalue,
2539 RvalueDps,
2540 RvalueDatum,
2541 RvalueStmt
2542 }
2543
2544 fn expr_kind(tcx: &ty::ctxt, expr: &hir::Expr) -> ExprKind {
2545 if tcx.is_method_call(expr.id) {
2546 // Overloaded operations are generally calls, and hence they are
2547 // generated via DPS, but there are a few exceptions:
2548 return match expr.node {
2549 // `a += b` has a unit result.
2550 hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
2551
2552 // the deref method invoked for `*a` always yields an `&T`
2553 hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
2554
2555 // the index method invoked for `a[i]` always yields an `&T`
2556 hir::ExprIndex(..) => ExprKind::Lvalue,
2557
2558 // in the general case, result could be any type, use DPS
2559 _ => ExprKind::RvalueDps
2560 };
2561 }
2562
2563 match expr.node {
2564 hir::ExprPath(..) => {
2565 match tcx.resolve_expr(expr) {
2566 def::DefStruct(_) | def::DefVariant(..) => {
2567 if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
2568 // ctor function
2569 ExprKind::RvalueDatum
2570 } else {
2571 ExprKind::RvalueDps
2572 }
2573 }
2574
2575 // Special case: a unit-like struct's constructor is used without `()` at the
2576 // end (just `UnitStruct`), which means this is an ExprPath to a DefFn. But in
2577 // the case of unit structs this should not be interpreted as a function
2578 // pointer but as a call to the constructor.
2579 def::DefFn(_, true) => ExprKind::RvalueDps,
2580
2581 // Fn pointers are just scalar values.
2582 def::DefFn(..) | def::DefMethod(..) => ExprKind::RvalueDatum,
2583
2584 // Note: there is actually a good case to be made that
2585 // DefArgs, particularly those of immediate type, ought to be
2586 // considered rvalues.
2587 def::DefStatic(..) |
2588 def::DefUpvar(..) |
2589 def::DefLocal(..) => ExprKind::Lvalue,
2590
2591 def::DefConst(..) |
2592 def::DefAssociatedConst(..) => ExprKind::RvalueDatum,
2593
2594 def => {
2595 tcx.sess.span_bug(
2596 expr.span,
2597 &format!("uncategorized def for expr {}: {:?}",
2598 expr.id,
2599 def));
2600 }
2601 }
2602 }
2603
2604 hir::ExprUnary(hir::UnDeref, _) |
2605 hir::ExprField(..) |
2606 hir::ExprTupField(..) |
2607 hir::ExprIndex(..) => {
2608 ExprKind::Lvalue
2609 }
2610
2611 hir::ExprCall(..) |
2612 hir::ExprMethodCall(..) |
2613 hir::ExprStruct(..) |
2614 hir::ExprRange(..) |
2615 hir::ExprTup(..) |
2616 hir::ExprIf(..) |
2617 hir::ExprMatch(..) |
2618 hir::ExprClosure(..) |
2619 hir::ExprBlock(..) |
2620 hir::ExprRepeat(..) |
2621 hir::ExprVec(..) => {
2622 ExprKind::RvalueDps
2623 }
2624
2625 hir::ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {
2626 ExprKind::RvalueDps
2627 }
2628
2629 hir::ExprBreak(..) |
2630 hir::ExprAgain(..) |
2631 hir::ExprRet(..) |
2632 hir::ExprWhile(..) |
2633 hir::ExprLoop(..) |
2634 hir::ExprAssign(..) |
2635 hir::ExprInlineAsm(..) |
2636 hir::ExprAssignOp(..) => {
2637 ExprKind::RvalueStmt
2638 }
2639
2640 hir::ExprLit(_) | // Note: LitStr is carved out above
2641 hir::ExprUnary(..) |
2642 hir::ExprBox(_) |
2643 hir::ExprAddrOf(..) |
2644 hir::ExprBinary(..) |
2645 hir::ExprCast(..) => {
2646 ExprKind::RvalueDatum
2647 }
2648 }
2649 }
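// For illustration, how expr_kind classifies a few expressions (a sketch):
//
//     a.b            // field access             -> ExprKind::Lvalue
//     x + 1          // non-overloaded binop     -> ExprKind::RvalueDatum
//     S { f: 0 }     // struct literal           -> ExprKind::RvalueDps
//     loop { }       // no meaningful result     -> ExprKind::RvalueStmt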