// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! # Translation of Expressions
//!
//! The expr module handles translation of expressions. The most general
//! translation routine is `trans()`, which will translate an expression
//! into a datum. `trans_into()` is also available, which will translate
//! an expression and write the result directly into memory, sometimes
//! avoiding the need for a temporary stack slot. Finally,
//! `trans_to_lvalue()` is available if you'd like to ensure that the
//! result has cleanup scheduled.
//!
//! Internally, each of these functions dispatches to various other
//! expression functions depending on the kind of expression. We divide
//! up expressions into:
//!
//! - **Datum expressions:** Those that most naturally yield values.
//!   Examples would be `22`, `box x`, or `a + b` (when not overloaded).
//! - **DPS expressions:** Those that most naturally write into a location
//!   in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
//! - **Statement expressions:** Those that do not generate a meaningful
//!   result. Examples would be `while { ... }` or `return 44`.
//!
//! Public entry points:
//!
//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
//!   storing the result into `dest`. This is the preferred form, if you
//!   can manage it.
//!
//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
//!   `Datum` with the result. You can then store the datum, inspect
//!   the value, etc. This may introduce temporaries if the datum is a
//!   structural type.
//!
//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
//!   expression and ensures that the result has a cleanup associated with it,
//!   creating a temporary stack slot if necessary.
//!
//! - `trans_var -> Datum`: looks up a local variable, upvar or static.
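//!
//! As a rough sketch of how a caller picks between these entry points
//! (illustrative only -- `emit_expr` and its `Option<ValueRef>` slot are
//! invented for this example, not names used in this module):
//!
//! ```ignore
//! fn emit_expr(bcx: Block, expr: &hir::Expr, slot: Option<ValueRef>) -> Block {
//!     match slot {
//!         // A destination already exists: write straight into it.
//!         Some(lldest) => trans_into(bcx, expr, SaveIn(lldest)),
//!         // No destination: produce a Datum (possibly via a temporary).
//!         None => trans(bcx, expr).bcx,
//!     }
//! }
//! ```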

#![allow(non_camel_case_types)]

pub use self::Dest::*;
use self::lazy_binop_ty::*;

use llvm::{self, ValueRef, TypeKind};
use middle::const_qualif::ConstQualif;
use rustc::hir::def::Def;
use rustc::ty::subst::Substs;
use {_match, abi, adt, asm, base, closure, consts, controlflow};
use base::*;
use build::*;
use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
use cleanup::{self, CleanupMethods, DropHintMethods};
use common::*;
use datum::*;
use debuginfo::{self, DebugLoc, ToDebugLoc};
use declare;
use glue;
use machine;
use tvec;
use type_of;
use value::Value;
use Disr;
use rustc::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
use rustc::ty::adjustment::CustomCoerceUnsized;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::MethodCall;
use rustc::ty::cast::{CastKind, CastTy};
use util::common::indenter;
use machine::{llsize_of, llsize_of_alloc};
use type_::Type;
use rustc::hir;
use syntax::{ast, codemap};
use syntax::parse::token::InternedString;

use std::fmt;
use std::mem;

// These are passed around by the code generating functions to track the
// destination of a computation's value.

#[derive(Copy, Clone, PartialEq)]
pub enum Dest {
    SaveIn(ValueRef),
    Ignore,
}

impl fmt::Debug for Dest {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)),
            Ignore => f.write_str("Ignore")
        }
    }
}

/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
/// better optimized LLVM code.
pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              expr: &hir::Expr,
                              dest: Dest)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    if adjustment_required(bcx, expr) {
        // use trans, which may be less efficient but
        // which will perform the adjustments:
        let datum = unpack_datum!(bcx, trans(bcx, expr));
        return datum.store_to_dest(bcx, dest, expr.id);
    }

    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
            if let SaveIn(lldest) = dest {
                match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                                       bcx.fcx.param_substs,
                                                       consts::TrueConst::No) {
                    Ok(global) => {
                        // Cast pointer to destination, because constants
                        // have different types.
                        let lldest = PointerCast(bcx, lldest, val_ty(global));
                        memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
                        return bcx;
                    }
                    Err(consts::ConstEvalFailure::Runtime(_)) => {
                        // in case const evaluation errors, translate normally
                        // debug assertions catch the same errors
                    }
                    Err(consts::ConstEvalFailure::Compiletime(_)) => {
                        return bcx;
                    }
                }
            }
        }

        // If we see a const here, that's because it evaluates to a type with zero size. We
        // should be able to just discard it, since const expressions are guaranteed not to
        // have side effects. This seems to be reached through tuple struct constructors being
        // passed zero-size constants.
        if let hir::ExprPath(..) = expr.node {
            match bcx.def(expr.id) {
                Def::Const(_) | Def::AssociatedConst(_) => {
                    assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
                    return bcx;
                }
                _ => {}
            }
        }

        // Even if we don't have a value to emit, and the expression
        // doesn't have any side-effects, we still have to translate the
        // body of any closures.
        // FIXME: Find a better way of handling this case.
    } else {
        // The only way we're going to see a `const` at this point is if
        // it prefers in-place instantiation, likely because it contains
        // `[x; N]` somewhere within.
        match expr.node {
            hir::ExprPath(..) => {
                match bcx.def(expr.id) {
                    Def::Const(did) | Def::AssociatedConst(did) => {
                        let empty_substs = bcx.tcx().mk_substs(Substs::empty());
                        let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
                                                                empty_substs);
                        // Temporarily get cleanup scopes out of the way,
                        // as they require sub-expressions to be contained
                        // inside the current AST scope.
                        // These should record no cleanups anyways, `const`
                        // can't have destructors.
                        let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                  vec![]);
                        // Lock emitted debug locations to the location of
                        // the constant reference expression.
                        debuginfo::with_source_location_override(bcx.fcx,
                                                                 expr.debug_loc(),
                                                                 || {
                            bcx = trans_into(bcx, const_expr, dest)
                        });
                        let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
                                                  scopes);
                        assert!(scopes.is_empty());
                        return bcx;
                    }
                    _ => {}
                }
            }
            _ => {}
        }
    }

    debug!("trans_into() expr={:?}", expr);

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);

    let kind = expr_kind(bcx.tcx(), expr);
    bcx = match kind {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
        }
        ExprKind::RvalueDps => {
            trans_rvalue_dps_unadjusted(bcx, expr, dest)
        }
        ExprKind::RvalueStmt => {
            trans_rvalue_stmt_unadjusted(bcx, expr)
        }
    };

    bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}

/// Translates an expression, returning a datum (and new block) encapsulating the result. When
/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
/// stack.
pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                         expr: &hir::Expr)
                         -> DatumBlock<'blk, 'tcx, Expr> {
    debug!("trans(expr={:?})", expr);

    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
    let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
    let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
        match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
                                               bcx.fcx.param_substs,
                                               consts::TrueConst::No) {
            Ok(global) => {
                if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
                    // Is borrowed as 'static, must return lvalue.

                    // Cast pointer to global, because constants have different types.
                    let const_ty = expr_ty_adjusted(bcx, expr);
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
                    return DatumBlock::new(bcx, datum.to_expr_datum());
                }

                // Otherwise, keep around and perform adjustments, if needed.
                let const_ty = if adjusted_global {
                    expr_ty_adjusted(bcx, expr)
                } else {
                    expr_ty(bcx, expr)
                };

                // This could use a better heuristic.
                Some(if type_is_immediate(bcx.ccx(), const_ty) {
                    // Cast pointer to global, because constants have different types.
                    let llty = type_of::type_of(bcx.ccx(), const_ty);
                    let global = PointerCast(bcx, global, llty.ptr_to());
                    // Maybe just get the value directly, instead of loading it?
                    immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
                } else {
                    let scratch = alloc_ty(bcx, const_ty, "const");
                    call_lifetime_start(bcx, scratch);
                    let lldest = if !const_ty.is_structural() {
                        // Cast pointer to slot, because constants have different types.
                        PointerCast(bcx, scratch, val_ty(global))
                    } else {
                        // In this case, memcpy_ty calls llvm.memcpy after casting both
                        // source and destination to i8*, so we don't need any casts.
                        scratch
                    };
                    memcpy_ty(bcx, lldest, global, const_ty);
                    Datum::new(scratch, const_ty, Rvalue::new(ByRef))
                })
            }
            Err(consts::ConstEvalFailure::Runtime(_)) => {
                // in case const evaluation errors, translate normally
                // debug assertions catch the same errors
                None
            }
            Err(consts::ConstEvalFailure::Compiletime(_)) => {
                // generate a dummy llvm value
                let const_ty = expr_ty(bcx, expr);
                let llty = type_of::type_of(bcx.ccx(), const_ty);
                let dummy = C_undef(llty.ptr_to());
                Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
            }
        }
    } else {
        None
    };

    let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                          expr.id,
                                                                          expr.span,
                                                                          false);
    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
    let datum = match global {
        Some(rvalue) => rvalue.to_expr_datum(),
        None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
    };
    let datum = if adjusted_global {
        datum // trans::consts already performed adjustments.
    } else {
        unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
    };
    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
    return DatumBlock::new(bcx, datum);
}

pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
}

pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}

pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
    Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
    Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
}

fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr) -> bool {
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => { return false; }
        Some(adj) => adj
    };

    // Don't skip a conversion from Box<T> to &T, etc.
    if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
        return true;
    }

    match adjustment {
        AdjustReifyFnPointer => true,
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
            false
        }
        AdjustDerefRef(ref adj) => {
            // We are a bit paranoid about adjustments and thus might have a re-
            // borrow here which merely derefs and then refs again (it might have
            // a different region or mutability, but we don't care here).
            !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
        }
    }
}

/// Helper for trans that applies adjustments from `expr` to `datum`, which should be the
/// unadjusted translation of `expr`.
fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 expr: &hir::Expr,
                                 datum: Datum<'tcx, Expr>)
                                 -> DatumBlock<'blk, 'tcx, Expr>
{
    let mut bcx = bcx;
    let mut datum = datum;
    let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
        None => {
            return DatumBlock::new(bcx, datum);
        }
        Some(adj) => adj
    };
    debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
           expr, datum, adjustment);
    match adjustment {
        AdjustReifyFnPointer => {
            match datum.ty.sty {
                ty::TyFnDef(def_id, substs, _) => {
                    datum = Callee::def(bcx.ccx(), def_id, substs)
                        .reify(bcx.ccx()).to_expr_datum();
                }
                _ => {
                    bug!("{} cannot be reified to a fn ptr", datum.ty)
                }
            }
        }
        AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
            // purely a type-level thing
        }
        AdjustDerefRef(ref adj) => {
            let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
                // We are a bit paranoid about adjustments and thus might have a re-
                // borrow here which merely derefs and then refs again (it might have
                // a different region or mutability, but we don't care here).

                // Don't skip a conversion from Box<T> to &T, etc.
                if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
                    // Don't skip an overloaded deref.
                    0
                } else {
                    1
                }
            } else {
                0
            };

            if adj.autoderefs > skip_reborrows {
                // Schedule cleanup.
                let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
                datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
                                                          lval.to_expr_datum(),
                                                          adj.autoderefs - skip_reborrows));
            }

            // (You might think there is a more elegant way to do this than a
            // skip_reborrows bool, but then you remember that the borrow checker exists).
            if skip_reborrows == 0 && adj.autoref.is_some() {
                datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
            }

            if let Some(target) = adj.unsize {
                // We do not arrange cleanup ourselves; if we already are an
                // L-value, then cleanup will have already been scheduled (and
                // the `datum.to_rvalue_datum` call below will emit code to zero
                // the drop flag when moving out of the L-value). If we are an
                // R-value, then we do not need to schedule cleanup.
                let source_datum = unpack_datum!(bcx,
                    datum.to_rvalue_datum(bcx, "__coerce_source"));

                let target = bcx.monomorphize(&target);

                let scratch = alloc_ty(bcx, target, "__coerce_target");
                call_lifetime_start(bcx, scratch);
                let target_datum = Datum::new(scratch, target,
                                              Rvalue::new(ByRef));
                bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
                datum = Datum::new(scratch, target,
                                   RvalueExpr(Rvalue::new(ByRef)));
            }
        }
    }
    debug!("after adjustments, datum={:?}", datum);
    DatumBlock::new(bcx, datum)
}

fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              span: codemap::Span,
                              source: Datum<'tcx, Rvalue>,
                              target: Datum<'tcx, Rvalue>)
                              -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    debug!("coerce_unsized({:?} -> {:?})", source, target);

    match (&source.ty.sty, &target.ty.sty) {
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            let (inner_source, inner_target) = (a, b);

            let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
                // Normally, the source is a thin pointer and we are
                // adding extra info to make a fat pointer. The exception
                // is when we are upcasting an existing object fat pointer
                // to use a different vtable. In that case, we want to
                // load out the original data pointer so we can repackage
                // it.
                (Load(bcx, get_dataptr(bcx, source.val)),
                 Some(Load(bcx, get_meta(bcx, source.val))))
            } else {
                let val = if source.kind.is_by_ref() {
                    load_ty(bcx, source.val, source.ty)
                } else {
                    source.val
                };
                (val, None)
            };

            let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info);

            // Compute the base pointer. This doesn't change the pointer value,
            // but merely its type.
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
            let base = PointerCast(bcx, base, ptr_ty);

            Store(bcx, base, get_dataptr(bcx, target.val));
            Store(bcx, info, get_meta(bcx, target.val));
        }

        // This can be extended to enums and tuples in the future.
        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
        (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
            assert_eq!(def_id_a, def_id_b);

            // The target is already by-ref because it's to be written to.
            let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
            assert!(target.kind.is_by_ref());

            let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);

            let repr_source = adt::represent_type(bcx.ccx(), source.ty);
            let src_fields = match &*repr_source {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => span_bug!(span,
                               "Non univariant struct? (repr_source: {:?})",
                               repr_source),
            };

            let repr_target = adt::represent_type(bcx.ccx(), target.ty);
            let target_fields = match &*repr_target {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => span_bug!(span,
                               "Non univariant struct? (repr_target: {:?})",
                               repr_target),
            };

            let coerce_index = match kind {
                CustomCoerceUnsized::Struct(i) => i
            };
            assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());

            let source_val = adt::MaybeSizedValue::sized(source.val);
            let target_val = adt::MaybeSizedValue::sized(target.val);

            let iter = src_fields.iter().zip(target_fields).enumerate();
            for (i, (src_ty, target_ty)) in iter {
                let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
                let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);

                // If this is the field we need to coerce, recurse on it.
                if i == coerce_index {
                    coerce_unsized(bcx, span,
                                   Datum::new(ll_source, src_ty,
                                              Rvalue::new(ByRef)),
                                   Datum::new(ll_target, target_ty,
                                              Rvalue::new(ByRef)));
                } else {
                    // Otherwise, simply copy the data from the source.
                    assert!(src_ty.is_phantom_data() || src_ty == target_ty);
                    memcpy_ty(bcx, ll_target, ll_source, src_ty);
                }
            }
        }
        _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}",
                  source.ty,
                  target.ty)
    }
    bcx
}

/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
/// that the expr represents.
///
/// If this expression is an rvalue, this implies introducing a temporary. In other words,
/// something like `x().f` is translated into roughly the equivalent of
///
///    { tmp = x(); tmp.f }
pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   name: &str)
                                   -> DatumBlock<'blk, 'tcx, Lvalue> {
    let mut bcx = bcx;
    let datum = unpack_datum!(bcx, trans(bcx, expr));
    return datum.to_lvalue_datum(bcx, name, expr.id);
}

/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
/// directly.
fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                expr: &hir::Expr)
                                -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;

    debug!("trans_unadjusted(expr={:?})", expr);
    let _indenter = indenter();

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    return match expr_kind(bcx.tcx(), expr) {
        ExprKind::Lvalue | ExprKind::RvalueDatum => {
            let datum = unpack_datum!(bcx, {
                trans_datum_unadjusted(bcx, expr)
            });

            DatumBlock {bcx: bcx, datum: datum}
        }

        ExprKind::RvalueStmt => {
            bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
            nil(bcx, expr_ty(bcx, expr))
        }

        ExprKind::RvalueDps => {
            let ty = expr_ty(bcx, expr);
            if type_is_zero_size(bcx.ccx(), ty) {
                bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
                nil(bcx, ty)
            } else {
                let scratch = rvalue_scratch_datum(bcx, ty, "");
                bcx = trans_rvalue_dps_unadjusted(
                    bcx, expr, SaveIn(scratch.val));

                // Note: this is not obviously a good idea. It causes
                // immediate values to be loaded immediately after a
                // return from a call or other similar expression,
                // which in turn leads to allocas having shorter
                // lifetimes and hence larger stack frames. However,
                // in turn it can lead to more register pressure.
                // Still, in practice it seems to increase
                // performance, since we have fewer problems with
                // stack temporaries.
                let scratch = unpack_datum!(
                    bcx, scratch.to_appropriate_datum(bcx));

                DatumBlock::new(bcx, scratch.to_expr_datum())
            }
        }
    };
}

fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
                   -> DatumBlock<'blk, 'tcx, Expr> {
    let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
    let datum = immediate_rvalue(llval, ty);
    DatumBlock::new(bcx, datum.to_expr_datum())
}

fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      expr: &hir::Expr)
                                      -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;
    let fcx = bcx.fcx;
    let _icx = push_ctxt("trans_datum_unadjusted");

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans_datum_unadjusted(bcx, e)
        }
        hir::ExprPath(..) => {
            let var = trans_var(bcx, bcx.def(expr.id));
            DatumBlock::new(bcx, var.to_expr_datum())
        }
        hir::ExprField(ref base, name) => {
            trans_rec_field(bcx, &base, name.node)
        }
        hir::ExprTupField(ref base, idx) => {
            trans_rec_tup_field(bcx, &base, idx.node)
        }
        hir::ExprIndex(ref base, ref idx) => {
            trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
        }
        hir::ExprBox(ref contents) => {
            // Special case for `Box<T>`
            let box_ty = expr_ty(bcx, expr);
            let contents_ty = expr_ty(bcx, &contents);
            match box_ty.sty {
                ty::TyBox(..) => {
                    trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
                }
                _ => span_bug!(expr.span,
                               "expected unique box")
            }
        }
        hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
        hir::ExprBinary(op, ref lhs, ref rhs) => {
            trans_binary(bcx, expr, op, &lhs, &rhs)
        }
        hir::ExprUnary(op, ref x) => {
            trans_unary(bcx, expr, op, &x)
        }
        hir::ExprAddrOf(_, ref x) => {
            match x.node {
                hir::ExprRepeat(..) | hir::ExprVec(..) => {
                    // Special case for slices.
                    let cleanup_debug_loc =
                        debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
                                                                      x.id,
                                                                      x.span,
                                                                      false);
                    fcx.push_ast_cleanup_scope(cleanup_debug_loc);
                    let datum = unpack_datum!(
                        bcx, tvec::trans_slice_vec(bcx, expr, &x));
                    bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
                    DatumBlock::new(bcx, datum)
                }
                _ => {
                    trans_addr_of(bcx, expr, &x)
                }
            }
        }
        hir::ExprCast(ref val, _) => {
            // Datum output mode means this is a scalar cast:
            trans_imm_cast(bcx, &val, expr.id)
        }
        _ => {
            span_bug!(
                expr.span,
                "trans_rvalue_datum_unadjusted reached \
                 fall-through case: {:?}",
                expr.node);
        }
    }
}

// Translates `base.<something>`, where `<something>` is a named field or a
// tuple index; `get_idx` computes the field's index within the variant.
fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                              base: &hir::Expr,
                              get_idx: F)
                              -> DatumBlock<'blk, 'tcx, Expr> where
    F: FnOnce(&'blk TyCtxt<'tcx>, &VariantInfo<'tcx>) -> usize,
{
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rec_field");

    let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
    let bare_ty = base_datum.ty;
    let repr = adt::represent_type(bcx.ccx(), bare_ty);
    let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);

    let ix = get_idx(bcx.tcx(), &vinfo);
    let d = base_datum.get_element(
        bcx,
        vinfo.fields[ix].1,
        |srcval| {
            adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
        });

    if type_is_sized(bcx.tcx(), d.ty) {
        DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
    } else {
        let scratch = rvalue_scratch_datum(bcx, d.ty, "");
        Store(bcx, d.val, get_dataptr(bcx, scratch.val));
        let info = Load(bcx, get_meta(bcx, base_datum.val));
        Store(bcx, info, get_meta(bcx, scratch.val));

        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
    }
}

/// Translates `base.field`.
fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               base: &hir::Expr,
                               field: ast::Name)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
}

/// Translates `base.<idx>`.
fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   base: &hir::Expr,
                                   idx: usize)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    trans_field(bcx, base, |_, _| idx)
}

fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           index_expr: &hir::Expr,
                           base: &hir::Expr,
                           idx: &hir::Expr,
                           method_call: MethodCall)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    //! Translates `base[idx]`.

    let _icx = push_ctxt("trans_index");
    let ccx = bcx.ccx();
    let mut bcx = bcx;

    let index_expr_debug_loc = index_expr.debug_loc();

    // Check for overloaded index.
    let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
    let elt_datum = match method {
        Some(method) => {
            let method_ty = monomorphize_type(bcx, method.ty);

            let base_datum = unpack_datum!(bcx, trans(bcx, base));

            // Translate index expression.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));

            let ref_ty = // invoked methods have LB regions instantiated:
                bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
            let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
                None => {
                    span_bug!(index_expr.span,
                              "index method didn't return a \
                               dereferenceable type?!")
                }
                Some(elt_tm) => elt_tm.ty,
            };

            // Overloaded. Invoke the index() method, which basically
            // yields a `&T` pointer. We can then proceed down the
            // normal path (below) to dereference that `&T`.
            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");

            bcx = Callee::method(bcx, method)
                .call(bcx, index_expr_debug_loc,
                      ArgOverloadedOp(base_datum, Some(ix_datum)),
                      Some(SaveIn(scratch.val))).bcx;

            let datum = scratch.to_expr_datum();
            let lval = Lvalue::new("expr::trans_index overload");
            if type_is_sized(bcx.tcx(), elt_ty) {
                Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
            } else {
                Datum::new(datum.val, elt_ty, LvalueExpr(lval))
            }
        }
        None => {
            let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
                                                                base,
                                                                "index"));

            // Translate index expression and cast to a suitable LLVM integer.
            // Rust is less strict than LLVM in this regard.
            let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
            let ix_val = ix_datum.to_llscalarish(bcx);
            let ix_size = machine::llbitsize_of_real(bcx.ccx(),
                                                     val_ty(ix_val));
            let int_size = machine::llbitsize_of_real(bcx.ccx(),
                                                      ccx.int_type());
            let ix_val = {
                if ix_size < int_size {
                    if expr_ty(bcx, idx).is_signed() {
                        SExt(bcx, ix_val, ccx.int_type())
                    } else { ZExt(bcx, ix_val, ccx.int_type()) }
                } else if ix_size > int_size {
                    Trunc(bcx, ix_val, ccx.int_type())
                } else {
                    ix_val
                }
            };

            let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());

            let (base, len) = base_datum.get_vec_base_and_len(bcx);

            debug!("trans_index: base {:?}", Value(base));
            debug!("trans_index: len {:?}", Value(len));

            let bounds_check = ICmp(bcx,
                                    llvm::IntUGE,
                                    ix_val,
                                    len,
                                    index_expr_debug_loc);
            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
            let expected = Call(bcx,
                                expect,
                                &[bounds_check, C_bool(ccx, false)],
                                index_expr_debug_loc);
            bcx = with_cond(bcx, expected, |bcx| {
                controlflow::trans_fail_bounds_check(bcx,
                                                     expr_info(index_expr),
                                                     ix_val,
                                                     len)
            });
            let elt = InBoundsGEP(bcx, base, &[ix_val]);
            let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
            let lval = Lvalue::new("expr::trans_index fallback");
            Datum::new(elt, unit_ty, LvalueExpr(lval))
        }
    };

    DatumBlock::new(bcx, elt_datum)
}

/// Translates a reference to a variable.
pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def)
                             -> Datum<'tcx, Lvalue> {
    match def {
        Def::Static(did, _) => consts::get_static(bcx.ccx(), did),
        Def::Upvar(_, nid, _, _) => {
            // Can't move upvars, so this is never a ZeroMemLastUse.
            let local_ty = node_id_type(bcx, nid);
            let lval = Lvalue::new_with_hint("expr::trans_var (upvar)",
                                             bcx, nid, HintKind::ZeroAndMaintain);
            match bcx.fcx.llupvars.borrow().get(&nid) {
                Some(&val) => Datum::new(val, local_ty, lval),
                None => {
                    bug!("trans_var: no llval for upvar {} found", nid);
                }
            }
        }
        Def::Local(_, nid) => {
            let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
                Some(&v) => v,
                None => {
                    bug!("trans_var: no datum for local/arg {} found", nid);
                }
            };
            debug!("take_local(nid={}, v={:?}, ty={})",
                   nid, Value(datum.val), datum.ty);
            datum
        }
        _ => bug!("{:?} should not reach expr::trans_var", def)
    }
}

fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            expr: &hir::Expr)
                                            -> Block<'blk, 'tcx> {
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_rvalue_stmt");

    if bcx.unreachable.get() {
        return bcx;
    }

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    match expr.node {
        hir::ExprBreak(label_opt) => {
            controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, Ignore)
        }
        hir::ExprAgain(label_opt) => {
            controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
        }
        hir::ExprRet(ref ex) => {
            // Check to see if the return expression itself is reachable.
            // This can occur when the inner expression contains a return.
            let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
                cfg.node_is_reachable(expr.id)
            } else {
                true
            };

            if reachable {
                controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
            } else {
                // If it's not reachable, just translate the inner expression
                // directly. This avoids having to manage a return slot when
                // it won't actually be used anyway.
                if let &Some(ref x) = ex {
                    bcx = trans_into(bcx, &x, Ignore);
                }
                // Mark the end of the block as unreachable. Once we get to
                // a return expression, there's no more we should be doing
                // after this.
                Unreachable(bcx);
                bcx
            }
        }
        hir::ExprWhile(ref cond, ref body, _) => {
            controlflow::trans_while(bcx, expr, &cond, &body)
        }
        hir::ExprLoop(ref body, _) => {
            controlflow::trans_loop(bcx, expr, &body)
        }
        hir::ExprAssign(ref dst, ref src) => {
            let src_datum = unpack_datum!(bcx, trans(bcx, &src));
            let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));

            if bcx.fcx.type_needs_drop(dst_datum.ty) {
                // If there are destructors involved, make sure we
                // are copying from an rvalue, since that cannot possibly
                // alias an lvalue. We are concerned about code like:
                //
                //   a = a
                //
                // but also
                //
                //   a = a.b
                //
                // where e.g. a : Option<Foo> and a.b :
                // Option<Foo>. In that case, freeing `a` before the
                // assignment may also free `a.b`!
                //
                // We could avoid this intermediary with some analysis
                // to determine whether `dst` may possibly own `src`.
                debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
                let src_datum = unpack_datum!(
                    bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
                let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
                let opt_hint_val = opt_hint_datum.map(|d| d.to_value());

                // 1. Drop the data at the destination, passing the
                //    drop-hint in case the lvalue has already been
                //    dropped or moved.
                bcx = glue::drop_ty_core(bcx,
                                         dst_datum.val,
                                         dst_datum.ty,
                                         expr.debug_loc(),
                                         false,
                                         opt_hint_val);

                // 2. We are overwriting the destination; ensure that
                //    its drop-hint (if any) says "initialized."
                if let Some(hint_val) = opt_hint_val {
                    let hint_llval = hint_val.value();
                    let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
                    Store(bcx, drop_needed, hint_llval);
                }
                src_datum.store_to(bcx, dst_datum.val)
            } else {
                src_datum.store_to(bcx, dst_datum.val)
            }
        }
        hir::ExprAssignOp(op, ref dst, ref src) => {
            let method = bcx.tcx().tables
                                  .borrow()
                                  .method_map
                                  .get(&MethodCall::expr(expr.id)).cloned();

            if let Some(method) = method {
                let dst = unpack_datum!(bcx, trans(bcx, &dst));
                let src_datum = unpack_datum!(bcx, trans(bcx, &src));

                Callee::method(bcx, method)
                    .call(bcx, expr.debug_loc(),
                          ArgOverloadedOp(dst, Some(src_datum)), None).bcx
            } else {
                trans_assign_op(bcx, expr, op, &dst, &src)
            }
        }
        hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
            let outputs = outputs.iter().map(|output| {
                let out_datum = unpack_datum!(bcx, trans(bcx, output));
                unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id))
            }).collect();
            let inputs = inputs.iter().map(|input| {
                let input = unpack_datum!(bcx, trans(bcx, input));
                let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in"));
                input.to_llscalarish(bcx)
            }).collect();
            asm::trans_inline_asm(bcx, a, outputs, inputs);
            bcx
        }
        _ => {
            span_bug!(
                expr.span,
                "trans_rvalue_stmt_unadjusted reached \
                 fall-through case: {:?}",
                expr.node);
        }
    }
}

fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                           expr: &hir::Expr,
                                           dest: Dest)
                                           -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
    let mut bcx = bcx;

    debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);

    // Entry into the method table if this is an overloaded call/op.
    let method_call = MethodCall::expr(expr.id);

    match expr.node {
        hir::ExprType(ref e, _) => {
            trans_into(bcx, &e, dest)
        }
        hir::ExprPath(..) => {
            trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
        }
        hir::ExprIf(ref cond, ref thn, ref els) => {
            controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
        }
        hir::ExprMatch(ref discr, ref arms, _) => {
            _match::trans_match(bcx, expr, &discr, &arms[..], dest)
        }
        hir::ExprBlock(ref blk) => {
            controlflow::trans_block(bcx, &blk, dest)
        }
        hir::ExprStruct(_, ref fields, ref base) => {
            trans_struct(bcx,
                         &fields[..],
                         base.as_ref().map(|e| &**e),
                         expr.span,
                         expr.id,
                         node_id_type(bcx, expr.id),
                         dest)
        }
        hir::ExprTup(ref args) => {
            let numbered_fields: Vec<(usize, &hir::Expr)> =
                args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
            trans_adt(bcx,
                      expr_ty(bcx, expr),
                      Disr(0),
                      &numbered_fields[..],
                      None,
                      dest,
                      expr.debug_loc())
        }
        hir::ExprLit(ref lit) => {
            match lit.node {
                ast::LitKind::Str(ref s, _) => {
                    tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
                }
                _ => {
                    span_bug!(expr.span,
                              "trans_rvalue_dps_unadjusted shouldn't be \
                               translating this type of literal")
                }
            }
        }
        hir::ExprVec(..) | hir::ExprRepeat(..) => {
            tvec::trans_fixed_vstore(bcx, expr, dest)
        }
        hir::ExprClosure(_, ref decl, ref body) => {
            let dest = match dest {
                SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
                Ignore => closure::Dest::Ignore(bcx.ccx())
            };

            // NB. To get the id of the closure, we don't use
            // `local_def_id(id)`, but rather we extract the closure
            // def-id from the expr's type. This is because this may
            // be an inlined expression from another crate, and we
            // want to get the ORIGINAL closure def-id, since that is
            // the key we need to find the closure-kind and
            // closure-type etc.
            let (def_id, substs) = match expr_ty(bcx, expr).sty {
                ty::TyClosure(def_id, ref substs) => (def_id, substs),
                ref t =>
                    span_bug!(expr.span,
                              "closure expr without closure type: {:?}", t),
            };

            closure::trans_closure_expr(dest,
                                        decl,
                                        body,
                                        expr.id,
                                        def_id,
                                        substs).unwrap_or(bcx)
        }
        hir::ExprCall(ref f, ref args) => {
            let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
            let (callee, args) = if let Some(method) = method {
                let mut all_args = vec![&**f];
                all_args.extend(args.iter().map(|e| &**e));

                (Callee::method(bcx, method), ArgOverloadedCall(all_args))
            } else {
                let f = unpack_datum!(bcx, trans(bcx, f));
                (match f.ty.sty {
                    ty::TyFnDef(def_id, substs, _) => {
                        Callee::def(bcx.ccx(), def_id, substs)
                    }
                    ty::TyFnPtr(_) => {
                        let f = unpack_datum!(bcx,
                            f.to_rvalue_datum(bcx, "callee"));
                        Callee::ptr(f)
                    }
                    _ => {
                        span_bug!(expr.span,
                                  "type of callee is not a fn: {}", f.ty);
                    }
                }, ArgExprs(&args))
            };
            callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx
        }
        hir::ExprMethodCall(_, _, ref args) => {
            Callee::method_call(bcx, method_call)
                .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx
        }
        hir::ExprBinary(op, ref lhs, ref rhs_expr) => {
            // if not overloaded, would be RvalueDatumExpr
            let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
            let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr));
            if !op.node.is_by_value() {
                rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr));
            }

            Callee::method_call(bcx, method_call)
                .call(bcx, expr.debug_loc(),
                      ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx
        }
        hir::ExprUnary(_, ref subexpr) => {
            // if not overloaded, would be RvalueDatumExpr
            let arg = unpack_datum!(bcx, trans(bcx, &subexpr));

            Callee::method_call(bcx, method_call)
                .call(bcx, expr.debug_loc(),
                      ArgOverloadedOp(arg, None), Some(dest)).bcx
        }
        hir::ExprCast(..) => {
            // Trait casts used to come this way, now they should be coercions.
            span_bug!(expr.span, "DPS expr_cast (residual trait cast?)")
        }
        hir::ExprAssignOp(op, _, _) => {
            span_bug!(expr.span,
                      "augmented assignment `{}=` should always be an rvalue_stmt",
                      op.node.as_str())
        }
        _ => {
            span_bug!(expr.span,
                      "trans_rvalue_dps_unadjusted reached fall-through \
                       case: {:?}", expr.node)
        }
    }
}

fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        ref_expr: &hir::Expr,
                                        def: Def,
                                        dest: Dest)
                                        -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_def_dps_unadjusted");

    let lldest = match dest {
        SaveIn(lldest) => lldest,
        Ignore => { return bcx; }
    };

    let ty = expr_ty(bcx, ref_expr);
    if let ty::TyFnDef(..) = ty.sty {
        // Zero-sized function or ctor.
        return bcx;
    }

    match def {
        Def::Variant(tid, vid) => {
            let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
            // Nullary variant: nothing to translate but the discriminant.
            let ty = expr_ty(bcx, ref_expr);
            let repr = adt::represent_type(bcx.ccx(), ty);
            adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
            bcx
        }
        Def::Struct(..) => {
            match ty.sty {
                ty::TyStruct(def, _) if def.has_dtor() => {
                    let repr = adt::represent_type(bcx.ccx(), ty);
                    adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
                }
                _ => {}
            }
            bcx
        }
        _ => {
            span_bug!(ref_expr.span,
                      "Non-DPS def {:?} referenced by {}",
                      def, bcx.node_id_to_string(ref_expr.id));
        }
    }
}

fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                            fields: &[hir::Field],
                            base: Option<&hir::Expr>,
                            expr_span: codemap::Span,
                            expr_id: ast::NodeId,
                            ty: Ty<'tcx>,
                            dest: Dest) -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_rec");

    let tcx = bcx.tcx();
    let vinfo = VariantInfo::of_node(tcx, ty, expr_id);

    let mut need_base = vec![true; vinfo.fields.len()];

    let numbered_fields = fields.iter().map(|field| {
        let pos = vinfo.field_index(field.name.node);
        need_base[pos] = false;
        (pos, &*field.expr)
    }).collect::<Vec<_>>();

    let optbase = match base {
        Some(base_expr) => {
            let mut leftovers = Vec::new();
            for (i, b) in need_base.iter().enumerate() {
                if *b {
                    leftovers.push((i, vinfo.fields[i].1));
                }
            }
            Some(StructBaseInfo {expr: base_expr,
                                 fields: leftovers })
        }
        None => {
            if need_base.iter().any(|b| *b) {
                span_bug!(expr_span, "missing fields and no base expr")
            }
            None
        }
    };

    trans_adt(bcx,
              ty,
              vinfo.discr,
              &numbered_fields,
              optbase,
              dest,
              DebugLoc::At(expr_id, expr_span))
}

/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`).
///
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
pub struct StructBaseInfo<'a, 'tcx> {
    /// The base expression; will be evaluated after all explicit fields.
    expr: &'a hir::Expr,
    /// The indices of fields to copy paired with their types.
    fields: Vec<(usize, Ty<'tcx>)>
}

/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
///   expression to store into that field. The initializers will be
///   evaluated in the order specified by `fields`.
///
/// - `optbase` contains information on the base struct (if any) from
///   which remaining fields are copied; see comments on `StructBaseInfo`.
pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                 ty: Ty<'tcx>,
                                 discr: Disr,
                                 fields: &[(usize, &hir::Expr)],
                                 optbase: Option<StructBaseInfo<'a, 'tcx>>,
                                 dest: Dest,
                                 debug_location: DebugLoc)
                                 -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_adt");
    let fcx = bcx.fcx;
    let repr = adt::represent_type(bcx.ccx(), ty);

    debug_location.apply(bcx.fcx);

    // If we don't care about the result, just make a
    // temporary stack slot
    let addr = match dest {
        SaveIn(pos) => pos,
        Ignore => {
            let llresult = alloc_ty(bcx, ty, "temp");
            call_lifetime_start(bcx, llresult);
            llresult
        }
    };

    debug!("trans_adt");

    // This scope holds intermediates that must be cleaned should a
    // panic occur before the ADT as a whole is ready.
    let custom_cleanup_scope = fcx.push_custom_cleanup_scope();

    if ty.is_simd() {
        // Issue 23112: The original logic appeared vulnerable to same
        // order-of-eval bug. But, SIMD values are tuple-structs;
        // i.e. functional record update (FRU) syntax is unavailable.
        //
        // To be safe, double-check that we did not get here via FRU.
        assert!(optbase.is_none());

        // This is the constructor of a SIMD type, such types are
        // always primitive machine types and so do not have a
        // destructor or require any clean-up.
        let llty = type_of::type_of(bcx.ccx(), ty);

        // Keep the vector in a register: run through the fields,
        // `insertelement`ing them directly into that register
        // (i.e. avoid GEPi and `store`s to an alloca).
        let mut vec_val = C_undef(llty);

        for &(i, ref e) in fields {
            let block_datum = trans(bcx, &e);
            bcx = block_datum.bcx;
            let position = C_uint(bcx.ccx(), i);
            let value = block_datum.datum.to_llscalarish(bcx);
            vec_val = InsertElement(bcx, vec_val, value, position);
        }
        Store(bcx, vec_val, addr);
    } else if let Some(base) = optbase {
        // Issue 23112: If there is a base, then order-of-eval
        // requires field expressions eval'ed before base expression.

        // First, trans field expressions to temporary scratch values.
        let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
            let datum = unpack_datum!(bcx, trans(bcx, &e));
            (i, datum)
        }).collect();

        debug_location.apply(bcx.fcx);

        // Second, trans the base to the dest.
        assert_eq!(discr, Disr(0));

        let addr = adt::MaybeSizedValue::sized(addr);
        match expr_kind(bcx.tcx(), &base.expr) {
            ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
                bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
            },
            ExprKind::RvalueStmt => {
                bug!("unexpected expr kind for struct base expr")
            }
            _ => {
                let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
                for &(i, t) in &base.fields {
                    let datum = base_datum.get_element(
                        bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
                    assert!(type_is_sized(bcx.tcx(), datum.ty));
                    let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
                    bcx = datum.store_to(bcx, dest);
                }
            }
        }

        // Finally, move scratch field values into actual field locations
        for (i, datum) in scratch_vals {
            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
            bcx = datum.store_to(bcx, dest);
        }
    } else {
        // No base means we can write all fields directly in place.
        let addr = adt::MaybeSizedValue::sized(addr);
        for &(i, ref e) in fields {
            let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
            let e_ty = expr_ty_adjusted(bcx, &e);
            bcx = trans_into(bcx, &e, SaveIn(dest));
            let scope = cleanup::CustomScope(custom_cleanup_scope);
            fcx.schedule_lifetime_end(scope, dest);
            // FIXME: nonzeroing move should generalize to fields
            fcx.schedule_drop_mem(scope, dest, e_ty, None);
        }
    }

    adt::trans_set_discr(bcx, &repr, addr, discr);

    fcx.pop_custom_cleanup_scope(custom_cleanup_scope);

    // If we don't care about the result, drop the temporary we made
    match dest {
        SaveIn(_) => bcx,
        Ignore => {
            bcx = glue::drop_ty(bcx, addr, ty, debug_location);
            base::call_lifetime_end(bcx, addr);
            bcx
        }
    }
}

fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   expr: &hir::Expr,
                                   lit: &ast::Lit)
                                   -> DatumBlock<'blk, 'tcx, Expr> {
    // must not be a string constant, that is a RvalueDpsExpr
    let _icx = push_ctxt("trans_immediate_lit");
    let ty = expr_ty(bcx, expr);
    let v = consts::const_lit(bcx.ccx(), expr, lit);
    immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}

fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                           expr: &hir::Expr,
                           op: hir::UnOp,
                           sub_expr: &hir::Expr)
                           -> DatumBlock<'blk, 'tcx, Expr> {
    let ccx = bcx.ccx();
    let mut bcx = bcx;
    let _icx = push_ctxt("trans_unary_datum");

    let method_call = MethodCall::expr(expr.id);

    // The only overloaded operator that is translated to a datum
    // is an overloaded deref, since it always yields a `&T`.
    // Otherwise, we should be in the RvalueDpsExpr path.
    assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));

    let un_ty = expr_ty(bcx, expr);

    let debug_loc = expr.debug_loc();

    match op {
        hir::UnNot => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
            immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
        }
        hir::UnNeg => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            let val = datum.to_llscalarish(bcx);
            let (bcx, llneg) = {
                if un_ty.is_fp() {
                    let result = FNeg(bcx, val, debug_loc);
                    (bcx, result)
                } else {
                    let is_signed = un_ty.is_signed();
                    let result = Neg(bcx, val, debug_loc);
                    let bcx = if bcx.ccx().check_overflow() && is_signed {
                        let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
                        let is_min = ICmp(bcx, llvm::IntEQ, val,
                                          C_integral(llty, min, true), debug_loc);
                        with_cond(bcx, is_min, |bcx| {
                            let msg = InternedString::new(
                                "attempted to negate with overflow");
                            controlflow::trans_fail(bcx, expr_info(expr), msg)
                        })
                    } else {
                        bcx
                    };
                    (bcx, result)
                }
            };
            immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
        }
        hir::UnDeref => {
            let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
            deref_once(bcx, expr, datum, method_call)
        }
    }
}

fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               box_expr: &hir::Expr,
                               box_ty: Ty<'tcx>,
                               contents: &hir::Expr,
                               contents_ty: Ty<'tcx>)
                               -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_uniq_expr");
    let fcx = bcx.fcx;
    assert!(type_is_sized(bcx.tcx(), contents_ty));
    let llty = type_of::type_of(bcx.ccx(), contents_ty);
    let size = llsize_of(bcx.ccx(), llty);
    let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
    let llty_ptr = llty.ptr_to();
    let Result { bcx, val } = malloc_raw_dyn(bcx,
                                             llty_ptr,
                                             box_ty,
                                             size,
                                             align,
                                             box_expr.debug_loc());
    // Unique boxes do not allocate for zero-size types. The standard library
    // may assume that `free` is never called on the pointer returned for
    // `Box<ZeroSizeType>`.
    let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
        trans_into(bcx, contents, SaveIn(val))
    } else {
        let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
        fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
                                val, cleanup::HeapExchange, contents_ty);
        let bcx = trans_into(bcx, contents, SaveIn(val));
        fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
        bcx
    };
    immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}

fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             expr: &hir::Expr,
                             subexpr: &hir::Expr)
                             -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_addr_of");
    let mut bcx = bcx;
    let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
    let ty = expr_ty(bcx, expr);
    if !type_is_sized(bcx.tcx(), sub_datum.ty) {
        // Always generate an lvalue datum, because this pointer doesn't own
        // the data and cleanup is scheduled elsewhere.
        DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
    } else {
        // Sized value, ref to a thin pointer
        immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
    }
}

fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  binop_expr: &hir::Expr,
                                  binop_ty: Ty<'tcx>,
                                  op: hir::BinOp,
                                  lhs: Datum<'tcx, Rvalue>,
                                  rhs: Datum<'tcx, Rvalue>)
                                  -> DatumBlock<'blk, 'tcx, Expr>
{
    let _icx = push_ctxt("trans_scalar_binop");

    let tcx = bcx.tcx();
    let lhs_t = lhs.ty;
    assert!(!lhs_t.is_simd());
    let is_float = lhs_t.is_fp();
    let is_signed = lhs_t.is_signed();
    let info = expr_info(binop_expr);

    let binop_debug_loc = binop_expr.debug_loc();

    let mut bcx = bcx;
    let lhs = lhs.to_llscalarish(bcx);
    let rhs = rhs.to_llscalarish(bcx);
    let val = match op.node {
        hir::BiAdd => {
            if is_float {
                FAdd(bcx, lhs, rhs, binop_debug_loc)
            } else {
                let (newbcx, res) = with_overflow_check(
                    bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
                bcx = newbcx;
                res
            }
        }
        hir::BiSub => {
            if is_float {
                FSub(bcx, lhs, rhs, binop_debug_loc)
            } else {
                let (newbcx, res) = with_overflow_check(
                    bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
                bcx = newbcx;
                res
            }
        }
        hir::BiMul => {
            if is_float {
                FMul(bcx, lhs, rhs, binop_debug_loc)
            } else {
                let (newbcx, res) = with_overflow_check(
                    bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
                bcx = newbcx;
                res
            }
        }
        hir::BiDiv => {
            if is_float {
                FDiv(bcx, lhs, rhs, binop_debug_loc)
            } else {
                // Only zero-check integers; fp /0 is NaN
                bcx = base::fail_if_zero_or_overflows(bcx,
                                                      expr_info(binop_expr),
                                                      op, lhs, rhs, lhs_t);
                if is_signed {
                    SDiv(bcx, lhs, rhs, binop_debug_loc)
                } else {
                    UDiv(bcx, lhs, rhs, binop_debug_loc)
                }
            }
        }
        hir::BiRem => {
            if is_float {
                // LLVM currently always lowers the `frem` instruction to
                // appropriate library calls typically found in libm. Notably f64 gets wired up
                // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
                // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
                // instead just an inline function in a header that goes up to a
                // f64, uses `fmod`, and then comes back down to a f32.
                //
                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
                // still unconditionally lower frem instructions over 32-bit floats
                // to a call to `fmodf`. To work around this we special case MSVC
                // 32-bit float rem instructions and instead do the call out to
                // `fmod` ourselves.
                //
                // Note that this is currently duplicated with src/libcore/ops.rs
                // which does the same thing, and it would be nice to perhaps unify
                // these two implementations one day! Also note that we call `fmod`
                // for both 32 and 64-bit floats because if we emit any FRem
                // instruction at all then LLVM is capable of optimizing it into a
                // 32-bit FRem (which we're trying to avoid).
                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
                               tcx.sess.target.target.arch == "x86";
                if use_fmod {
                    let f64t = Type::f64(bcx.ccx());
                    let fty = Type::func(&[f64t, f64t], &f64t);
                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
                    if lhs_t == tcx.types.f32 {
                        let lhs = FPExt(bcx, lhs, f64t);
                        let rhs = FPExt(bcx, rhs, f64t);
                        let res = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
                        FPTrunc(bcx, res, Type::f32(bcx.ccx()))
                    } else {
                        Call(bcx, llfn, &[lhs, rhs], binop_debug_loc)
                    }
                } else {
                    FRem(bcx, lhs, rhs, binop_debug_loc)
                }
            } else {
                // Only zero-check integers; fp %0 is NaN
                bcx = base::fail_if_zero_or_overflows(bcx,
                                                      expr_info(binop_expr),
                                                      op, lhs, rhs, lhs_t);
                if is_signed {
                    SRem(bcx, lhs, rhs, binop_debug_loc)
                } else {
                    URem(bcx, lhs, rhs, binop_debug_loc)
                }
            }
        }
        hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
        hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
        hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
        hir::BiShl => {
            let (newbcx, res) = with_overflow_check(
                bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
            bcx = newbcx;
            res
        }
        hir::BiShr => {
            let (newbcx, res) = with_overflow_check(
                bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
            bcx = newbcx;
            res
        }
        hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
            base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
        }
        _ => {
            span_bug!(binop_expr.span, "unexpected binop");
        }
    };

    immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
}

// refinement types would obviate the need for this
enum lazy_binop_ty {
    lazy_and,
    lazy_or,
}

fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                binop_expr: &hir::Expr,
                                op: lazy_binop_ty,
                                a: &hir::Expr,
                                b: &hir::Expr)
                                -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_lazy_binop");
    let binop_ty = expr_ty(bcx, binop_expr);
    let fcx = bcx.fcx;

    let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
    let lhs = lhs.to_llscalarish(past_lhs);

    if past_lhs.unreachable.get() {
        return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
    }

    let join = fcx.new_id_block("join", binop_expr.id);
    let before_rhs = fcx.new_id_block("before_rhs", b.id);

    match op {
        lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
        lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
    }

    let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
    let rhs = rhs.to_llscalarish(past_rhs);

    if past_rhs.unreachable.get() {
        return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
    }

    Br(past_rhs, join.llbb, DebugLoc::None);
    // Merge the short-circuit and evaluated-RHS paths with a phi node.
    let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
                  &[past_lhs.llbb, past_rhs.llbb]);

    return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
}

fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                            expr: &hir::Expr,
                            op: hir::BinOp,
                            lhs: &hir::Expr,
                            rhs: &hir::Expr)
                            -> DatumBlock<'blk, 'tcx, Expr> {
    let _icx = push_ctxt("trans_binary");
    let ccx = bcx.ccx();

    // if overloaded, would be RvalueDpsExpr
    assert!(!ccx.tcx().is_method_call(expr.id));

    match op.node {
        hir::BiAnd => {
            trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
        }
        hir::BiOr => {
            trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
        }
        _ => {
            let mut bcx = bcx;
            let binop_ty = expr_ty(bcx, expr);

            let lhs = unpack_datum!(bcx, trans(bcx, lhs));
            let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
            debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs);
            let rhs = unpack_datum!(bcx, trans(bcx, rhs));
            let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
            debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs);

            if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
                assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
                        "built-in binary operators on fat pointers are homogeneous");
                assert_eq!(binop_ty, bcx.tcx().types.bool);
                let val = base::compare_scalar_types(
                    bcx,
                    lhs.val,
                    rhs.val,
                    lhs.ty,
                    op.node,
                    expr.debug_loc());
                immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
            } else {
                assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
                        "built-in binary operators on fat pointers are homogeneous");
                trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
            }
        }
    }
}

pub fn cast_is_noop<'tcx>(tcx: &TyCtxt<'tcx>,
                          expr: &hir::Expr,
                          t_in: Ty<'tcx>,
                          t_out: Ty<'tcx>)
                          -> bool {
    if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
        return true;
    }

    match (t_in.builtin_deref(true, ty::NoPreference),
           t_out.builtin_deref(true, ty::NoPreference)) {
        (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
            t_in == t_out
        }
        _ => {
            // This condition isn't redundant with the check for CoercionCast:
            // different types can be substituted into the same type, and
            // == equality can be overconservative if there are regions.
            t_in == t_out
        }
    }
}
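
// Illustrative note (an editorial sketch, not from the original source): a
// cast that is really a coercion, such as `&mut T as *const T`, takes the
// `CoercionCast` fast path above, while a pointer cast like
// `*const T as *mut T` falls through to the pointee-equality check; both end
// up translated as no-ops.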
1850 fn trans_imm_cast
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1853 -> DatumBlock
<'blk
, 'tcx
, Expr
>
1855 use rustc
::ty
::cast
::CastTy
::*;
1856 use rustc
::ty
::cast
::IntTy
::*;
1858 fn int_cast(bcx
: Block
,
1865 let _icx
= push_ctxt("int_cast");
1866 let srcsz
= llsrctype
.int_width();
1867 let dstsz
= lldsttype
.int_width();
1868 return if dstsz
== srcsz
{
1869 BitCast(bcx
, llsrc
, lldsttype
)
1870 } else if srcsz
> dstsz
{
1871 TruncOrBitCast(bcx
, llsrc
, lldsttype
)
1873 SExtOrBitCast(bcx
, llsrc
, lldsttype
)
1875 ZExtOrBitCast(bcx
, llsrc
, lldsttype
)
1879 fn float_cast(bcx
: Block
,
1885 let _icx
= push_ctxt("float_cast");
1886 let srcsz
= llsrctype
.float_width();
1887 let dstsz
= lldsttype
.float_width();
1888 return if dstsz
> srcsz
{
1889 FPExt(bcx
, llsrc
, lldsttype
)
1890 } else if srcsz
> dstsz
{
1891 FPTrunc(bcx
, llsrc
, lldsttype
)
    let _icx = push_ctxt("trans_cast");
    let mut bcx = bcx;
    let ccx = bcx.ccx();

    let t_in = expr_ty_adjusted(bcx, expr);
    let t_out = node_id_type(bcx, id);

    debug!("trans_cast({:?} as {:?})", t_in, t_out);
    let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
    let ll_t_out = type_of::immediate_type_of(ccx, t_out);
    // Convert the value to be cast into a ValueRef, either by-ref or
    // by-value as appropriate given its type:
    let mut datum = unpack_datum!(bcx, trans(bcx, expr));

    let datum_ty = monomorphize_type(bcx, datum.ty);

    if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
        datum.ty = t_out;
        return DatumBlock::new(bcx, datum);
    }

    if type_is_fat_ptr(bcx.tcx(), t_in) {
        assert!(datum.kind.is_by_ref());
        if type_is_fat_ptr(bcx.tcx(), t_out) {
            return DatumBlock::new(bcx, Datum::new(
                PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
                t_out,
                Rvalue::new(ByRef)
            )).to_expr_datumblock();
        } else {
            // Return the address
            return immediate_rvalue_bcx(bcx,
                                        PointerCast(bcx,
                                                    Load(bcx, get_dataptr(bcx, datum.val)),
                                                    ll_t_out),
                                        t_out).to_expr_datumblock();
        }
    }
    let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
    let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");

    let (llexpr, signed) = if let Int(CEnum) = r_t_in {
        let repr = adt::represent_type(ccx, t_in);
        let datum = unpack_datum!(
            bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
        let llexpr_ptr = datum.to_llref();
        let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
                                         Some(Type::i64(ccx)), true);
        ll_t_in = val_ty(discr);
        (discr, adt::is_discr_signed(&repr))
    } else {
        (datum.to_llscalarish(bcx), t_in.is_signed())
    };

    let newval = match (r_t_in, r_t_out) {
        (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
            PointerCast(bcx, llexpr, ll_t_out)
        }
        (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
        (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),

        (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
        (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
        (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
        (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
        (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
        (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),

        _ => span_bug!(expr.span,
                       "translating unsupported cast: \
                        {:?} -> {:?}",
                       t_in,
                       t_out)
    };
    return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
}
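// The dispatch above mirrors LLVM's cast instructions: e.g. a pointer cast
// to an integer lowers to `ptrtoint`, the reverse to `inttoptr`, a signed
// int-to-float to `sitofp`, and float-to-signed-int to `fptosi`. C-like
// enums are first reduced to their discriminant, which is why `ll_t_in`
// may be overwritten earlier in this function.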
fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               expr: &hir::Expr,
                               op: hir::BinOp,
                               dst: &hir::Expr,
                               src: &hir::Expr)
                               -> Block<'blk, 'tcx> {
    let _icx = push_ctxt("trans_assign_op");
    let mut bcx = bcx;

    debug!("trans_assign_op(expr={:?})", expr);

    // User-defined operator methods cannot be used with `+=` etc right now
    assert!(!bcx.tcx().is_method_call(expr.id));

    // Evaluate LHS (destination), which should be an lvalue
    let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
    assert!(!bcx.fcx.type_needs_drop(dst.ty));
    let lhs = load_ty(bcx, dst.val, dst.ty);
    let lhs = immediate_rvalue(lhs, dst.ty);

    // Evaluate RHS - FIXME(#28160) this sucks
    let rhs = unpack_datum!(bcx, trans(bcx, &src));
    let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));

    // Perform computation and store the result
    let result_datum = unpack_datum!(
        bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
    return result_datum.store_to(bcx, dst.val);
}
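// In other words, `x += y` is translated as: compute the address of `x`,
// load the old value, translate `y` to an rvalue, apply the scalar binop,
// and store the result back through the original address.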
fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                        datum: Datum<'tcx, Expr>,
                        expr: &hir::Expr)
                        -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;

    // Ensure cleanup of `datum` if not already scheduled and obtain
    // a "by ref" pointer.
    let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));

    // Compute final type. Note that we are loose with the region and
    // mutability, since those things don't matter in trans.
    let referent_ty = lv_datum.ty;
    let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);

    // Construct the resulting datum. The right datum to return here would be
    // an Lvalue datum, because there is cleanup scheduled and the datum
    // doesn't own the data; but for thin pointers we microoptimize it to an
    // Rvalue datum to avoid the extra alloca and level of indirection, which
    // has no ill effects in that case.
    let kind = if type_is_sized(bcx.tcx(), referent_ty) {
        RvalueExpr(Rvalue::new(ByValue))
    } else {
        LvalueExpr(lv_datum.kind)
    };

    // Get the pointer.
    let llref = lv_datum.to_llref();
    DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
}
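// For a sized referent this hands back the raw pointer by value; for an
// unsized one (e.g. `[T]` or a trait object) the lvalue kind is preserved
// so the fat pointer keeps its scheduled cleanup.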
fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              expr: &hir::Expr,
                              datum: Datum<'tcx, Expr>,
                              times: usize)
                              -> DatumBlock<'blk, 'tcx, Expr> {
    let mut bcx = bcx;
    let mut datum = datum;
    for i in 0..times {
        let method_call = MethodCall::autoderef(expr.id, i as u32);
        datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
    }
    DatumBlock { bcx: bcx, datum: datum }
}
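// Each iteration replays the method-map entry that typeck recorded under
// `MethodCall::autoderef(expr.id, i)`, so overloaded derefs are re-invoked
// level by level in the same order the autoderef adjustment was computed.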
fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          expr: &hir::Expr,
                          datum: Datum<'tcx, Expr>,
                          method_call: MethodCall)
                          -> DatumBlock<'blk, 'tcx, Expr> {
    let ccx = bcx.ccx();

    debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
           expr, datum, method_call);

    let mut bcx = bcx;

    // Check for overloaded deref.
    let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
    let datum = match method {
        Some(method) => {
            let method_ty = monomorphize_type(bcx, method.ty);

            // Overloaded. Invoke the deref() method, which basically
            // converts from the `Smaht<T>` pointer that we have into
            // a `&T` pointer. We can then proceed down the normal
            // path (below) to dereference that `&T`.
            let datum = if method_call.autoderef == 0 {
                // Always perform an AutoPtr when applying an overloaded auto-deref
                unpack_datum!(bcx, auto_ref(bcx, datum, expr))
            } else {
                datum
            };

            let ref_ty = // invoked methods have their LB regions instantiated
                ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
            let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");

            bcx = Callee::method(bcx, method)
                .call(bcx, expr.debug_loc(),
                      ArgOverloadedOp(datum, None),
                      Some(SaveIn(scratch.val))).bcx;
            scratch.to_expr_datum()
        }
        None => {
            // Not overloaded. We already have a pointer we know how to deref.
            datum
        }
    };

    let r = match datum.ty.sty {
        ty::TyBox(content_ty) => {
            // Make sure we have an lvalue datum here to get the
            // proper cleanups scheduled
            let datum = unpack_datum!(
                bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));

            if type_is_sized(bcx.tcx(), content_ty) {
                let ptr = load_ty(bcx, datum.val, datum.ty);
                DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
            } else {
                // A fat pointer and a DST lvalue have the same representation
                // just different types. Since there is no temporary for `*e`
                // here (because it is unsized), we cannot emulate the sized
                // object code path for running drop glue and free. Instead,
                // we schedule cleanup for `e`, turning it into an lvalue.

                let lval = Lvalue::new("expr::deref_once ty_uniq");
                let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
                DatumBlock::new(bcx, datum)
            }
        }

        ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
        ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
            let lval = Lvalue::new("expr::deref_once ptr");
            if type_is_sized(bcx.tcx(), content_ty) {
                let ptr = datum.to_llscalarish(bcx);

                // Always generate an lvalue datum, even if datum.mode is
                // an rvalue. This is because datum.mode is only an
                // rvalue for non-owning pointers like &T or *T, in which
                // case cleanup *is* scheduled elsewhere, by the true
                // owner (or, in the case of *T, by the user).
                DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
            } else {
                // A fat pointer and a DST lvalue have the same representation
                // just different types.
                DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
            }
        }

        _ => span_bug!(expr.span,
                       "deref invoked on expr of invalid type {:?}",
                       datum.ty)
    };

    debug!("deref_once(expr={}, method_call={:?}, result={:?})",
           expr.id, method_call, r.datum);

    r
}
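// Example: for `*x` where `x: Rc<T>`, the method-map lookup above finds
// `Deref::deref`, which is called to turn the smart pointer into a `&T`;
// that `&T` then falls through to the `TyRef` arm and is dereferenced like
// any ordinary reference.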
impl OverflowOp {
    fn codegen_strategy(&self) -> OverflowCodegen {
        use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
        match *self {
            OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
            OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
            OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),

            OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
            OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
        }
    }
}

enum OverflowCodegen {
    ViaIntrinsic(OverflowOpViaIntrinsic),
    ViaInputCheck(OverflowOpViaInputCheck),
}

enum OverflowOpViaInputCheck { Shl, Shr, }

#[derive(Debug)]
enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
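// The split reflects what LLVM offers: add/sub/mul have dedicated
// `*.with.overflow` intrinsics that return an {iN, i1} pair, whereas shifts
// have no such intrinsic and are instead guarded by checking the shift
// amount against the type's bit width before the shift is emitted.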
impl OverflowOpViaIntrinsic {
    fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
        let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
        bcx.ccx().get_intrinsic(&name)
    }

    fn to_intrinsic_name(&self, tcx: &TyCtxt, ty: Ty) -> &'static str {
        use syntax::ast::IntTy::*;
        use syntax::ast::UintTy::*;
        use rustc::ty::{TyInt, TyUint};

        let new_sty = match ty.sty {
            TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
                "32" => TyInt(I32),
                "64" => TyInt(I64),
                _ => bug!("unsupported target word size")
            },
            TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
                "32" => TyUint(U32),
                "64" => TyUint(U64),
                _ => bug!("unsupported target word size")
            },
            ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
            _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type",
                      *self)
        };

        match *self {
            OverflowOpViaIntrinsic::Add => match new_sty {
                TyInt(I8) => "llvm.sadd.with.overflow.i8",
                TyInt(I16) => "llvm.sadd.with.overflow.i16",
                TyInt(I32) => "llvm.sadd.with.overflow.i32",
                TyInt(I64) => "llvm.sadd.with.overflow.i64",

                TyUint(U8) => "llvm.uadd.with.overflow.i8",
                TyUint(U16) => "llvm.uadd.with.overflow.i16",
                TyUint(U32) => "llvm.uadd.with.overflow.i32",
                TyUint(U64) => "llvm.uadd.with.overflow.i64",

                _ => unreachable!(),
            },
            OverflowOpViaIntrinsic::Sub => match new_sty {
                TyInt(I8) => "llvm.ssub.with.overflow.i8",
                TyInt(I16) => "llvm.ssub.with.overflow.i16",
                TyInt(I32) => "llvm.ssub.with.overflow.i32",
                TyInt(I64) => "llvm.ssub.with.overflow.i64",

                TyUint(U8) => "llvm.usub.with.overflow.i8",
                TyUint(U16) => "llvm.usub.with.overflow.i16",
                TyUint(U32) => "llvm.usub.with.overflow.i32",
                TyUint(U64) => "llvm.usub.with.overflow.i64",

                _ => unreachable!(),
            },
            OverflowOpViaIntrinsic::Mul => match new_sty {
                TyInt(I8) => "llvm.smul.with.overflow.i8",
                TyInt(I16) => "llvm.smul.with.overflow.i16",
                TyInt(I32) => "llvm.smul.with.overflow.i32",
                TyInt(I64) => "llvm.smul.with.overflow.i64",

                TyUint(U8) => "llvm.umul.with.overflow.i8",
                TyUint(U16) => "llvm.umul.with.overflow.i16",
                TyUint(U32) => "llvm.umul.with.overflow.i32",
                TyUint(U64) => "llvm.umul.with.overflow.i64",

                _ => unreachable!(),
            },
        }
    }
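    // For example, a checked `i32` addition resolves to
    // "llvm.sadd.with.overflow.i32". `isize`/`usize` are first rewritten to
    // the fixed-width type matching the target's pointer width, since the
    // intrinsics exist only for concrete widths.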
    fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
                                        info: NodeIdAndSpan,
                                        lhs_t: Ty<'tcx>, lhs: ValueRef,
                                        rhs: ValueRef,
                                        binop_debug_loc: DebugLoc)
                                        -> (Block<'blk, 'tcx>, ValueRef) {
        let llfn = self.to_intrinsic(bcx, lhs_t);

        let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
        let result = ExtractValue(bcx, val, 0); // iN operation result
        let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"

        let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
                        binop_debug_loc);

        let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
        Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
             binop_debug_loc);

        let bcx =
            base::with_cond(bcx, cond, |bcx|
                controlflow::trans_fail(bcx, info,
                    InternedString::new("arithmetic operation overflowed")));

        (bcx, result)
    }
}
impl OverflowOpViaInputCheck {
    fn build_with_input_check<'blk, 'tcx>(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          info: NodeIdAndSpan,
                                          lhs_t: Ty<'tcx>,
                                          lhs: ValueRef,
                                          rhs: ValueRef,
                                          binop_debug_loc: DebugLoc)
                                          -> (Block<'blk, 'tcx>, ValueRef)
    {
        let lhs_llty = val_ty(lhs);
        let rhs_llty = val_ty(rhs);

        // Panic if any bits are set outside of bits that we always
        // mask in.
        //
        // Note that the mask's value is derived from the LHS type
        // (since that is where the 32/64 distinction is relevant) but
        // the mask's type must match the RHS type (since they will
        // both be fed into an and-binop)
        let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);

        let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
        let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
        let result = match *self {
            OverflowOpViaInputCheck::Shl =>
                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
            OverflowOpViaInputCheck::Shr =>
                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
        };
        let bcx =
            base::with_cond(bcx, cond, |bcx|
                controlflow::trans_fail(bcx, info,
                    InternedString::new("shift operation overflowed")));

        (bcx, result)
    }
}
// Check if an integer or vector contains a nonzero element.
fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   value: ValueRef,
                                   binop_debug_loc: DebugLoc) -> ValueRef {
    let llty = val_ty(value);
    let kind = llty.kind();
    match kind {
        TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
        TypeKind::Vector => {
            // Check if any elements of the vector are nonzero by treating
            // it as a wide integer and checking if the integer is nonzero.
            let width = llty.vector_length() as u64 * llty.element_type().int_width();
            let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
            build_nonzero_check(bcx, int_value, binop_debug_loc)
        },
        _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
    }
}
fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
                                   lhs_t: Ty<'tcx>, lhs: ValueRef,
                                   rhs: ValueRef,
                                   binop_debug_loc: DebugLoc)
                                   -> (Block<'blk, 'tcx>, ValueRef) {
    if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
    if bcx.ccx().check_overflow() {
        match oop.codegen_strategy() {
            OverflowCodegen::ViaIntrinsic(oop) =>
                oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
            OverflowCodegen::ViaInputCheck(oop) =>
                oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
        }
    } else {
        let res = match oop {
            OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),

            OverflowOp::Shl =>
                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Shr =>
                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
        };
        (bcx, res)
    }
}
/// We categorize expressions into three kinds. The distinction between
/// lvalue/rvalue is fundamental to the language. The distinction between the
/// two kinds of rvalues is an artifact of trans which reflects how we will
/// generate code for that kind of expression. See trans/expr.rs for more
/// information.
#[derive(Copy, Clone)]
enum ExprKind {
    Lvalue,
    RvalueDps,
    RvalueDatum,
    RvalueStmt,
}

fn expr_kind(tcx: &TyCtxt, expr: &hir::Expr) -> ExprKind {
    if tcx.is_method_call(expr.id) {
        // Overloaded operations are generally calls, and hence they are
        // generated via DPS, but there are a few exceptions:
        return match expr.node {
            // `a += b` has a unit result.
            hir::ExprAssignOp(..) => ExprKind::RvalueStmt,

            // the deref method invoked for `*a` always yields an `&T`
            hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,

            // the index method invoked for `a[i]` always yields an `&T`
            hir::ExprIndex(..) => ExprKind::Lvalue,

            // in the general case, result could be any type, use DPS
            _ => ExprKind::RvalueDps
        };
    }

    match expr.node {
        hir::ExprPath(..) => {
            match tcx.resolve_expr(expr) {
                // Put functions and ctors with the ADTs, as they
                // are zero-sized, so DPS is the cheapest option.
                Def::Struct(..) | Def::Variant(..) |
                Def::Fn(..) | Def::Method(..) => {
                    ExprKind::RvalueDps
                }

                // Note: there is actually a good case to be made that
                // DefArg's, particularly those of immediate type, ought to
                // be considered rvalues.
                Def::Static(..) |
                Def::Upvar(..) |
                Def::Local(..) => ExprKind::Lvalue,

                Def::Const(..) |
                Def::AssociatedConst(..) => ExprKind::RvalueDatum,

                def => span_bug!(
                    expr.span,
                    "uncategorized def for expr {}: {:?}",
                    expr.id, def),
            }
        }

        hir::ExprType(ref expr, _) => {
            expr_kind(tcx, expr)
        }

        hir::ExprUnary(hir::UnDeref, _) |
        hir::ExprField(..) |
        hir::ExprTupField(..) |
        hir::ExprIndex(..) => {
            ExprKind::Lvalue
        }

        hir::ExprCall(..) |
        hir::ExprMethodCall(..) |
        hir::ExprStruct(..) |
        hir::ExprTup(..) |
        hir::ExprIf(..) |
        hir::ExprMatch(..) |
        hir::ExprClosure(..) |
        hir::ExprBlock(..) |
        hir::ExprRepeat(..) |
        hir::ExprVec(..) => {
            ExprKind::RvalueDps
        }

        hir::ExprLit(ref lit) if lit.node.is_str() => {
            ExprKind::RvalueDps
        }

        hir::ExprBreak(..) |
        hir::ExprAgain(..) |
        hir::ExprRet(..) |
        hir::ExprWhile(..) |
        hir::ExprLoop(..) |
        hir::ExprAssign(..) |
        hir::ExprInlineAsm(..) |
        hir::ExprAssignOp(..) => {
            ExprKind::RvalueStmt
        }

        hir::ExprLit(_) | // Note: LitStr is carved out above
        hir::ExprUnary(..) |
        hir::ExprBox(..) |
        hir::ExprAddrOf(..) |
        hir::ExprBinary(..) |
        hir::ExprCast(..) => {
            ExprKind::RvalueDatum
        }
    }
}
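// Worked examples of the categorization: a path to a local (`x`) or a field
// access (`s.f`) is an Lvalue; a non-string literal or a cast `x as u8` is
// an RvalueDatum; a struct literal or a `match` is an RvalueDps; and an
// assignment `x = y` or a `loop` is an RvalueStmt.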