1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Translation of Expressions
13 //! The expr module handles translation of expressions. The most general
14 //! translation routine is `trans()`, which will translate an expression
15 //! into a datum. `trans_into()` is also available, which will translate
16 //! an expression and write the result directly into memory, sometimes
17 //! avoiding the need for a temporary stack slot. Finally,
18 //! `trans_to_lvalue()` is available if you'd like to ensure that the
19 //! result has cleanup scheduled.
21 //! Internally, each of these functions dispatches to various other
22 //! expression functions depending on the kind of expression. We divide
23 //! up expressions into:
25 //! - **Datum expressions:** Those that most naturally yield values.
26 //! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
27 //! - **DPS expressions:** Those that most naturally write into a location
28 //! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
29 //! - **Statement expressions:** Those that do not generate a meaningful
30 //! result. Examples would be `while { ... }` or `return 44`.
32 //! Public entry points:
34 //! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
35 //! storing the result into `dest`. This is the preferred form, if you
38 //! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
39 //! `Datum` with the result. You can then store the datum, inspect
40 //! the value, etc. This may introduce temporaries if the datum is a
43 //! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
44 //! expression and ensures that the result has a cleanup associated with it,
45 //! creating a temporary stack slot if necessary.
47 //! - `trans_var -> Datum`: looks up a local variable, upvar or static.
49 #![allow(non_camel_case_types)]
51 pub use self::Dest
::*;
52 use self::lazy_binop_ty
::*;
54 use llvm
::{self, ValueRef, TypeKind}
;
55 use middle
::const_qualif
::ConstQualif
;
56 use rustc
::hir
::def
::Def
;
57 use rustc
::ty
::subst
::Substs
;
58 use {_match, abi, adt, asm, base, closure, consts, controlflow}
;
61 use callee
::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp}
;
62 use cleanup
::{self, CleanupMethods, DropHintMethods}
;
65 use debuginfo
::{self, DebugLoc, ToDebugLoc}
;
72 use rustc
::ty
::adjustment
::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer}
;
73 use rustc
::ty
::adjustment
::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}
;
74 use rustc
::ty
::adjustment
::CustomCoerceUnsized
;
75 use rustc
::ty
::{self, Ty, TyCtxt}
;
76 use rustc
::ty
::MethodCall
;
77 use rustc
::ty
::cast
::{CastKind, CastTy}
;
78 use util
::common
::indenter
;
79 use machine
::{llsize_of, llsize_of_alloc}
;
85 use syntax
::parse
::token
::InternedString
;
92 // These are passed around by the code generating functions to track the
93 // destination of a computation's value.
95 #[derive(Copy, Clone, PartialEq)]
101 impl fmt
::Debug
for Dest
{
102 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
104 SaveIn(v
) => write
!(f
, "SaveIn({:?})", Value(v
)),
105 Ignore
=> f
.write_str("Ignore")
110 /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
111 /// better optimized LLVM code.
112 pub fn trans_into
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
115 -> Block
<'blk
, 'tcx
> {
118 expr
.debug_loc().apply(bcx
.fcx
);
120 if adjustment_required(bcx
, expr
) {
121 // use trans, which may be less efficient but
122 // which will perform the adjustments:
123 let datum
= unpack_datum
!(bcx
, trans(bcx
, expr
));
124 return datum
.store_to_dest(bcx
, dest
, expr
.id
);
127 let qualif
= *bcx
.tcx().const_qualif_map
.borrow().get(&expr
.id
).unwrap();
128 if !qualif
.intersects(ConstQualif
::NOT_CONST
| ConstQualif
::NEEDS_DROP
) {
129 if !qualif
.intersects(ConstQualif
::PREFER_IN_PLACE
) {
130 if let SaveIn(lldest
) = dest
{
131 match consts
::get_const_expr_as_global(bcx
.ccx(), expr
, qualif
,
132 bcx
.fcx
.param_substs
,
133 consts
::TrueConst
::No
) {
135 // Cast pointer to destination, because constants
136 // have different types.
137 let lldest
= PointerCast(bcx
, lldest
, val_ty(global
));
138 memcpy_ty(bcx
, lldest
, global
, expr_ty_adjusted(bcx
, expr
));
141 Err(consts
::ConstEvalFailure
::Runtime(_
)) => {
142 // in case const evaluation errors, translate normally
143 // debug assertions catch the same errors
146 Err(consts
::ConstEvalFailure
::Compiletime(_
)) => {
152 // If we see a const here, that's because it evaluates to a type with zero size. We
153 // should be able to just discard it, since const expressions are guaranteed not to
154 // have side effects. This seems to be reached through tuple struct constructors being
155 // passed zero-size constants.
156 if let hir
::ExprPath(..) = expr
.node
{
157 match bcx
.tcx().expect_def(expr
.id
) {
158 Def
::Const(_
) | Def
::AssociatedConst(_
) => {
159 assert
!(type_is_zero_size(bcx
.ccx(), bcx
.tcx().node_id_to_type(expr
.id
)));
166 // Even if we don't have a value to emit, and the expression
167 // doesn't have any side-effects, we still have to translate the
168 // body of any closures.
169 // FIXME: Find a better way of handling this case.
171 // The only way we're going to see a `const` at this point is if
172 // it prefers in-place instantiation, likely because it contains
173 // `[x; N]` somewhere within.
175 hir
::ExprPath(..) => {
176 match bcx
.tcx().expect_def(expr
.id
) {
177 Def
::Const(did
) | Def
::AssociatedConst(did
) => {
178 let empty_substs
= bcx
.tcx().mk_substs(Substs
::empty());
179 let const_expr
= consts
::get_const_expr(bcx
.ccx(), did
, expr
,
181 // Temporarily get cleanup scopes out of the way,
182 // as they require sub-expressions to be contained
183 // inside the current AST scope.
184 // These should record no cleanups anyways, `const`
185 // can't have destructors.
186 let scopes
= mem
::replace(&mut *bcx
.fcx
.scopes
.borrow_mut(),
188 // Lock emitted debug locations to the location of
189 // the constant reference expression.
190 debuginfo
::with_source_location_override(bcx
.fcx
,
193 bcx
= trans_into(bcx
, const_expr
, dest
)
195 let scopes
= mem
::replace(&mut *bcx
.fcx
.scopes
.borrow_mut(),
197 assert
!(scopes
.is_empty());
208 debug
!("trans_into() expr={:?}", expr
);
210 let cleanup_debug_loc
= debuginfo
::get_cleanup_debug_loc_for_ast_node(bcx
.ccx(),
214 bcx
.fcx
.push_ast_cleanup_scope(cleanup_debug_loc
);
216 let kind
= expr_kind(bcx
.tcx(), expr
);
218 ExprKind
::Lvalue
| ExprKind
::RvalueDatum
=> {
219 trans_unadjusted(bcx
, expr
).store_to_dest(dest
, expr
.id
)
221 ExprKind
::RvalueDps
=> {
222 trans_rvalue_dps_unadjusted(bcx
, expr
, dest
)
224 ExprKind
::RvalueStmt
=> {
225 trans_rvalue_stmt_unadjusted(bcx
, expr
)
229 bcx
.fcx
.pop_and_trans_ast_cleanup_scope(bcx
, expr
.id
)
232 /// Translates an expression, returning a datum (and new block) encapsulating the result. When
233 /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
235 pub fn trans
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
237 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
238 debug
!("trans(expr={:?})", expr
);
242 let qualif
= *bcx
.tcx().const_qualif_map
.borrow().get(&expr
.id
).unwrap();
243 let adjusted_global
= !qualif
.intersects(ConstQualif
::NON_STATIC_BORROWS
);
244 let global
= if !qualif
.intersects(ConstQualif
::NOT_CONST
| ConstQualif
::NEEDS_DROP
) {
245 match consts
::get_const_expr_as_global(bcx
.ccx(), expr
, qualif
,
246 bcx
.fcx
.param_substs
,
247 consts
::TrueConst
::No
) {
249 if qualif
.intersects(ConstQualif
::HAS_STATIC_BORROWS
) {
250 // Is borrowed as 'static, must return lvalue.
252 // Cast pointer to global, because constants have different types.
253 let const_ty
= expr_ty_adjusted(bcx
, expr
);
254 let llty
= type_of
::type_of(bcx
.ccx(), const_ty
);
255 let global
= PointerCast(bcx
, global
, llty
.ptr_to());
256 let datum
= Datum
::new(global
, const_ty
, Lvalue
::new("expr::trans"));
257 return DatumBlock
::new(bcx
, datum
.to_expr_datum());
260 // Otherwise, keep around and perform adjustments, if needed.
261 let const_ty
= if adjusted_global
{
262 expr_ty_adjusted(bcx
, expr
)
267 // This could use a better heuristic.
268 Some(if type_is_immediate(bcx
.ccx(), const_ty
) {
269 // Cast pointer to global, because constants have different types.
270 let llty
= type_of
::type_of(bcx
.ccx(), const_ty
);
271 let global
= PointerCast(bcx
, global
, llty
.ptr_to());
272 // Maybe just get the value directly, instead of loading it?
273 immediate_rvalue(load_ty(bcx
, global
, const_ty
), const_ty
)
275 let scratch
= alloc_ty(bcx
, const_ty
, "const");
276 call_lifetime_start(bcx
, scratch
);
277 let lldest
= if !const_ty
.is_structural() {
278 // Cast pointer to slot, because constants have different types.
279 PointerCast(bcx
, scratch
, val_ty(global
))
281 // In this case, memcpy_ty calls llvm.memcpy after casting both
282 // source and destination to i8*, so we don't need any casts.
285 memcpy_ty(bcx
, lldest
, global
, const_ty
);
286 Datum
::new(scratch
, const_ty
, Rvalue
::new(ByRef
))
289 Err(consts
::ConstEvalFailure
::Runtime(_
)) => {
290 // in case const evaluation errors, translate normally
291 // debug assertions catch the same errors
295 Err(consts
::ConstEvalFailure
::Compiletime(_
)) => {
296 // generate a dummy llvm value
297 let const_ty
= expr_ty(bcx
, expr
);
298 let llty
= type_of
::type_of(bcx
.ccx(), const_ty
);
299 let dummy
= C_undef(llty
.ptr_to());
300 Some(Datum
::new(dummy
, const_ty
, Rvalue
::new(ByRef
)))
307 let cleanup_debug_loc
= debuginfo
::get_cleanup_debug_loc_for_ast_node(bcx
.ccx(),
311 fcx
.push_ast_cleanup_scope(cleanup_debug_loc
);
312 let datum
= match global
{
313 Some(rvalue
) => rvalue
.to_expr_datum(),
314 None
=> unpack_datum
!(bcx
, trans_unadjusted(bcx
, expr
))
316 let datum
= if adjusted_global
{
317 datum
// trans::consts already performed adjustments.
319 unpack_datum
!(bcx
, apply_adjustments(bcx
, expr
, datum
))
321 bcx
= fcx
.pop_and_trans_ast_cleanup_scope(bcx
, expr
.id
);
322 return DatumBlock
::new(bcx
, datum
);
325 pub fn get_meta(bcx
: Block
, fat_ptr
: ValueRef
) -> ValueRef
{
326 StructGEP(bcx
, fat_ptr
, abi
::FAT_PTR_EXTRA
)
329 pub fn get_dataptr(bcx
: Block
, fat_ptr
: ValueRef
) -> ValueRef
{
330 StructGEP(bcx
, fat_ptr
, abi
::FAT_PTR_ADDR
)
333 pub fn copy_fat_ptr(bcx
: Block
, src_ptr
: ValueRef
, dst_ptr
: ValueRef
) {
334 Store(bcx
, Load(bcx
, get_dataptr(bcx
, src_ptr
)), get_dataptr(bcx
, dst_ptr
));
335 Store(bcx
, Load(bcx
, get_meta(bcx
, src_ptr
)), get_meta(bcx
, dst_ptr
));
338 fn adjustment_required
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
339 expr
: &hir
::Expr
) -> bool
{
340 let adjustment
= match bcx
.tcx().tables
.borrow().adjustments
.get(&expr
.id
).cloned() {
341 None
=> { return false; }
345 // Don't skip a conversion from Box<T> to &T, etc.
346 if bcx
.tcx().is_overloaded_autoderef(expr
.id
, 0) {
351 AdjustNeverToAny(..) => true,
352 AdjustReifyFnPointer
=> true,
353 AdjustUnsafeFnPointer
| AdjustMutToConstPointer
=> {
354 // purely a type-level thing
357 AdjustDerefRef(ref adj
) => {
358 // We are a bit paranoid about adjustments and thus might have a re-
359 // borrow here which merely derefs and then refs again (it might have
360 // a different region or mutability, but we don't care here).
361 !(adj
.autoderefs
== 1 && adj
.autoref
.is_some() && adj
.unsize
.is_none())
366 /// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
367 /// translation of `expr`.
368 fn apply_adjustments
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
370 datum
: Datum
<'tcx
, Expr
>)
371 -> DatumBlock
<'blk
, 'tcx
, Expr
>
374 let mut datum
= datum
;
375 let adjustment
= match bcx
.tcx().tables
.borrow().adjustments
.get(&expr
.id
).cloned() {
377 return DatumBlock
::new(bcx
, datum
);
381 debug
!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
382 expr
, datum
, adjustment
);
384 AdjustNeverToAny(ref target
) => {
385 let mono_target
= bcx
.monomorphize(target
);
386 let llty
= type_of
::type_of(bcx
.ccx(), mono_target
);
387 let dummy
= C_undef(llty
.ptr_to());
388 datum
= Datum
::new(dummy
, mono_target
, Lvalue
::new("never")).to_expr_datum();
390 AdjustReifyFnPointer
=> {
392 ty
::TyFnDef(def_id
, substs
, _
) => {
393 datum
= Callee
::def(bcx
.ccx(), def_id
, substs
)
394 .reify(bcx
.ccx()).to_expr_datum();
397 bug
!("{} cannot be reified to a fn ptr", datum
.ty
)
401 AdjustUnsafeFnPointer
| AdjustMutToConstPointer
=> {
402 // purely a type-level thing
404 AdjustDerefRef(ref adj
) => {
405 let skip_reborrows
= if adj
.autoderefs
== 1 && adj
.autoref
.is_some() {
406 // We are a bit paranoid about adjustments and thus might have a re-
407 // borrow here which merely derefs and then refs again (it might have
408 // a different region or mutability, but we don't care here).
410 // Don't skip a conversion from Box<T> to &T, etc.
412 if bcx
.tcx().is_overloaded_autoderef(expr
.id
, 0) {
413 // Don't skip an overloaded deref.
425 if adj
.autoderefs
> skip_reborrows
{
427 let lval
= unpack_datum
!(bcx
, datum
.to_lvalue_datum(bcx
, "auto_deref", expr
.id
));
428 datum
= unpack_datum
!(bcx
, deref_multiple(bcx
, expr
,
429 lval
.to_expr_datum(),
430 adj
.autoderefs
- skip_reborrows
));
433 // (You might think there is a more elegant way to do this than a
434 // skip_reborrows bool, but then you remember that the borrow checker exists).
435 if skip_reborrows
== 0 && adj
.autoref
.is_some() {
436 datum
= unpack_datum
!(bcx
, auto_ref(bcx
, datum
, expr
));
439 if let Some(target
) = adj
.unsize
{
440 // We do not arrange cleanup ourselves; if we already are an
441 // L-value, then cleanup will have already been scheduled (and
442 // the `datum.to_rvalue_datum` call below will emit code to zero
443 // the drop flag when moving out of the L-value). If we are an
444 // R-value, then we do not need to schedule cleanup.
445 let source_datum
= unpack_datum
!(bcx
,
446 datum
.to_rvalue_datum(bcx
, "__coerce_source"));
448 let target
= bcx
.monomorphize(&target
);
450 let scratch
= alloc_ty(bcx
, target
, "__coerce_target");
451 call_lifetime_start(bcx
, scratch
);
452 let target_datum
= Datum
::new(scratch
, target
,
454 bcx
= coerce_unsized(bcx
, expr
.span
, source_datum
, target_datum
);
455 datum
= Datum
::new(scratch
, target
,
456 RvalueExpr(Rvalue
::new(ByRef
)));
460 debug
!("after adjustments, datum={:?}", datum
);
461 DatumBlock
::new(bcx
, datum
)
464 fn coerce_unsized
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
465 span
: syntax_pos
::Span
,
466 source
: Datum
<'tcx
, Rvalue
>,
467 target
: Datum
<'tcx
, Rvalue
>)
468 -> Block
<'blk
, 'tcx
> {
470 debug
!("coerce_unsized({:?} -> {:?})", source
, target
);
472 match (&source
.ty
.sty
, &target
.ty
.sty
) {
473 (&ty
::TyBox(a
), &ty
::TyBox(b
)) |
474 (&ty
::TyRef(_
, ty
::TypeAndMut { ty: a, .. }
),
475 &ty
::TyRef(_
, ty
::TypeAndMut { ty: b, .. }
)) |
476 (&ty
::TyRef(_
, ty
::TypeAndMut { ty: a, .. }
),
477 &ty
::TyRawPtr(ty
::TypeAndMut { ty: b, .. }
)) |
478 (&ty
::TyRawPtr(ty
::TypeAndMut { ty: a, .. }
),
479 &ty
::TyRawPtr(ty
::TypeAndMut { ty: b, .. }
)) => {
480 let (inner_source
, inner_target
) = (a
, b
);
482 let (base
, old_info
) = if !type_is_sized(bcx
.tcx(), inner_source
) {
483 // Normally, the source is a thin pointer and we are
484 // adding extra info to make a fat pointer. The exception
485 // is when we are upcasting an existing object fat pointer
486 // to use a different vtable. In that case, we want to
487 // load out the original data pointer so we can repackage
489 (Load(bcx
, get_dataptr(bcx
, source
.val
)),
490 Some(Load(bcx
, get_meta(bcx
, source
.val
))))
492 let val
= if source
.kind
.is_by_ref() {
493 load_ty(bcx
, source
.val
, source
.ty
)
500 let info
= unsized_info(bcx
.ccx(), inner_source
, inner_target
, old_info
);
502 // Compute the base pointer. This doesn't change the pointer value,
503 // but merely its type.
504 let ptr_ty
= type_of
::in_memory_type_of(bcx
.ccx(), inner_target
).ptr_to();
505 let base
= PointerCast(bcx
, base
, ptr_ty
);
507 Store(bcx
, base
, get_dataptr(bcx
, target
.val
));
508 Store(bcx
, info
, get_meta(bcx
, target
.val
));
511 // This can be extended to enums and tuples in the future.
512 // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
513 (&ty
::TyStruct(def_id_a
, _
), &ty
::TyStruct(def_id_b
, _
)) => {
514 assert_eq
!(def_id_a
, def_id_b
);
516 // The target is already by-ref because it's to be written to.
517 let source
= unpack_datum
!(bcx
, source
.to_ref_datum(bcx
));
518 assert
!(target
.kind
.is_by_ref());
520 let kind
= custom_coerce_unsize_info(bcx
.ccx().shared(),
524 let repr_source
= adt
::represent_type(bcx
.ccx(), source
.ty
);
525 let src_fields
= match &*repr_source
{
526 &adt
::Repr
::Univariant(ref s
, _
) => &s
.fields
,
528 "Non univariant struct? (repr_source: {:?})",
531 let repr_target
= adt
::represent_type(bcx
.ccx(), target
.ty
);
532 let target_fields
= match &*repr_target
{
533 &adt
::Repr
::Univariant(ref s
, _
) => &s
.fields
,
535 "Non univariant struct? (repr_target: {:?})",
539 let coerce_index
= match kind
{
540 CustomCoerceUnsized
::Struct(i
) => i
542 assert
!(coerce_index
< src_fields
.len() && src_fields
.len() == target_fields
.len());
544 let source_val
= adt
::MaybeSizedValue
::sized(source
.val
);
545 let target_val
= adt
::MaybeSizedValue
::sized(target
.val
);
547 let iter
= src_fields
.iter().zip(target_fields
).enumerate();
548 for (i
, (src_ty
, target_ty
)) in iter
{
549 let ll_source
= adt
::trans_field_ptr(bcx
, &repr_source
, source_val
, Disr(0), i
);
550 let ll_target
= adt
::trans_field_ptr(bcx
, &repr_target
, target_val
, Disr(0), i
);
552 // If this is the field we need to coerce, recurse on it.
553 if i
== coerce_index
{
554 coerce_unsized(bcx
, span
,
555 Datum
::new(ll_source
, src_ty
,
557 Datum
::new(ll_target
, target_ty
,
558 Rvalue
::new(ByRef
)));
560 // Otherwise, simply copy the data from the source.
561 assert
!(src_ty
.is_phantom_data() || src_ty
== target_ty
);
562 memcpy_ty(bcx
, ll_target
, ll_source
, src_ty
);
566 _
=> bug
!("coerce_unsized: invalid coercion {:?} -> {:?}",
573 /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
574 /// that the expr represents.
576 /// If this expression is an rvalue, this implies introducing a temporary. In other words,
577 /// something like `x().f` is translated into roughly the equivalent of
579 /// { tmp = x(); tmp.f }
580 pub fn trans_to_lvalue
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
583 -> DatumBlock
<'blk
, 'tcx
, Lvalue
> {
585 let datum
= unpack_datum
!(bcx
, trans(bcx
, expr
));
586 return datum
.to_lvalue_datum(bcx
, name
, expr
.id
);
589 /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
591 fn trans_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
593 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
596 debug
!("trans_unadjusted(expr={:?})", expr
);
597 let _indenter
= indenter();
599 expr
.debug_loc().apply(bcx
.fcx
);
601 return match expr_kind(bcx
.tcx(), expr
) {
602 ExprKind
::Lvalue
| ExprKind
::RvalueDatum
=> {
603 let datum
= unpack_datum
!(bcx
, {
604 trans_datum_unadjusted(bcx
, expr
)
607 DatumBlock {bcx: bcx, datum: datum}
610 ExprKind
::RvalueStmt
=> {
611 bcx
= trans_rvalue_stmt_unadjusted(bcx
, expr
);
612 nil(bcx
, expr_ty(bcx
, expr
))
615 ExprKind
::RvalueDps
=> {
616 let ty
= expr_ty(bcx
, expr
);
617 if type_is_zero_size(bcx
.ccx(), ty
) {
618 bcx
= trans_rvalue_dps_unadjusted(bcx
, expr
, Ignore
);
621 let scratch
= rvalue_scratch_datum(bcx
, ty
, "");
622 bcx
= trans_rvalue_dps_unadjusted(
623 bcx
, expr
, SaveIn(scratch
.val
));
625 // Note: this is not obviously a good idea. It causes
626 // immediate values to be loaded immediately after a
627 // return from a call or other similar expression,
628 // which in turn leads to alloca's having shorter
629 // lifetimes and hence larger stack frames. However,
630 // in turn it can lead to more register pressure.
631 // Still, in practice it seems to increase
632 // performance, since we have fewer problems with
634 let scratch
= unpack_datum
!(
635 bcx
, scratch
.to_appropriate_datum(bcx
));
637 DatumBlock
::new(bcx
, scratch
.to_expr_datum())
642 fn nil
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, ty
: Ty
<'tcx
>)
643 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
644 let llval
= C_undef(type_of
::type_of(bcx
.ccx(), ty
));
645 let datum
= immediate_rvalue(llval
, ty
);
646 DatumBlock
::new(bcx
, datum
.to_expr_datum())
650 fn trans_datum_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
652 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
655 let _icx
= push_ctxt("trans_datum_unadjusted");
658 hir
::ExprType(ref e
, _
) => {
661 hir
::ExprPath(..) => {
662 let var
= trans_var(bcx
, bcx
.tcx().expect_def(expr
.id
));
663 DatumBlock
::new(bcx
, var
.to_expr_datum())
665 hir
::ExprField(ref base
, name
) => {
666 trans_rec_field(bcx
, &base
, name
.node
)
668 hir
::ExprTupField(ref base
, idx
) => {
669 trans_rec_tup_field(bcx
, &base
, idx
.node
)
671 hir
::ExprIndex(ref base
, ref idx
) => {
672 trans_index(bcx
, expr
, &base
, &idx
, MethodCall
::expr(expr
.id
))
674 hir
::ExprBox(ref contents
) => {
675 // Special case for `Box<T>`
676 let box_ty
= expr_ty(bcx
, expr
);
677 let contents_ty
= expr_ty(bcx
, &contents
);
680 trans_uniq_expr(bcx
, expr
, box_ty
, &contents
, contents_ty
)
682 _
=> span_bug
!(expr
.span
,
683 "expected unique box")
687 hir
::ExprLit(ref lit
) => trans_immediate_lit(bcx
, expr
, &lit
),
688 hir
::ExprBinary(op
, ref lhs
, ref rhs
) => {
689 trans_binary(bcx
, expr
, op
, &lhs
, &rhs
)
691 hir
::ExprUnary(op
, ref x
) => {
692 trans_unary(bcx
, expr
, op
, &x
)
694 hir
::ExprAddrOf(_
, ref x
) => {
696 hir
::ExprRepeat(..) | hir
::ExprVec(..) => {
697 // Special case for slices.
698 let cleanup_debug_loc
=
699 debuginfo
::get_cleanup_debug_loc_for_ast_node(bcx
.ccx(),
703 fcx
.push_ast_cleanup_scope(cleanup_debug_loc
);
704 let datum
= unpack_datum
!(
705 bcx
, tvec
::trans_slice_vec(bcx
, expr
, &x
));
706 bcx
= fcx
.pop_and_trans_ast_cleanup_scope(bcx
, x
.id
);
707 DatumBlock
::new(bcx
, datum
)
710 trans_addr_of(bcx
, expr
, &x
)
714 hir
::ExprCast(ref val
, _
) => {
715 // Datum output mode means this is a scalar cast:
716 trans_imm_cast(bcx
, &val
, expr
.id
)
721 "trans_rvalue_datum_unadjusted reached \
722 fall-through case: {:?}",
728 fn trans_field
<'blk
, 'tcx
, F
>(bcx
: Block
<'blk
, 'tcx
>,
731 -> DatumBlock
<'blk
, 'tcx
, Expr
> where
732 F
: FnOnce(TyCtxt
<'blk
, 'tcx
, 'tcx
>, &VariantInfo
<'tcx
>) -> usize,
735 let _icx
= push_ctxt("trans_rec_field");
737 let base_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, base
, "field"));
738 let bare_ty
= base_datum
.ty
;
739 let repr
= adt
::represent_type(bcx
.ccx(), bare_ty
);
740 let vinfo
= VariantInfo
::from_ty(bcx
.tcx(), bare_ty
, None
);
742 let ix
= get_idx(bcx
.tcx(), &vinfo
);
743 let d
= base_datum
.get_element(
747 adt
::trans_field_ptr(bcx
, &repr
, srcval
, vinfo
.discr
, ix
)
750 if type_is_sized(bcx
.tcx(), d
.ty
) {
751 DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
753 let scratch
= rvalue_scratch_datum(bcx
, d
.ty
, "");
754 Store(bcx
, d
.val
, get_dataptr(bcx
, scratch
.val
));
755 let info
= Load(bcx
, get_meta(bcx
, base_datum
.val
));
756 Store(bcx
, info
, get_meta(bcx
, scratch
.val
));
758 // Always generate an lvalue datum, because this pointer doesn't own
759 // the data and cleanup is scheduled elsewhere.
760 DatumBlock
::new(bcx
, Datum
::new(scratch
.val
, scratch
.ty
, LvalueExpr(d
.kind
)))
764 /// Translates `base.field`.
765 fn trans_rec_field
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
768 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
769 trans_field(bcx
, base
, |_
, vinfo
| vinfo
.field_index(field
))
772 /// Translates `base.<idx>`.
773 fn trans_rec_tup_field
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
776 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
777 trans_field(bcx
, base
, |_
, _
| idx
)
780 fn trans_index
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
781 index_expr
: &hir
::Expr
,
784 method_call
: MethodCall
)
785 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
786 //! Translates `base[idx]`.
788 let _icx
= push_ctxt("trans_index");
792 let index_expr_debug_loc
= index_expr
.debug_loc();
794 // Check for overloaded index.
795 let method
= ccx
.tcx().tables
.borrow().method_map
.get(&method_call
).cloned();
796 let elt_datum
= match method
{
798 let method_ty
= monomorphize_type(bcx
, method
.ty
);
800 let base_datum
= unpack_datum
!(bcx
, trans(bcx
, base
));
802 // Translate index expression.
803 let ix_datum
= unpack_datum
!(bcx
, trans(bcx
, idx
));
805 let ref_ty
= // invoked methods have LB regions instantiated:
806 bcx
.tcx().no_late_bound_regions(&method_ty
.fn_ret()).unwrap();
807 let elt_ty
= match ref_ty
.builtin_deref(true, ty
::NoPreference
) {
809 span_bug
!(index_expr
.span
,
810 "index method didn't return a \
811 dereferenceable type?!")
813 Some(elt_tm
) => elt_tm
.ty
,
816 // Overloaded. Invoke the index() method, which basically
817 // yields a `&T` pointer. We can then proceed down the
818 // normal path (below) to dereference that `&T`.
819 let scratch
= rvalue_scratch_datum(bcx
, ref_ty
, "overloaded_index_elt");
821 bcx
= Callee
::method(bcx
, method
)
822 .call(bcx
, index_expr_debug_loc
,
823 ArgOverloadedOp(base_datum
, Some(ix_datum
)),
824 Some(SaveIn(scratch
.val
))).bcx
;
826 let datum
= scratch
.to_expr_datum();
827 let lval
= Lvalue
::new("expr::trans_index overload");
828 if type_is_sized(bcx
.tcx(), elt_ty
) {
829 Datum
::new(datum
.to_llscalarish(bcx
), elt_ty
, LvalueExpr(lval
))
831 Datum
::new(datum
.val
, elt_ty
, LvalueExpr(lval
))
835 let base_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
,
839 // Translate index expression and cast to a suitable LLVM integer.
840 // Rust is less strict than LLVM in this regard.
841 let ix_datum
= unpack_datum
!(bcx
, trans(bcx
, idx
));
842 let ix_val
= ix_datum
.to_llscalarish(bcx
);
843 let ix_size
= machine
::llbitsize_of_real(bcx
.ccx(),
845 let int_size
= machine
::llbitsize_of_real(bcx
.ccx(),
848 if ix_size
< int_size
{
849 if expr_ty(bcx
, idx
).is_signed() {
850 SExt(bcx
, ix_val
, ccx
.int_type())
851 } else { ZExt(bcx, ix_val, ccx.int_type()) }
852 } else if ix_size
> int_size
{
853 Trunc(bcx
, ix_val
, ccx
.int_type())
859 let unit_ty
= base_datum
.ty
.sequence_element_type(bcx
.tcx());
861 let (base
, len
) = base_datum
.get_vec_base_and_len(bcx
);
863 debug
!("trans_index: base {:?}", Value(base
));
864 debug
!("trans_index: len {:?}", Value(len
));
866 let bounds_check
= ICmp(bcx
,
870 index_expr_debug_loc
);
871 let expect
= ccx
.get_intrinsic(&("llvm.expect.i1"));
872 let expected
= Call(bcx
,
874 &[bounds_check
, C_bool(ccx
, false)],
875 index_expr_debug_loc
);
876 bcx
= with_cond(bcx
, expected
, |bcx
| {
877 controlflow
::trans_fail_bounds_check(bcx
,
878 expr_info(index_expr
),
882 let elt
= InBoundsGEP(bcx
, base
, &[ix_val
]);
883 let elt
= PointerCast(bcx
, elt
, type_of
::type_of(ccx
, unit_ty
).ptr_to());
884 let lval
= Lvalue
::new("expr::trans_index fallback");
885 Datum
::new(elt
, unit_ty
, LvalueExpr(lval
))
889 DatumBlock
::new(bcx
, elt_datum
)
892 /// Translates a reference to a variable.
893 pub fn trans_var
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, def
: Def
)
894 -> Datum
<'tcx
, Lvalue
> {
897 Def
::Static(did
, _
) => consts
::get_static(bcx
.ccx(), did
),
898 Def
::Upvar(_
, nid
, _
, _
) => {
899 // Can't move upvars, so this is never a ZeroMemLastUse.
900 let local_ty
= node_id_type(bcx
, nid
);
901 let lval
= Lvalue
::new_with_hint("expr::trans_var (upvar)",
902 bcx
, nid
, HintKind
::ZeroAndMaintain
);
903 match bcx
.fcx
.llupvars
.borrow().get(&nid
) {
904 Some(&val
) => Datum
::new(val
, local_ty
, lval
),
906 bug
!("trans_var: no llval for upvar {} found", nid
);
910 Def
::Local(_
, nid
) => {
911 let datum
= match bcx
.fcx
.lllocals
.borrow().get(&nid
) {
914 bug
!("trans_var: no datum for local/arg {} found", nid
);
917 debug
!("take_local(nid={}, v={:?}, ty={})",
918 nid
, Value(datum
.val
), datum
.ty
);
921 _
=> bug
!("{:?} should not reach expr::trans_var", def
)
925 fn trans_rvalue_stmt_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
927 -> Block
<'blk
, 'tcx
> {
929 let _icx
= push_ctxt("trans_rvalue_stmt");
931 if bcx
.unreachable
.get() {
935 expr
.debug_loc().apply(bcx
.fcx
);
938 hir
::ExprBreak(label_opt
) => {
939 controlflow
::trans_break(bcx
, expr
, label_opt
.map(|l
| l
.node
))
941 hir
::ExprType(ref e
, _
) => {
942 trans_into(bcx
, &e
, Ignore
)
944 hir
::ExprAgain(label_opt
) => {
945 controlflow
::trans_cont(bcx
, expr
, label_opt
.map(|l
| l
.node
))
947 hir
::ExprRet(ref ex
) => {
948 // Check to see if the return expression itself is reachable.
949 // This can occur when the inner expression contains a return
950 let reachable
= if let Some(ref cfg
) = bcx
.fcx
.cfg
{
951 cfg
.node_is_reachable(expr
.id
)
957 controlflow
::trans_ret(bcx
, expr
, ex
.as_ref().map(|e
| &**e
))
959 // If it's not reachable, just translate the inner expression
960 // directly. This avoids having to manage a return slot when
961 // it won't actually be used anyway.
962 if let &Some(ref x
) = ex
{
963 bcx
= trans_into(bcx
, &x
, Ignore
);
965 // Mark the end of the block as unreachable. Once we get to
966 // a return expression, there's no more we should be doing
972 hir
::ExprWhile(ref cond
, ref body
, _
) => {
973 controlflow
::trans_while(bcx
, expr
, &cond
, &body
)
975 hir
::ExprLoop(ref body
, _
) => {
976 controlflow
::trans_loop(bcx
, expr
, &body
)
978 hir
::ExprAssign(ref dst
, ref src
) => {
979 let src_datum
= unpack_datum
!(bcx
, trans(bcx
, &src
));
980 let dst_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, &dst
, "assign"));
982 if bcx
.fcx
.type_needs_drop(dst_datum
.ty
) {
983 // If there are destructors involved, make sure we
984 // are copying from an rvalue, since that cannot possible
985 // alias an lvalue. We are concerned about code like:
993 // where e.g. a : Option<Foo> and a.b :
994 // Option<Foo>. In that case, freeing `a` before the
995 // assignment may also free `a.b`!
997 // We could avoid this intermediary with some analysis
998 // to determine whether `dst` may possibly own `src`.
999 expr
.debug_loc().apply(bcx
.fcx
);
1000 let src_datum
= unpack_datum
!(
1001 bcx
, src_datum
.to_rvalue_datum(bcx
, "ExprAssign"));
1002 let opt_hint_datum
= dst_datum
.kind
.drop_flag_info
.hint_datum(bcx
);
1003 let opt_hint_val
= opt_hint_datum
.map(|d
|d
.to_value());
1005 // 1. Drop the data at the destination, passing the
1006 // drop-hint in case the lvalue has already been
1007 // dropped or moved.
1008 bcx
= glue
::drop_ty_core(bcx
,
1015 // 2. We are overwriting the destination; ensure that
1016 // its drop-hint (if any) says "initialized."
1017 if let Some(hint_val
) = opt_hint_val
{
1018 let hint_llval
= hint_val
.value();
1019 let drop_needed
= C_u8(bcx
.fcx
.ccx
, adt
::DTOR_NEEDED_HINT
);
1020 Store(bcx
, drop_needed
, hint_llval
);
1022 src_datum
.store_to(bcx
, dst_datum
.val
)
1024 src_datum
.store_to(bcx
, dst_datum
.val
)
1027 hir
::ExprAssignOp(op
, ref dst
, ref src
) => {
1028 let method
= bcx
.tcx().tables
1031 .get(&MethodCall
::expr(expr
.id
)).cloned();
1033 if let Some(method
) = method
{
1034 let dst
= unpack_datum
!(bcx
, trans(bcx
, &dst
));
1035 let src_datum
= unpack_datum
!(bcx
, trans(bcx
, &src
));
1037 Callee
::method(bcx
, method
)
1038 .call(bcx
, expr
.debug_loc(),
1039 ArgOverloadedOp(dst
, Some(src_datum
)), None
).bcx
1041 trans_assign_op(bcx
, expr
, op
, &dst
, &src
)
1044 hir
::ExprInlineAsm(ref a
, ref outputs
, ref inputs
) => {
1045 let outputs
= outputs
.iter().map(|output
| {
1046 let out_datum
= unpack_datum
!(bcx
, trans(bcx
, output
));
1047 unpack_datum
!(bcx
, out_datum
.to_lvalue_datum(bcx
, "out", expr
.id
))
1049 let inputs
= inputs
.iter().map(|input
| {
1050 let input
= unpack_datum
!(bcx
, trans(bcx
, input
));
1051 let input
= unpack_datum
!(bcx
, input
.to_rvalue_datum(bcx
, "in"));
1052 input
.to_llscalarish(bcx
)
1054 asm
::trans_inline_asm(bcx
, a
, outputs
, inputs
);
1060 "trans_rvalue_stmt_unadjusted reached \
1061 fall-through case: {:?}",
1067 fn trans_rvalue_dps_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1070 -> Block
<'blk
, 'tcx
> {
1071 let _icx
= push_ctxt("trans_rvalue_dps_unadjusted");
1074 expr
.debug_loc().apply(bcx
.fcx
);
1076 // Entry into the method table if this is an overloaded call/op.
1077 let method_call
= MethodCall
::expr(expr
.id
);
1080 hir
::ExprType(ref e
, _
) => {
1081 trans_into(bcx
, &e
, dest
)
1083 hir
::ExprPath(..) => {
1084 trans_def_dps_unadjusted(bcx
, expr
, bcx
.tcx().expect_def(expr
.id
), dest
)
1086 hir
::ExprIf(ref cond
, ref thn
, ref els
) => {
1087 controlflow
::trans_if(bcx
, expr
.id
, &cond
, &thn
, els
.as_ref().map(|e
| &**e
), dest
)
1089 hir
::ExprMatch(ref discr
, ref arms
, _
) => {
1090 _match
::trans_match(bcx
, expr
, &discr
, &arms
[..], dest
)
1092 hir
::ExprBlock(ref blk
) => {
1093 controlflow
::trans_block(bcx
, &blk
, dest
)
1095 hir
::ExprStruct(_
, ref fields
, ref base
) => {
1098 base
.as_ref().map(|e
| &**e
),
1101 node_id_type(bcx
, expr
.id
),
1104 hir
::ExprTup(ref args
) => {
1105 let numbered_fields
: Vec
<(usize, &hir
::Expr
)> =
1106 args
.iter().enumerate().map(|(i
, arg
)| (i
, &**arg
)).collect();
1110 &numbered_fields
[..],
1115 hir
::ExprLit(ref lit
) => {
1117 ast
::LitKind
::Str(ref s
, _
) => {
1118 tvec
::trans_lit_str(bcx
, expr
, (*s
).clone(), dest
)
1121 span_bug
!(expr
.span
,
1122 "trans_rvalue_dps_unadjusted shouldn't be \
1123 translating this type of literal")
1127 hir
::ExprVec(..) | hir
::ExprRepeat(..) => {
1128 tvec
::trans_fixed_vstore(bcx
, expr
, dest
)
1130 hir
::ExprClosure(_
, ref decl
, ref body
, _
) => {
1131 let dest
= match dest
{
1132 SaveIn(lldest
) => closure
::Dest
::SaveIn(bcx
, lldest
),
1133 Ignore
=> closure
::Dest
::Ignore(bcx
.ccx())
1136 // NB. To get the id of the closure, we don't use
1137 // `local_def_id(id)`, but rather we extract the closure
1138 // def-id from the expr's type. This is because this may
1139 // be an inlined expression from another crate, and we
1140 // want to get the ORIGINAL closure def-id, since that is
1141 // the key we need to find the closure-kind and
1142 // closure-type etc.
1143 let (def_id
, substs
) = match expr_ty(bcx
, expr
).sty
{
1144 ty
::TyClosure(def_id
, substs
) => (def_id
, substs
),
1148 "closure expr without closure type: {:?}", t
),
1151 closure
::trans_closure_expr(dest
,
1156 substs
).unwrap_or(bcx
)
1158 hir
::ExprCall(ref f
, ref args
) => {
1159 let method
= bcx
.tcx().tables
.borrow().method_map
.get(&method_call
).cloned();
1160 let (callee
, args
) = if let Some(method
) = method
{
1161 let mut all_args
= vec
![&**f
];
1162 all_args
.extend(args
.iter().map(|e
| &**e
));
1164 (Callee
::method(bcx
, method
), ArgOverloadedCall(all_args
))
1166 let f
= unpack_datum
!(bcx
, trans(bcx
, f
));
1168 ty
::TyFnDef(def_id
, substs
, _
) => {
1169 Callee
::def(bcx
.ccx(), def_id
, substs
)
1172 let f
= unpack_datum
!(bcx
,
1173 f
.to_rvalue_datum(bcx
, "callee"));
1177 span_bug
!(expr
.span
,
1178 "type of callee is not a fn: {}", f
.ty
);
1182 callee
.call(bcx
, expr
.debug_loc(), args
, Some(dest
)).bcx
1184 hir
::ExprMethodCall(_
, _
, ref args
) => {
1185 Callee
::method_call(bcx
, method_call
)
1186 .call(bcx
, expr
.debug_loc(), ArgExprs(&args
), Some(dest
)).bcx
1188 hir
::ExprBinary(op
, ref lhs
, ref rhs_expr
) => {
1189 // if not overloaded, would be RvalueDatumExpr
1190 let lhs
= unpack_datum
!(bcx
, trans(bcx
, &lhs
));
1191 let mut rhs
= unpack_datum
!(bcx
, trans(bcx
, &rhs_expr
));
1192 if !op
.node
.is_by_value() {
1193 rhs
= unpack_datum
!(bcx
, auto_ref(bcx
, rhs
, rhs_expr
));
1196 Callee
::method_call(bcx
, method_call
)
1197 .call(bcx
, expr
.debug_loc(),
1198 ArgOverloadedOp(lhs
, Some(rhs
)), Some(dest
)).bcx
1200 hir
::ExprUnary(_
, ref subexpr
) => {
1201 // if not overloaded, would be RvalueDatumExpr
1202 let arg
= unpack_datum
!(bcx
, trans(bcx
, &subexpr
));
1204 Callee
::method_call(bcx
, method_call
)
1205 .call(bcx
, expr
.debug_loc(),
1206 ArgOverloadedOp(arg
, None
), Some(dest
)).bcx
1208 hir
::ExprCast(..) => {
1209 // Trait casts used to come this way, now they should be coercions.
1210 span_bug
!(expr
.span
, "DPS expr_cast (residual trait cast?)")
1212 hir
::ExprAssignOp(op
, _
, _
) => {
1215 "augmented assignment `{}=` should always be a rvalue_stmt",
1221 "trans_rvalue_dps_unadjusted reached fall-through \
1228 fn trans_def_dps_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1229 ref_expr
: &hir
::Expr
,
1232 -> Block
<'blk
, 'tcx
> {
1233 let _icx
= push_ctxt("trans_def_dps_unadjusted");
1235 let lldest
= match dest
{
1236 SaveIn(lldest
) => lldest
,
1237 Ignore
=> { return bcx; }
1240 let ty
= expr_ty(bcx
, ref_expr
);
1241 if let ty
::TyFnDef(..) = ty
.sty
{
1242 // Zero-sized function or ctor.
1247 Def
::Variant(tid
, vid
) => {
1248 let variant
= bcx
.tcx().lookup_adt_def(tid
).variant_with_id(vid
);
1250 let ty
= expr_ty(bcx
, ref_expr
);
1251 let repr
= adt
::represent_type(bcx
.ccx(), ty
);
1252 adt
::trans_set_discr(bcx
, &repr
, lldest
, Disr
::from(variant
.disr_val
));
1255 Def
::Struct(..) => {
1257 ty
::TyStruct(def
, _
) if def
.has_dtor() => {
1258 let repr
= adt
::represent_type(bcx
.ccx(), ty
);
1259 adt
::trans_set_discr(bcx
, &repr
, lldest
, Disr(0));
1266 span_bug
!(ref_expr
.span
,
1267 "Non-DPS def {:?} referened by {}",
1268 def
, bcx
.node_id_to_string(ref_expr
.id
));
1273 fn trans_struct
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1274 fields
: &[hir
::Field
],
1275 base
: Option
<&hir
::Expr
>,
1276 expr_span
: syntax_pos
::Span
,
1277 expr_id
: ast
::NodeId
,
1279 dest
: Dest
) -> Block
<'blk
, 'tcx
> {
1280 let _icx
= push_ctxt("trans_rec");
1282 let tcx
= bcx
.tcx();
1283 let vinfo
= VariantInfo
::of_node(tcx
, ty
, expr_id
);
1285 let mut need_base
= vec
![true; vinfo
.fields
.len()];
1287 let numbered_fields
= fields
.iter().map(|field
| {
1288 let pos
= vinfo
.field_index(field
.name
.node
);
1289 need_base
[pos
] = false;
1291 }).collect
::<Vec
<_
>>();
1293 let optbase
= match base
{
1294 Some(base_expr
) => {
1295 let mut leftovers
= Vec
::new();
1296 for (i
, b
) in need_base
.iter().enumerate() {
1298 leftovers
.push((i
, vinfo
.fields
[i
].1));
1301 Some(StructBaseInfo
{expr
: base_expr
,
1302 fields
: leftovers
})
1305 if need_base
.iter().any(|b
| *b
) {
1306 span_bug
!(expr_span
, "missing fields and no base expr")
1318 DebugLoc
::At(expr_id
, expr_span
))
1321 /// Information that `trans_adt` needs in order to fill in the fields
1322 /// of a struct copied from a base struct (e.g., from an expression
1323 /// like `Foo { a: b, ..base }`.
1325 /// Note that `fields` may be empty; the base expression must always be
1326 /// evaluated for side-effects.
1327 pub struct StructBaseInfo
<'a
, 'tcx
> {
1328 /// The base expression; will be evaluated after all explicit fields.
1329 expr
: &'a hir
::Expr
,
1330 /// The indices of fields to copy paired with their types.
1331 fields
: Vec
<(usize, Ty
<'tcx
>)>
1334 /// Constructs an ADT instance:
1336 /// - `fields` should be a list of field indices paired with the
1337 /// expression to store into that field. The initializers will be
1338 /// evaluated in the order specified by `fields`.
1340 /// - `optbase` contains information on the base struct (if any) from
1341 /// which remaining fields are copied; see comments on `StructBaseInfo`.
1342 pub fn trans_adt
<'a
, 'blk
, 'tcx
>(mut bcx
: Block
<'blk
, 'tcx
>,
1345 fields
: &[(usize, &hir
::Expr
)],
1346 optbase
: Option
<StructBaseInfo
<'a
, 'tcx
>>,
1348 debug_location
: DebugLoc
)
1349 -> Block
<'blk
, 'tcx
> {
1350 let _icx
= push_ctxt("trans_adt");
1352 let repr
= adt
::represent_type(bcx
.ccx(), ty
);
1354 debug_location
.apply(bcx
.fcx
);
1356 // If we don't care about the result, just make a
1357 // temporary stack slot
1358 let addr
= match dest
{
1361 let llresult
= alloc_ty(bcx
, ty
, "temp");
1362 call_lifetime_start(bcx
, llresult
);
1367 debug
!("trans_adt");
1369 // This scope holds intermediates that must be cleaned should
1370 // panic occur before the ADT as a whole is ready.
1371 let custom_cleanup_scope
= fcx
.push_custom_cleanup_scope();
1374 // Issue 23112: The original logic appeared vulnerable to same
1375 // order-of-eval bug. But, SIMD values are tuple-structs;
1376 // i.e. functional record update (FRU) syntax is unavailable.
1378 // To be safe, double-check that we did not get here via FRU.
1379 assert
!(optbase
.is_none());
1381 // This is the constructor of a SIMD type, such types are
1382 // always primitive machine types and so do not have a
1383 // destructor or require any clean-up.
1384 let llty
= type_of
::type_of(bcx
.ccx(), ty
);
1386 // keep a vector as a register, and running through the field
1387 // `insertelement`ing them directly into that register
1388 // (i.e. avoid GEPi and `store`s to an alloca) .
1389 let mut vec_val
= C_undef(llty
);
1391 for &(i
, ref e
) in fields
{
1392 let block_datum
= trans(bcx
, &e
);
1393 bcx
= block_datum
.bcx
;
1394 let position
= C_uint(bcx
.ccx(), i
);
1395 let value
= block_datum
.datum
.to_llscalarish(bcx
);
1396 vec_val
= InsertElement(bcx
, vec_val
, value
, position
);
1398 Store(bcx
, vec_val
, addr
);
1399 } else if let Some(base
) = optbase
{
1400 // Issue 23112: If there is a base, then order-of-eval
1401 // requires field expressions eval'ed before base expression.
1403 // First, trans field expressions to temporary scratch values.
1404 let scratch_vals
: Vec
<_
> = fields
.iter().map(|&(i
, ref e
)| {
1405 let datum
= unpack_datum
!(bcx
, trans(bcx
, &e
));
1409 debug_location
.apply(bcx
.fcx
);
1411 // Second, trans the base to the dest.
1412 assert_eq
!(discr
, Disr(0));
1414 let addr
= adt
::MaybeSizedValue
::sized(addr
);
1415 match expr_kind(bcx
.tcx(), &base
.expr
) {
1416 ExprKind
::RvalueDps
| ExprKind
::RvalueDatum
if !bcx
.fcx
.type_needs_drop(ty
) => {
1417 bcx
= trans_into(bcx
, &base
.expr
, SaveIn(addr
.value
));
1419 ExprKind
::RvalueStmt
=> {
1420 bug
!("unexpected expr kind for struct base expr")
1423 let base_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, &base
.expr
, "base"));
1424 for &(i
, t
) in &base
.fields
{
1425 let datum
= base_datum
.get_element(
1426 bcx
, t
, |srcval
| adt
::trans_field_ptr(bcx
, &repr
, srcval
, discr
, i
));
1427 assert
!(type_is_sized(bcx
.tcx(), datum
.ty
));
1428 let dest
= adt
::trans_field_ptr(bcx
, &repr
, addr
, discr
, i
);
1429 bcx
= datum
.store_to(bcx
, dest
);
1434 // Finally, move scratch field values into actual field locations
1435 for (i
, datum
) in scratch_vals
{
1436 let dest
= adt
::trans_field_ptr(bcx
, &repr
, addr
, discr
, i
);
1437 bcx
= datum
.store_to(bcx
, dest
);
1440 // No base means we can write all fields directly in place.
1441 let addr
= adt
::MaybeSizedValue
::sized(addr
);
1442 for &(i
, ref e
) in fields
{
1443 let dest
= adt
::trans_field_ptr(bcx
, &repr
, addr
, discr
, i
);
1444 let e_ty
= expr_ty_adjusted(bcx
, &e
);
1445 bcx
= trans_into(bcx
, &e
, SaveIn(dest
));
1446 let scope
= cleanup
::CustomScope(custom_cleanup_scope
);
1447 fcx
.schedule_lifetime_end(scope
, dest
);
1448 // FIXME: nonzeroing move should generalize to fields
1449 fcx
.schedule_drop_mem(scope
, dest
, e_ty
, None
);
1453 adt
::trans_set_discr(bcx
, &repr
, addr
, discr
);
1455 fcx
.pop_custom_cleanup_scope(custom_cleanup_scope
);
1457 // If we don't care about the result drop the temporary we made
1461 bcx
= glue
::drop_ty(bcx
, addr
, ty
, debug_location
);
1462 base
::call_lifetime_end(bcx
, addr
);
1469 fn trans_immediate_lit
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1472 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1473 // must not be a string constant, that is a RvalueDpsExpr
1474 let _icx
= push_ctxt("trans_immediate_lit");
1475 let ty
= expr_ty(bcx
, expr
);
1476 let v
= consts
::const_lit(bcx
.ccx(), expr
, lit
);
1477 immediate_rvalue_bcx(bcx
, v
, ty
).to_expr_datumblock()
1480 fn trans_unary
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1483 sub_expr
: &hir
::Expr
)
1484 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1485 let ccx
= bcx
.ccx();
1487 let _icx
= push_ctxt("trans_unary_datum");
1489 let method_call
= MethodCall
::expr(expr
.id
);
1491 // The only overloaded operator that is translated to a datum
1492 // is an overloaded deref, since it is always yields a `&T`.
1493 // Otherwise, we should be in the RvalueDpsExpr path.
1494 assert
!(op
== hir
::UnDeref
|| !ccx
.tcx().is_method_call(expr
.id
));
1496 let un_ty
= expr_ty(bcx
, expr
);
1498 let debug_loc
= expr
.debug_loc();
1502 let datum
= unpack_datum
!(bcx
, trans(bcx
, sub_expr
));
1503 let llresult
= Not(bcx
, datum
.to_llscalarish(bcx
), debug_loc
);
1504 immediate_rvalue_bcx(bcx
, llresult
, un_ty
).to_expr_datumblock()
1507 let datum
= unpack_datum
!(bcx
, trans(bcx
, sub_expr
));
1508 let val
= datum
.to_llscalarish(bcx
);
1509 let (bcx
, llneg
) = {
1511 let result
= FNeg(bcx
, val
, debug_loc
);
1514 let is_signed
= un_ty
.is_signed();
1515 let result
= Neg(bcx
, val
, debug_loc
);
1516 let bcx
= if bcx
.ccx().check_overflow() && is_signed
{
1517 let (llty
, min
) = base
::llty_and_min_for_signed_ty(bcx
, un_ty
);
1518 let is_min
= ICmp(bcx
, llvm
::IntEQ
, val
,
1519 C_integral(llty
, min
, true), debug_loc
);
1520 with_cond(bcx
, is_min
, |bcx
| {
1521 let msg
= InternedString
::new(
1522 "attempt to negate with overflow");
1523 controlflow
::trans_fail(bcx
, expr_info(expr
), msg
)
1531 immediate_rvalue_bcx(bcx
, llneg
, un_ty
).to_expr_datumblock()
1534 let datum
= unpack_datum
!(bcx
, trans(bcx
, sub_expr
));
1535 deref_once(bcx
, expr
, datum
, method_call
)
1540 fn trans_uniq_expr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1541 box_expr
: &hir
::Expr
,
1543 contents
: &hir
::Expr
,
1544 contents_ty
: Ty
<'tcx
>)
1545 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1546 let _icx
= push_ctxt("trans_uniq_expr");
1548 assert
!(type_is_sized(bcx
.tcx(), contents_ty
));
1549 let llty
= type_of
::type_of(bcx
.ccx(), contents_ty
);
1550 let size
= llsize_of(bcx
.ccx(), llty
);
1551 let align
= C_uint(bcx
.ccx(), type_of
::align_of(bcx
.ccx(), contents_ty
));
1552 let llty_ptr
= llty
.ptr_to();
1553 let Result { bcx, val }
= malloc_raw_dyn(bcx
,
1558 box_expr
.debug_loc());
1559 // Unique boxes do not allocate for zero-size types. The standard library
1560 // may assume that `free` is never called on the pointer returned for
1561 // `Box<ZeroSizeType>`.
1562 let bcx
= if llsize_of_alloc(bcx
.ccx(), llty
) == 0 {
1563 trans_into(bcx
, contents
, SaveIn(val
))
1565 let custom_cleanup_scope
= fcx
.push_custom_cleanup_scope();
1566 fcx
.schedule_free_value(cleanup
::CustomScope(custom_cleanup_scope
),
1567 val
, cleanup
::HeapExchange
, contents_ty
);
1568 let bcx
= trans_into(bcx
, contents
, SaveIn(val
));
1569 fcx
.pop_custom_cleanup_scope(custom_cleanup_scope
);
1572 immediate_rvalue_bcx(bcx
, val
, box_ty
).to_expr_datumblock()
1575 fn trans_addr_of
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1577 subexpr
: &hir
::Expr
)
1578 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1579 let _icx
= push_ctxt("trans_addr_of");
1581 let sub_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, subexpr
, "addr_of"));
1582 let ty
= expr_ty(bcx
, expr
);
1583 if !type_is_sized(bcx
.tcx(), sub_datum
.ty
) {
1584 // Always generate an lvalue datum, because this pointer doesn't own
1585 // the data and cleanup is scheduled elsewhere.
1586 DatumBlock
::new(bcx
, Datum
::new(sub_datum
.val
, ty
, LvalueExpr(sub_datum
.kind
)))
1588 // Sized value, ref to a thin pointer
1589 immediate_rvalue_bcx(bcx
, sub_datum
.val
, ty
).to_expr_datumblock()
1593 fn trans_scalar_binop
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1594 binop_expr
: &hir
::Expr
,
1597 lhs
: Datum
<'tcx
, Rvalue
>,
1598 rhs
: Datum
<'tcx
, Rvalue
>)
1599 -> DatumBlock
<'blk
, 'tcx
, Expr
>
1601 let _icx
= push_ctxt("trans_scalar_binop");
1604 assert
!(!lhs_t
.is_simd());
1605 let is_float
= lhs_t
.is_fp();
1606 let is_signed
= lhs_t
.is_signed();
1607 let info
= expr_info(binop_expr
);
1609 let binop_debug_loc
= binop_expr
.debug_loc();
1612 let lhs
= lhs
.to_llscalarish(bcx
);
1613 let rhs
= rhs
.to_llscalarish(bcx
);
1614 let val
= match op
.node
{
1617 FAdd(bcx
, lhs
, rhs
, binop_debug_loc
)
1619 let (newbcx
, res
) = with_overflow_check(
1620 bcx
, OverflowOp
::Add
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1627 FSub(bcx
, lhs
, rhs
, binop_debug_loc
)
1629 let (newbcx
, res
) = with_overflow_check(
1630 bcx
, OverflowOp
::Sub
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1637 FMul(bcx
, lhs
, rhs
, binop_debug_loc
)
1639 let (newbcx
, res
) = with_overflow_check(
1640 bcx
, OverflowOp
::Mul
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1647 FDiv(bcx
, lhs
, rhs
, binop_debug_loc
)
1649 // Only zero-check integers; fp /0 is NaN
1650 bcx
= base
::fail_if_zero_or_overflows(bcx
,
1651 expr_info(binop_expr
),
1657 SDiv(bcx
, lhs
, rhs
, binop_debug_loc
)
1659 UDiv(bcx
, lhs
, rhs
, binop_debug_loc
)
1665 FRem(bcx
, lhs
, rhs
, binop_debug_loc
)
1667 // Only zero-check integers; fp %0 is NaN
1668 bcx
= base
::fail_if_zero_or_overflows(bcx
,
1669 expr_info(binop_expr
),
1670 op
, lhs
, rhs
, lhs_t
);
1672 SRem(bcx
, lhs
, rhs
, binop_debug_loc
)
1674 URem(bcx
, lhs
, rhs
, binop_debug_loc
)
1678 hir
::BiBitOr
=> Or(bcx
, lhs
, rhs
, binop_debug_loc
),
1679 hir
::BiBitAnd
=> And(bcx
, lhs
, rhs
, binop_debug_loc
),
1680 hir
::BiBitXor
=> Xor(bcx
, lhs
, rhs
, binop_debug_loc
),
1682 let (newbcx
, res
) = with_overflow_check(
1683 bcx
, OverflowOp
::Shl
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1688 let (newbcx
, res
) = with_overflow_check(
1689 bcx
, OverflowOp
::Shr
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1693 hir
::BiEq
| hir
::BiNe
| hir
::BiLt
| hir
::BiGe
| hir
::BiLe
| hir
::BiGt
=> {
1694 base
::compare_scalar_types(bcx
, lhs
, rhs
, lhs_t
, op
.node
, binop_debug_loc
)
1697 span_bug
!(binop_expr
.span
, "unexpected binop");
1701 immediate_rvalue_bcx(bcx
, val
, binop_ty
).to_expr_datumblock()
1704 // refinement types would obviate the need for this
1705 #[derive(Clone, Copy)]
1706 enum lazy_binop_ty
{
1712 fn trans_lazy_binop
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1713 binop_expr
: &hir
::Expr
,
1717 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1718 let _icx
= push_ctxt("trans_lazy_binop");
1719 let binop_ty
= expr_ty(bcx
, binop_expr
);
1722 let DatumBlock {bcx: past_lhs, datum: lhs}
= trans(bcx
, a
);
1723 let lhs
= lhs
.to_llscalarish(past_lhs
);
1725 if past_lhs
.unreachable
.get() {
1726 return immediate_rvalue_bcx(past_lhs
, lhs
, binop_ty
).to_expr_datumblock();
1729 // If the rhs can never be reached, don't generate code for it.
1730 if let Some(cond_val
) = const_to_opt_uint(lhs
) {
1731 match (cond_val
, op
) {
1734 return immediate_rvalue_bcx(past_lhs
, lhs
, binop_ty
).to_expr_datumblock();
1736 _
=> { /* continue */ }
1740 let join
= fcx
.new_id_block("join", binop_expr
.id
);
1741 let before_rhs
= fcx
.new_id_block("before_rhs", b
.id
);
1744 lazy_and
=> CondBr(past_lhs
, lhs
, before_rhs
.llbb
, join
.llbb
, DebugLoc
::None
),
1745 lazy_or
=> CondBr(past_lhs
, lhs
, join
.llbb
, before_rhs
.llbb
, DebugLoc
::None
)
1748 let DatumBlock {bcx: past_rhs, datum: rhs}
= trans(before_rhs
, b
);
1749 let rhs
= rhs
.to_llscalarish(past_rhs
);
1751 if past_rhs
.unreachable
.get() {
1752 return immediate_rvalue_bcx(join
, lhs
, binop_ty
).to_expr_datumblock();
1755 Br(past_rhs
, join
.llbb
, DebugLoc
::None
);
1756 let phi
= Phi(join
, Type
::i1(bcx
.ccx()), &[lhs
, rhs
],
1757 &[past_lhs
.llbb
, past_rhs
.llbb
]);
1759 return immediate_rvalue_bcx(join
, phi
, binop_ty
).to_expr_datumblock();
1762 fn trans_binary
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1767 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1768 let _icx
= push_ctxt("trans_binary");
1769 let ccx
= bcx
.ccx();
1771 // if overloaded, would be RvalueDpsExpr
1772 assert
!(!ccx
.tcx().is_method_call(expr
.id
));
1776 trans_lazy_binop(bcx
, expr
, lazy_and
, lhs
, rhs
)
1779 trans_lazy_binop(bcx
, expr
, lazy_or
, lhs
, rhs
)
1783 let binop_ty
= expr_ty(bcx
, expr
);
1785 let lhs
= unpack_datum
!(bcx
, trans(bcx
, lhs
));
1786 let lhs
= unpack_datum
!(bcx
, lhs
.to_rvalue_datum(bcx
, "binop_lhs"));
1787 debug
!("trans_binary (expr {}): lhs={:?}", expr
.id
, lhs
);
1788 let rhs
= unpack_datum
!(bcx
, trans(bcx
, rhs
));
1789 let rhs
= unpack_datum
!(bcx
, rhs
.to_rvalue_datum(bcx
, "binop_rhs"));
1790 debug
!("trans_binary (expr {}): rhs={:?}", expr
.id
, rhs
);
1792 if type_is_fat_ptr(ccx
.tcx(), lhs
.ty
) {
1793 assert
!(type_is_fat_ptr(ccx
.tcx(), rhs
.ty
),
1794 "built-in binary operators on fat pointers are homogeneous");
1795 assert_eq
!(binop_ty
, bcx
.tcx().types
.bool
);
1796 let val
= base
::compare_scalar_types(
1803 immediate_rvalue_bcx(bcx
, val
, binop_ty
).to_expr_datumblock()
1805 assert
!(!type_is_fat_ptr(ccx
.tcx(), rhs
.ty
),
1806 "built-in binary operators on fat pointers are homogeneous");
1807 trans_scalar_binop(bcx
, expr
, binop_ty
, op
, lhs
, rhs
)
1813 pub fn cast_is_noop
<'a
, 'tcx
>(tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>,
1818 if let Some(&CastKind
::CoercionCast
) = tcx
.cast_kinds
.borrow().get(&expr
.id
) {
1822 match (t_in
.builtin_deref(true, ty
::NoPreference
),
1823 t_out
.builtin_deref(true, ty
::NoPreference
)) {
1824 (Some(ty
::TypeAndMut{ ty: t_in, .. }
), Some(ty
::TypeAndMut{ ty: t_out, .. }
)) => {
1828 // This condition isn't redundant with the check for CoercionCast:
1829 // different types can be substituted into the same type, and
1830 // == equality can be overconservative if there are regions.
1836 fn trans_imm_cast
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1839 -> DatumBlock
<'blk
, 'tcx
, Expr
>
1841 use rustc
::ty
::cast
::CastTy
::*;
1842 use rustc
::ty
::cast
::IntTy
::*;
1844 fn int_cast(bcx
: Block
,
1851 let _icx
= push_ctxt("int_cast");
1852 let srcsz
= llsrctype
.int_width();
1853 let dstsz
= lldsttype
.int_width();
1854 return if dstsz
== srcsz
{
1855 BitCast(bcx
, llsrc
, lldsttype
)
1856 } else if srcsz
> dstsz
{
1857 TruncOrBitCast(bcx
, llsrc
, lldsttype
)
1859 SExtOrBitCast(bcx
, llsrc
, lldsttype
)
1861 ZExtOrBitCast(bcx
, llsrc
, lldsttype
)
1865 fn float_cast(bcx
: Block
,
1871 let _icx
= push_ctxt("float_cast");
1872 let srcsz
= llsrctype
.float_width();
1873 let dstsz
= lldsttype
.float_width();
1874 return if dstsz
> srcsz
{
1875 FPExt(bcx
, llsrc
, lldsttype
)
1876 } else if srcsz
> dstsz
{
1877 FPTrunc(bcx
, llsrc
, lldsttype
)
1881 let _icx
= push_ctxt("trans_cast");
1883 let ccx
= bcx
.ccx();
1885 let t_in
= expr_ty_adjusted(bcx
, expr
);
1886 let t_out
= node_id_type(bcx
, id
);
1888 debug
!("trans_cast({:?} as {:?})", t_in
, t_out
);
1889 let mut ll_t_in
= type_of
::immediate_type_of(ccx
, t_in
);
1890 let ll_t_out
= type_of
::immediate_type_of(ccx
, t_out
);
1891 // Convert the value to be cast into a ValueRef, either by-ref or
1892 // by-value as appropriate given its type:
1893 let mut datum
= unpack_datum
!(bcx
, trans(bcx
, expr
));
1895 let datum_ty
= monomorphize_type(bcx
, datum
.ty
);
1897 if cast_is_noop(bcx
.tcx(), expr
, datum_ty
, t_out
) {
1899 return DatumBlock
::new(bcx
, datum
);
1902 if type_is_fat_ptr(bcx
.tcx(), t_in
) {
1903 assert
!(datum
.kind
.is_by_ref());
1904 if type_is_fat_ptr(bcx
.tcx(), t_out
) {
1905 return DatumBlock
::new(bcx
, Datum
::new(
1906 PointerCast(bcx
, datum
.val
, ll_t_out
.ptr_to()),
1909 )).to_expr_datumblock();
1911 // Return the address
1912 return immediate_rvalue_bcx(bcx
,
1914 Load(bcx
, get_dataptr(bcx
, datum
.val
)),
1916 t_out
).to_expr_datumblock();
1920 let r_t_in
= CastTy
::from_ty(t_in
).expect("bad input type for cast");
1921 let r_t_out
= CastTy
::from_ty(t_out
).expect("bad output type for cast");
1923 let (llexpr
, signed
) = if let Int(CEnum
) = r_t_in
{
1924 let repr
= adt
::represent_type(ccx
, t_in
);
1925 let datum
= unpack_datum
!(
1926 bcx
, datum
.to_lvalue_datum(bcx
, "trans_imm_cast", expr
.id
));
1927 let llexpr_ptr
= datum
.to_llref();
1928 let discr
= adt
::trans_get_discr(bcx
, &repr
, llexpr_ptr
,
1929 Some(Type
::i64(ccx
)), true);
1930 ll_t_in
= val_ty(discr
);
1931 (discr
, adt
::is_discr_signed(&repr
))
1933 (datum
.to_llscalarish(bcx
), t_in
.is_signed())
1936 let newval
= match (r_t_in
, r_t_out
) {
1937 (Ptr(_
), Ptr(_
)) | (FnPtr
, Ptr(_
)) | (RPtr(_
), Ptr(_
)) => {
1938 PointerCast(bcx
, llexpr
, ll_t_out
)
1940 (Ptr(_
), Int(_
)) | (FnPtr
, Int(_
)) => PtrToInt(bcx
, llexpr
, ll_t_out
),
1941 (Int(_
), Ptr(_
)) => IntToPtr(bcx
, llexpr
, ll_t_out
),
1943 (Int(_
), Int(_
)) => int_cast(bcx
, ll_t_out
, ll_t_in
, llexpr
, signed
),
1944 (Float
, Float
) => float_cast(bcx
, ll_t_out
, ll_t_in
, llexpr
),
1945 (Int(_
), Float
) if signed
=> SIToFP(bcx
, llexpr
, ll_t_out
),
1946 (Int(_
), Float
) => UIToFP(bcx
, llexpr
, ll_t_out
),
1947 (Float
, Int(I
)) => FPToSI(bcx
, llexpr
, ll_t_out
),
1948 (Float
, Int(_
)) => FPToUI(bcx
, llexpr
, ll_t_out
),
1950 _
=> span_bug
!(expr
.span
,
1951 "translating unsupported cast: \
1956 return immediate_rvalue_bcx(bcx
, newval
, t_out
).to_expr_datumblock();
1959 fn trans_assign_op
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1964 -> Block
<'blk
, 'tcx
> {
1965 let _icx
= push_ctxt("trans_assign_op");
1968 debug
!("trans_assign_op(expr={:?})", expr
);
1970 // User-defined operator methods cannot be used with `+=` etc right now
1971 assert
!(!bcx
.tcx().is_method_call(expr
.id
));
1973 // Evaluate LHS (destination), which should be an lvalue
1974 let dst
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, dst
, "assign_op"));
1975 assert
!(!bcx
.fcx
.type_needs_drop(dst
.ty
));
1976 let lhs
= load_ty(bcx
, dst
.val
, dst
.ty
);
1977 let lhs
= immediate_rvalue(lhs
, dst
.ty
);
1979 // Evaluate RHS - FIXME(#28160) this sucks
1980 let rhs
= unpack_datum
!(bcx
, trans(bcx
, &src
));
1981 let rhs
= unpack_datum
!(bcx
, rhs
.to_rvalue_datum(bcx
, "assign_op_rhs"));
1983 // Perform computation and store the result
1984 let result_datum
= unpack_datum
!(
1985 bcx
, trans_scalar_binop(bcx
, expr
, dst
.ty
, op
, lhs
, rhs
));
1986 return result_datum
.store_to(bcx
, dst
.val
);
1989 fn auto_ref
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1990 datum
: Datum
<'tcx
, Expr
>,
1992 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1995 // Ensure cleanup of `datum` if not already scheduled and obtain
1996 // a "by ref" pointer.
1997 let lv_datum
= unpack_datum
!(bcx
, datum
.to_lvalue_datum(bcx
, "autoref", expr
.id
));
1999 // Compute final type. Note that we are loose with the region and
2000 // mutability, since those things don't matter in trans.
2001 let referent_ty
= lv_datum
.ty
;
2002 let ptr_ty
= bcx
.tcx().mk_imm_ref(bcx
.tcx().mk_region(ty
::ReErased
), referent_ty
);
2004 // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
2005 // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers
2006 // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of
2007 // indirection and for thin pointers, this has no ill effects.
2008 let kind
= if type_is_sized(bcx
.tcx(), referent_ty
) {
2009 RvalueExpr(Rvalue
::new(ByValue
))
2011 LvalueExpr(lv_datum
.kind
)
2015 let llref
= lv_datum
.to_llref();
2016 DatumBlock
::new(bcx
, Datum
::new(llref
, ptr_ty
, kind
))
2019 fn deref_multiple
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2021 datum
: Datum
<'tcx
, Expr
>,
2023 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
2025 let mut datum
= datum
;
2027 let method_call
= MethodCall
::autoderef(expr
.id
, i
as u32);
2028 datum
= unpack_datum
!(bcx
, deref_once(bcx
, expr
, datum
, method_call
));
2030 DatumBlock { bcx: bcx, datum: datum }
2033 fn deref_once
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2035 datum
: Datum
<'tcx
, Expr
>,
2036 method_call
: MethodCall
)
2037 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
2038 let ccx
= bcx
.ccx();
2040 debug
!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
2041 expr
, datum
, method_call
);
2045 // Check for overloaded deref.
2046 let method
= ccx
.tcx().tables
.borrow().method_map
.get(&method_call
).cloned();
2047 let datum
= match method
{
2049 let method_ty
= monomorphize_type(bcx
, method
.ty
);
2051 // Overloaded. Invoke the deref() method, which basically
2052 // converts from the `Smaht<T>` pointer that we have into
2053 // a `&T` pointer. We can then proceed down the normal
2054 // path (below) to dereference that `&T`.
2055 let datum
= if method_call
.autoderef
== 0 {
2058 // Always perform an AutoPtr when applying an overloaded auto-deref
2059 unpack_datum
!(bcx
, auto_ref(bcx
, datum
, expr
))
2062 let ref_ty
= // invoked methods have their LB regions instantiated
2063 ccx
.tcx().no_late_bound_regions(&method_ty
.fn_ret()).unwrap();
2064 let scratch
= rvalue_scratch_datum(bcx
, ref_ty
, "overloaded_deref");
2066 bcx
= Callee
::method(bcx
, method
)
2067 .call(bcx
, expr
.debug_loc(),
2068 ArgOverloadedOp(datum
, None
),
2069 Some(SaveIn(scratch
.val
))).bcx
;
2070 scratch
.to_expr_datum()
2073 // Not overloaded. We already have a pointer we know how to deref.
2078 let r
= match datum
.ty
.sty
{
2079 ty
::TyBox(content_ty
) => {
2080 // Make sure we have an lvalue datum here to get the
2081 // proper cleanups scheduled
2082 let datum
= unpack_datum
!(
2083 bcx
, datum
.to_lvalue_datum(bcx
, "deref", expr
.id
));
2085 if type_is_sized(bcx
.tcx(), content_ty
) {
2086 let ptr
= load_ty(bcx
, datum
.val
, datum
.ty
);
2087 DatumBlock
::new(bcx
, Datum
::new(ptr
, content_ty
, LvalueExpr(datum
.kind
)))
2089 // A fat pointer and a DST lvalue have the same representation
2090 // just different types. Since there is no temporary for `*e`
2091 // here (because it is unsized), we cannot emulate the sized
2092 // object code path for running drop glue and free. Instead,
2093 // we schedule cleanup for `e`, turning it into an lvalue.
2095 let lval
= Lvalue
::new("expr::deref_once ty_uniq");
2096 let datum
= Datum
::new(datum
.val
, content_ty
, LvalueExpr(lval
));
2097 DatumBlock
::new(bcx
, datum
)
2101 ty
::TyRawPtr(ty
::TypeAndMut { ty: content_ty, .. }
) |
2102 ty
::TyRef(_
, ty
::TypeAndMut { ty: content_ty, .. }
) => {
2103 let lval
= Lvalue
::new("expr::deref_once ptr");
2104 if type_is_sized(bcx
.tcx(), content_ty
) {
2105 let ptr
= datum
.to_llscalarish(bcx
);
2107 // Always generate an lvalue datum, even if datum.mode is
2108 // an rvalue. This is because datum.mode is only an
2109 // rvalue for non-owning pointers like &T or *T, in which
2110 // case cleanup *is* scheduled elsewhere, by the true
2111 // owner (or, in the case of *T, by the user).
2112 DatumBlock
::new(bcx
, Datum
::new(ptr
, content_ty
, LvalueExpr(lval
)))
2114 // A fat pointer and a DST lvalue have the same representation
2115 // just different types.
2116 DatumBlock
::new(bcx
, Datum
::new(datum
.val
, content_ty
, LvalueExpr(lval
)))
2123 "deref invoked on expr of invalid type {:?}",
2128 debug
!("deref_once(expr={}, method_call={:?}, result={:?})",
2129 expr
.id
, method_call
, r
.datum
);
2144 fn codegen_strategy(&self) -> OverflowCodegen
{
2145 use self::OverflowCodegen
::{ViaIntrinsic, ViaInputCheck}
;
2147 OverflowOp
::Add
=> ViaIntrinsic(OverflowOpViaIntrinsic
::Add
),
2148 OverflowOp
::Sub
=> ViaIntrinsic(OverflowOpViaIntrinsic
::Sub
),
2149 OverflowOp
::Mul
=> ViaIntrinsic(OverflowOpViaIntrinsic
::Mul
),
2151 OverflowOp
::Shl
=> ViaInputCheck(OverflowOpViaInputCheck
::Shl
),
2152 OverflowOp
::Shr
=> ViaInputCheck(OverflowOpViaInputCheck
::Shr
),
2157 enum OverflowCodegen
{
2158 ViaIntrinsic(OverflowOpViaIntrinsic
),
2159 ViaInputCheck(OverflowOpViaInputCheck
),
/// Overflow-checked operations that are lowered by emitting an explicit
/// check on the input operand rather than calling an LLVM intrinsic.
/// `codegen_strategy` selects this path for the shift operators.
enum OverflowOpViaInputCheck {
    /// Left shift (`<<`) with overflow checking.
    Shl,
    /// Right shift (`>>`) with overflow checking.
    Shr,
}
/// Overflow-checked operations that are lowered via an LLVM
/// `*.with.overflow` intrinsic (e.g. `llvm.sadd.with.overflow.i32`);
/// the exact intrinsic name is chosen by `to_intrinsic_name` based on
/// the operand type's signedness and width.
enum OverflowOpViaIntrinsic {
    /// Checked addition (`llvm.sadd/uadd.with.overflow.*`).
    Add,
    /// Checked subtraction (`llvm.ssub/usub.with.overflow.*`).
    Sub,
    /// Checked multiplication (`llvm.smul/umul.with.overflow.*`).
    Mul,
}
2167 impl OverflowOpViaIntrinsic
{
2168 fn to_intrinsic
<'blk
, 'tcx
>(&self, bcx
: Block
<'blk
, 'tcx
>, lhs_ty
: Ty
) -> ValueRef
{
2169 let name
= self.to_intrinsic_name(bcx
.tcx(), lhs_ty
);
2170 bcx
.ccx().get_intrinsic(&name
)
    /// Maps this op plus an integer operand type to the name of the
    /// corresponding LLVM `*.with.overflow` intrinsic, e.g. `Add` on
    /// `i32` yields "llvm.sadd.with.overflow.i32" and on `u32` yields
    /// "llvm.uadd.with.overflow.i32". Bugs out on non-integer types.
    fn to_intrinsic_name(&self, tcx: TyCtxt, ty: Ty) -> &'static str {
        use syntax::ast::IntTy::*;
        use syntax::ast::UintTy::*;
        use rustc::ty::{TyInt, TyUint};

        // `isize`/`usize` have no intrinsic of their own: normalize them
        // to the fixed-width integer type matching the target's pointer
        // width before choosing the intrinsic name.
        let new_sty = match ty.sty {
            TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
                "16" => TyInt(I16),
                "32" => TyInt(I32),
                "64" => TyInt(I64),
                _ => bug!("unsupported target word size")
            },
            TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
                "16" => TyUint(U16),
                "32" => TyUint(U32),
                "64" => TyUint(U64),
                _ => bug!("unsupported target word size")
            },
            // Already a fixed-width integer: use it as-is.
            ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
            _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type",
                      ty)
        };

        match *self {
            OverflowOpViaIntrinsic::Add => match new_sty {
                TyInt(I8) => "llvm.sadd.with.overflow.i8",
                TyInt(I16) => "llvm.sadd.with.overflow.i16",
                TyInt(I32) => "llvm.sadd.with.overflow.i32",
                TyInt(I64) => "llvm.sadd.with.overflow.i64",

                TyUint(U8) => "llvm.uadd.with.overflow.i8",
                TyUint(U16) => "llvm.uadd.with.overflow.i16",
                TyUint(U32) => "llvm.uadd.with.overflow.i32",
                TyUint(U64) => "llvm.uadd.with.overflow.i64",

                // `new_sty` was normalized above to one of the fixed-width
                // integer types handled here.
                _ => unreachable!(),
            },
            OverflowOpViaIntrinsic::Sub => match new_sty {
                TyInt(I8) => "llvm.ssub.with.overflow.i8",
                TyInt(I16) => "llvm.ssub.with.overflow.i16",
                TyInt(I32) => "llvm.ssub.with.overflow.i32",
                TyInt(I64) => "llvm.ssub.with.overflow.i64",

                TyUint(U8) => "llvm.usub.with.overflow.i8",
                TyUint(U16) => "llvm.usub.with.overflow.i16",
                TyUint(U32) => "llvm.usub.with.overflow.i32",
                TyUint(U64) => "llvm.usub.with.overflow.i64",

                _ => unreachable!(),
            },
            OverflowOpViaIntrinsic::Mul => match new_sty {
                TyInt(I8) => "llvm.smul.with.overflow.i8",
                TyInt(I16) => "llvm.smul.with.overflow.i16",
                TyInt(I32) => "llvm.smul.with.overflow.i32",
                TyInt(I64) => "llvm.smul.with.overflow.i64",

                TyUint(U8) => "llvm.umul.with.overflow.i8",
                TyUint(U16) => "llvm.umul.with.overflow.i16",
                TyUint(U32) => "llvm.umul.with.overflow.i32",
                TyUint(U64) => "llvm.umul.with.overflow.i64",

                _ => unreachable!(),
            },
        }
    }
    /// Emits a call to the `*.with.overflow` intrinsic selected by `self`
    /// for `lhs <op> rhs`, plus a conditional panic on overflow.
    /// Returns the (possibly updated) block and the operation's result.
    fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
                                        info: NodeIdAndSpan,
                                        lhs_t: Ty<'tcx>, lhs: ValueRef,
                                        rhs: ValueRef,
                                        binop_debug_loc: DebugLoc)
                                        -> (Block<'blk, 'tcx>, ValueRef) {
        use rustc_const_math::{ConstMathErr, Op};

        let llfn = self.to_intrinsic(bcx, lhs_t);

        // The intrinsic returns an aggregate: element 0 is the iN result,
        // element 1 is an i1 flag indicating overflow.
        let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
        let result = ExtractValue(bcx, val, 0); // iN operation result
        let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"

        let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
                        binop_debug_loc);

        // Hint to LLVM that the overflow branch is the unlikely one.
        let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
        let expected = Call(bcx, expect, &[cond, C_bool(bcx.ccx(), false)],
                            binop_debug_loc);

        // Map back to the const-math op so the runtime panic message
        // matches the corresponding compile-time overflow diagnostic.
        let op = match *self {
            OverflowOpViaIntrinsic::Add => Op::Add,
            OverflowOpViaIntrinsic::Sub => Op::Sub,
            OverflowOpViaIntrinsic::Mul => Op::Mul,
        };

        // Emit the panic only on the overflow path.
        let bcx = base::with_cond(bcx, expected, |bcx|
            controlflow::trans_fail(bcx, info,
                InternedString::new(ConstMathErr::Overflow(op).description())));

        (bcx, result)
    }
2274 impl OverflowOpViaInputCheck
{
    /// Emits a shift (`Shl`/`Shr`) of `lhs` by `rhs` with an input check:
    /// panics at runtime if the shift amount has bits set outside the
    /// always-masked range, then performs the unchecked shift.
    /// Returns the (possibly updated) block and the shift's result.
    fn build_with_input_check<'blk, 'tcx>(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          info: NodeIdAndSpan,
                                          lhs_t: Ty<'tcx>,
                                          lhs: ValueRef,
                                          rhs: ValueRef,
                                          binop_debug_loc: DebugLoc)
                                          -> (Block<'blk, 'tcx>, ValueRef)
    {
        use rustc_const_math::{ConstMathErr, Op};

        let lhs_llty = val_ty(lhs);
        let rhs_llty = val_ty(rhs);

        // Panic if any bits are set outside of bits that we always
        // mask in.
        //
        // Note that the mask's value is derived from the LHS type
        // (since that is where the 32/64 distinction is relevant) but
        // the mask's type must match the RHS type (since they will
        // both be fed into an and-binop)
        let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);

        // Nonzero `outer_bits` means the shift amount is out of range.
        let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
        let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
        let (result, op) = match *self {
            OverflowOpViaInputCheck::Shl =>
                (build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), Op::Shl),
            OverflowOpViaInputCheck::Shr =>
                (build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), Op::Shr)
        };
        // Emit the panic only on the out-of-range path.
        let bcx = base::with_cond(bcx, cond, |bcx|
            controlflow::trans_fail(bcx, info,
                InternedString::new(ConstMathErr::Overflow(op).description())));

        (bcx, result)
    }
// Check if an integer or vector contains a nonzero element.
// Returns an i1 value that is true when `value` is nonzero; bugs out on
// any other LLVM type kind.
fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   value: ValueRef,
                                   binop_debug_loc: DebugLoc) -> ValueRef {
    let llty = val_ty(value);
    let kind = llty.kind();
    match kind {
        // Scalar integer: compare against the all-zero constant.
        TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
        TypeKind::Vector => {
            // Check if any elements of the vector are nonzero by treating
            // it as a wide integer and checking if the integer is nonzero.
            let width = llty.vector_length() as u64 * llty.element_type().int_width();
            let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
            // Recurse: `int_value` now hits the Integer arm above.
            build_nonzero_check(bcx, int_value, binop_debug_loc)
        },
        _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
    }
}
/// Entry point for translating a potentially-overflowing binary op.
/// When overflow checks are enabled, dispatches via `codegen_strategy`
/// to either the intrinsic-based or the input-check-based checked form;
/// otherwise emits the plain, unchecked operation.
fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
                                   lhs_t: Ty<'tcx>, lhs: ValueRef,
                                   rhs: ValueRef,
                                   binop_debug_loc: DebugLoc)
                                   -> (Block<'blk, 'tcx>, ValueRef) {
    // Block is already unreachable: don't emit any code, just return an
    // undef placeholder value.
    if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
    if bcx.ccx().check_overflow() {
        match oop.codegen_strategy() {
            OverflowCodegen::ViaIntrinsic(oop) =>
                oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
            OverflowCodegen::ViaInputCheck(oop) =>
                oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
        }
    } else {
        // Overflow checking disabled: emit the unchecked operation.
        let res = match oop {
            OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Shl =>
                build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
            OverflowOp::Shr =>
                build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
        };
        (bcx, res)
    }
}
2363 /// We categorize expressions into three kinds. The distinction between
2364 /// lvalue/rvalue is fundamental to the language. The distinction between the
2365 /// two kinds of rvalues is an artifact of trans which reflects how we will
2366 /// generate code for that kind of expression. See trans/expr.rs for more
2368 #[derive(Copy, Clone)]
2376 fn expr_kind
<'a
, 'tcx
>(tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>, expr
: &hir
::Expr
) -> ExprKind
{
2377 if tcx
.is_method_call(expr
.id
) {
2378 // Overloaded operations are generally calls, and hence they are
2379 // generated via DPS, but there are a few exceptions:
2380 return match expr
.node
{
2381 // `a += b` has a unit result.
2382 hir
::ExprAssignOp(..) => ExprKind
::RvalueStmt
,
2384 // the deref method invoked for `*a` always yields an `&T`
2385 hir
::ExprUnary(hir
::UnDeref
, _
) => ExprKind
::Lvalue
,
2387 // the index method invoked for `a[i]` always yields an `&T`
2388 hir
::ExprIndex(..) => ExprKind
::Lvalue
,
2390 // in the general case, result could be any type, use DPS
2391 _
=> ExprKind
::RvalueDps
2396 hir
::ExprPath(..) => {
2397 match tcx
.expect_def(expr
.id
) {
2398 // Put functions and ctors with the ADTs, as they
2399 // are zero-sized, so DPS is the cheapest option.
2400 Def
::Struct(..) | Def
::Variant(..) |
2401 Def
::Fn(..) | Def
::Method(..) => {
2405 // Note: there is actually a good case to be made that
2406 // DefArg's, particularly those of immediate type, ought to
2407 // considered rvalues.
2410 Def
::Local(..) => ExprKind
::Lvalue
,
2413 Def
::AssociatedConst(..) => ExprKind
::RvalueDatum
,
2418 "uncategorized def for expr {}: {:?}",
2425 hir
::ExprType(ref expr
, _
) => {
2426 expr_kind(tcx
, expr
)
2429 hir
::ExprUnary(hir
::UnDeref
, _
) |
2430 hir
::ExprField(..) |
2431 hir
::ExprTupField(..) |
2432 hir
::ExprIndex(..) => {
2437 hir
::ExprMethodCall(..) |
2438 hir
::ExprStruct(..) |
2441 hir
::ExprMatch(..) |
2442 hir
::ExprClosure(..) |
2443 hir
::ExprBlock(..) |
2444 hir
::ExprRepeat(..) |
2445 hir
::ExprVec(..) => {
2449 hir
::ExprLit(ref lit
) if lit
.node
.is_str() => {
2453 hir
::ExprBreak(..) |
2454 hir
::ExprAgain(..) |
2456 hir
::ExprWhile(..) |
2458 hir
::ExprAssign(..) |
2459 hir
::ExprInlineAsm(..) |
2460 hir
::ExprAssignOp(..) => {
2461 ExprKind
::RvalueStmt
2464 hir
::ExprLit(_
) | // Note: LitStr is carved out above
2465 hir
::ExprUnary(..) |
2467 hir
::ExprAddrOf(..) |
2468 hir
::ExprBinary(..) |
2469 hir
::ExprCast(..) => {
2470 ExprKind
::RvalueDatum