1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Translation of Expressions
13 //! The expr module handles translation of expressions. The most general
14 //! translation routine is `trans()`, which will translate an expression
15 //! into a datum. `trans_into()` is also available, which will translate
16 //! an expression and write the result directly into memory, sometimes
17 //! avoiding the need for a temporary stack slot. Finally,
18 //! `trans_to_lvalue()` is available if you'd like to ensure that the
19 //! result has cleanup scheduled.
21 //! Internally, each of these functions dispatches to various other
22 //! expression functions depending on the kind of expression. We divide
23 //! up expressions into:
25 //! - **Datum expressions:** Those that most naturally yield values.
26 //! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
27 //! - **DPS expressions:** Those that most naturally write into a location
28 //! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
29 //! - **Statement expressions:** That that do not generate a meaningful
30 //! result. Examples would be `while { ... }` or `return 44`.
32 //! Public entry points:
34 //! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
35 //! storing the result into `dest`. This is the preferred form, if you
38 //! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
39 //! `Datum` with the result. You can then store the datum, inspect
40 //! the value, etc. This may introduce temporaries if the datum is a
43 //! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
44 //! expression and ensures that the result has a cleanup associated with it,
45 //! creating a temporary stack slot if necessary.
47 //! - `trans_local_var -> Datum`: looks up a local variable or upvar.
49 #![allow(non_camel_case_types)]
51 pub use self::Dest
::*;
52 use self::lazy_binop_ty
::*;
55 use llvm
::{self, ValueRef, TypeKind}
;
56 use middle
::const_qualif
::ConstQualif
;
58 use middle
::subst
::Substs
;
59 use trans
::{_match, adt, asm, base, callee, closure, consts, controlflow}
;
62 use trans
::cleanup
::{self, CleanupMethods, DropHintMethods}
;
65 use trans
::debuginfo
::{self, DebugLoc, ToDebugLoc}
;
73 use middle
::ty
::adjustment
::{AdjustDerefRef, AdjustReifyFnPointer}
;
74 use middle
::ty
::adjustment
::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}
;
75 use middle
::ty
::adjustment
::CustomCoerceUnsized
;
76 use middle
::ty
::{self, Ty}
;
77 use middle
::ty
::MethodCall
;
78 use middle
::ty
::cast
::{CastKind, CastTy}
;
79 use util
::common
::indenter
;
80 use trans
::machine
::{llsize_of, llsize_of_alloc}
;
81 use trans
::type_
::Type
;
86 use syntax
::{ast, codemap}
;
87 use syntax
::parse
::token
::InternedString
;
89 use syntax
::parse
::token
;
94 // These are passed around by the code generating functions to track the
95 // destination of a computation's value.
// `Dest` tracks where a computed value should be written: into a concrete
// LLVM location (`SaveIn`) or discarded entirely (`Ignore`).
// NOTE(review): this extract is elided — the `Dest` enum body and the
// surrounding `impl` / `match *self` lines (original 98-103, 105) are
// missing, so only the derive and the `to_string` arms are visible here.
97 #[derive(Copy, Clone, PartialEq)]
// Renders the destination for debug output; for `SaveIn` it prints the
// LLVM value via the type-name helper on the crate context.
104 pub fn to_string(&self, ccx
: &CrateContext
) -> String
{
106 SaveIn(v
) => format
!("SaveIn({})", ccx
.tn().val_to_string(v
)),
107 Ignore
=> "Ignore".to_string()
112 /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
113 /// better optimized LLVM code.
114 pub fn trans_into
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
117 -> Block
<'blk
, 'tcx
> {
120 debuginfo
::set_source_location(bcx
.fcx
, expr
.id
, expr
.span
);
122 if adjustment_required(bcx
, expr
) {
123 // use trans, which may be less efficient but
124 // which will perform the adjustments:
125 let datum
= unpack_datum
!(bcx
, trans(bcx
, expr
));
126 return datum
.store_to_dest(bcx
, dest
, expr
.id
);
129 let qualif
= *bcx
.tcx().const_qualif_map
.borrow().get(&expr
.id
).unwrap();
130 if !qualif
.intersects(ConstQualif
::NOT_CONST
| ConstQualif
::NEEDS_DROP
) {
131 if !qualif
.intersects(ConstQualif
::PREFER_IN_PLACE
) {
132 if let SaveIn(lldest
) = dest
{
133 match consts
::get_const_expr_as_global(bcx
.ccx(), expr
, qualif
,
134 bcx
.fcx
.param_substs
,
135 consts
::TrueConst
::No
) {
137 // Cast pointer to destination, because constants
138 // have different types.
139 let lldest
= PointerCast(bcx
, lldest
, val_ty(global
));
140 memcpy_ty(bcx
, lldest
, global
, expr_ty_adjusted(bcx
, expr
));
143 Err(consts
::ConstEvalFailure
::Runtime(_
)) => {
144 // in case const evaluation errors, translate normally
145 // debug assertions catch the same errors
148 Err(consts
::ConstEvalFailure
::Compiletime(_
)) => {
154 // If we see a const here, that's because it evaluates to a type with zero size. We
155 // should be able to just discard it, since const expressions are guaranteed not to
156 // have side effects. This seems to be reached through tuple struct constructors being
157 // passed zero-size constants.
158 if let hir
::ExprPath(..) = expr
.node
{
159 match bcx
.def(expr
.id
) {
160 Def
::Const(_
) | Def
::AssociatedConst(_
) => {
161 assert
!(type_is_zero_size(bcx
.ccx(), bcx
.tcx().node_id_to_type(expr
.id
)));
168 // Even if we don't have a value to emit, and the expression
169 // doesn't have any side-effects, we still have to translate the
170 // body of any closures.
171 // FIXME: Find a better way of handling this case.
173 // The only way we're going to see a `const` at this point is if
174 // it prefers in-place instantiation, likely because it contains
175 // `[x; N]` somewhere within.
177 hir
::ExprPath(..) => {
178 match bcx
.def(expr
.id
) {
179 Def
::Const(did
) | Def
::AssociatedConst(did
) => {
180 let empty_substs
= bcx
.tcx().mk_substs(Substs
::trans_empty());
181 let const_expr
= consts
::get_const_expr(bcx
.ccx(), did
, expr
,
183 // Temporarily get cleanup scopes out of the way,
184 // as they require sub-expressions to be contained
185 // inside the current AST scope.
186 // These should record no cleanups anyways, `const`
187 // can't have destructors.
188 let scopes
= mem
::replace(&mut *bcx
.fcx
.scopes
.borrow_mut(),
190 // Lock emitted debug locations to the location of
191 // the constant reference expression.
192 debuginfo
::with_source_location_override(bcx
.fcx
,
195 bcx
= trans_into(bcx
, const_expr
, dest
)
197 let scopes
= mem
::replace(&mut *bcx
.fcx
.scopes
.borrow_mut(),
199 assert
!(scopes
.is_empty());
210 debug
!("trans_into() expr={:?}", expr
);
212 let cleanup_debug_loc
= debuginfo
::get_cleanup_debug_loc_for_ast_node(bcx
.ccx(),
216 bcx
.fcx
.push_ast_cleanup_scope(cleanup_debug_loc
);
218 let kind
= expr_kind(bcx
.tcx(), expr
);
220 ExprKind
::Lvalue
| ExprKind
::RvalueDatum
=> {
221 trans_unadjusted(bcx
, expr
).store_to_dest(dest
, expr
.id
)
223 ExprKind
::RvalueDps
=> {
224 trans_rvalue_dps_unadjusted(bcx
, expr
, dest
)
226 ExprKind
::RvalueStmt
=> {
227 trans_rvalue_stmt_unadjusted(bcx
, expr
)
231 bcx
.fcx
.pop_and_trans_ast_cleanup_scope(bcx
, expr
.id
)
234 /// Translates an expression, returning a datum (and new block) encapsulating the result. When
235 /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
237 pub fn trans
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
239 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
240 debug
!("trans(expr={:?})", expr
);
244 let qualif
= *bcx
.tcx().const_qualif_map
.borrow().get(&expr
.id
).unwrap();
245 let adjusted_global
= !qualif
.intersects(ConstQualif
::NON_STATIC_BORROWS
);
246 let global
= if !qualif
.intersects(ConstQualif
::NOT_CONST
| ConstQualif
::NEEDS_DROP
) {
247 match consts
::get_const_expr_as_global(bcx
.ccx(), expr
, qualif
,
248 bcx
.fcx
.param_substs
,
249 consts
::TrueConst
::No
) {
251 if qualif
.intersects(ConstQualif
::HAS_STATIC_BORROWS
) {
252 // Is borrowed as 'static, must return lvalue.
254 // Cast pointer to global, because constants have different types.
255 let const_ty
= expr_ty_adjusted(bcx
, expr
);
256 let llty
= type_of
::type_of(bcx
.ccx(), const_ty
);
257 let global
= PointerCast(bcx
, global
, llty
.ptr_to());
258 let datum
= Datum
::new(global
, const_ty
, Lvalue
::new("expr::trans"));
259 return DatumBlock
::new(bcx
, datum
.to_expr_datum());
262 // Otherwise, keep around and perform adjustments, if needed.
263 let const_ty
= if adjusted_global
{
264 expr_ty_adjusted(bcx
, expr
)
269 // This could use a better heuristic.
270 Some(if type_is_immediate(bcx
.ccx(), const_ty
) {
271 // Cast pointer to global, because constants have different types.
272 let llty
= type_of
::type_of(bcx
.ccx(), const_ty
);
273 let global
= PointerCast(bcx
, global
, llty
.ptr_to());
274 // Maybe just get the value directly, instead of loading it?
275 immediate_rvalue(load_ty(bcx
, global
, const_ty
), const_ty
)
277 let scratch
= alloc_ty(bcx
, const_ty
, "const");
278 call_lifetime_start(bcx
, scratch
);
279 let lldest
= if !const_ty
.is_structural() {
280 // Cast pointer to slot, because constants have different types.
281 PointerCast(bcx
, scratch
, val_ty(global
))
283 // In this case, memcpy_ty calls llvm.memcpy after casting both
284 // source and destination to i8*, so we don't need any casts.
287 memcpy_ty(bcx
, lldest
, global
, const_ty
);
288 Datum
::new(scratch
, const_ty
, Rvalue
::new(ByRef
))
291 Err(consts
::ConstEvalFailure
::Runtime(_
)) => {
292 // in case const evaluation errors, translate normally
293 // debug assertions catch the same errors
297 Err(consts
::ConstEvalFailure
::Compiletime(_
)) => {
298 // generate a dummy llvm value
299 let const_ty
= expr_ty(bcx
, expr
);
300 let llty
= type_of
::type_of(bcx
.ccx(), const_ty
);
301 let dummy
= C_undef(llty
.ptr_to());
302 Some(Datum
::new(dummy
, const_ty
, Rvalue
::new(ByRef
)))
309 let cleanup_debug_loc
= debuginfo
::get_cleanup_debug_loc_for_ast_node(bcx
.ccx(),
313 fcx
.push_ast_cleanup_scope(cleanup_debug_loc
);
314 let datum
= match global
{
315 Some(rvalue
) => rvalue
.to_expr_datum(),
316 None
=> unpack_datum
!(bcx
, trans_unadjusted(bcx
, expr
))
318 let datum
= if adjusted_global
{
319 datum
// trans::consts already performed adjustments.
321 unpack_datum
!(bcx
, apply_adjustments(bcx
, expr
, datum
))
323 bcx
= fcx
.pop_and_trans_ast_cleanup_scope(bcx
, expr
.id
);
324 return DatumBlock
::new(bcx
, datum
);
327 pub fn get_meta(bcx
: Block
, fat_ptr
: ValueRef
) -> ValueRef
{
328 StructGEP(bcx
, fat_ptr
, abi
::FAT_PTR_EXTRA
)
331 pub fn get_dataptr(bcx
: Block
, fat_ptr
: ValueRef
) -> ValueRef
{
332 StructGEP(bcx
, fat_ptr
, abi
::FAT_PTR_ADDR
)
335 pub fn copy_fat_ptr(bcx
: Block
, src_ptr
: ValueRef
, dst_ptr
: ValueRef
) {
336 Store(bcx
, Load(bcx
, get_dataptr(bcx
, src_ptr
)), get_dataptr(bcx
, dst_ptr
));
337 Store(bcx
, Load(bcx
, get_meta(bcx
, src_ptr
)), get_meta(bcx
, dst_ptr
));
// Reports whether translating `expr` must go through the general `trans()`
// path so that recorded type adjustments (autoderef/autoref/unsizing,
// fn-pointer coercions) get applied. Returns false immediately when the
// adjustment table has no entry for this expression.
// NOTE(review): several original lines are elided in this extract (e.g.
// the `Some(adjustment)` arm header around original 344-346 and various
// closing braces), so the match structure below is incomplete as shown.
340 fn adjustment_required
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
341 expr
: &hir
::Expr
) -> bool
{
342 let adjustment
= match bcx
.tcx().tables
.borrow().adjustments
.get(&expr
.id
).cloned() {
343 None
=> { return false; }
347 // Don't skip a conversion from Box<T> to &T, etc.
348 if bcx
.tcx().is_overloaded_autoderef(expr
.id
, 0) {
353 AdjustReifyFnPointer
=> {
354 // FIXME(#19925) once fn item types are
355 // zero-sized, we'll need to return true here
358 AdjustUnsafeFnPointer
| AdjustMutToConstPointer
=> {
359 // purely a type-level thing
// A pure reborrow (exactly one autoderef followed by an autoref, with no
// unsizing) requires no adjustment work; any other deref/ref combination
// does.
362 AdjustDerefRef(ref adj
) => {
363 // We are a bit paranoid about adjustments and thus might have a re-
364 // borrow here which merely derefs and then refs again (it might have
365 // a different region or mutability, but we don't care here).
366 !(adj
.autoderefs
== 1 && adj
.autoref
.is_some() && adj
.unsize
.is_none())
371 /// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
372 /// translation of `expr`.
373 fn apply_adjustments
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
375 datum
: Datum
<'tcx
, Expr
>)
376 -> DatumBlock
<'blk
, 'tcx
, Expr
>
379 let mut datum
= datum
;
380 let adjustment
= match bcx
.tcx().tables
.borrow().adjustments
.get(&expr
.id
).cloned() {
382 return DatumBlock
::new(bcx
, datum
);
386 debug
!("unadjusted datum for expr {:?}: {} adjustment={:?}",
388 datum
.to_string(bcx
.ccx()),
391 AdjustReifyFnPointer
=> {
392 // FIXME(#19925) once fn item types are
393 // zero-sized, we'll need to do something here
395 AdjustUnsafeFnPointer
| AdjustMutToConstPointer
=> {
396 // purely a type-level thing
398 AdjustDerefRef(ref adj
) => {
399 let skip_reborrows
= if adj
.autoderefs
== 1 && adj
.autoref
.is_some() {
400 // We are a bit paranoid about adjustments and thus might have a re-
401 // borrow here which merely derefs and then refs again (it might have
402 // a different region or mutability, but we don't care here).
404 // Don't skip a conversion from Box<T> to &T, etc.
406 if bcx
.tcx().is_overloaded_autoderef(expr
.id
, 0) {
407 // Don't skip an overloaded deref.
419 if adj
.autoderefs
> skip_reborrows
{
421 let lval
= unpack_datum
!(bcx
, datum
.to_lvalue_datum(bcx
, "auto_deref", expr
.id
));
422 datum
= unpack_datum
!(bcx
, deref_multiple(bcx
, expr
,
423 lval
.to_expr_datum(),
424 adj
.autoderefs
- skip_reborrows
));
427 // (You might think there is a more elegant way to do this than a
428 // skip_reborrows bool, but then you remember that the borrow checker exists).
429 if skip_reborrows
== 0 && adj
.autoref
.is_some() {
430 datum
= unpack_datum
!(bcx
, auto_ref(bcx
, datum
, expr
));
433 if let Some(target
) = adj
.unsize
{
434 // We do not arrange cleanup ourselves; if we already are an
435 // L-value, then cleanup will have already been scheduled (and
436 // the `datum.to_rvalue_datum` call below will emit code to zero
437 // the drop flag when moving out of the L-value). If we are an
438 // R-value, then we do not need to schedule cleanup.
439 let source_datum
= unpack_datum
!(bcx
,
440 datum
.to_rvalue_datum(bcx
, "__coerce_source"));
442 let target
= bcx
.monomorphize(&target
);
444 let scratch
= alloc_ty(bcx
, target
, "__coerce_target");
445 call_lifetime_start(bcx
, scratch
);
446 let target_datum
= Datum
::new(scratch
, target
,
448 bcx
= coerce_unsized(bcx
, expr
.span
, source_datum
, target_datum
);
449 datum
= Datum
::new(scratch
, target
,
450 RvalueExpr(Rvalue
::new(ByRef
)));
454 debug
!("after adjustments, datum={}", datum
.to_string(bcx
.ccx()));
455 DatumBlock
::new(bcx
, datum
)
458 fn coerce_unsized
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
460 source
: Datum
<'tcx
, Rvalue
>,
461 target
: Datum
<'tcx
, Rvalue
>)
462 -> Block
<'blk
, 'tcx
> {
464 debug
!("coerce_unsized({} -> {})",
465 source
.to_string(bcx
.ccx()),
466 target
.to_string(bcx
.ccx()));
468 match (&source
.ty
.sty
, &target
.ty
.sty
) {
469 (&ty
::TyBox(a
), &ty
::TyBox(b
)) |
470 (&ty
::TyRef(_
, ty
::TypeAndMut { ty: a, .. }
),
471 &ty
::TyRef(_
, ty
::TypeAndMut { ty: b, .. }
)) |
472 (&ty
::TyRef(_
, ty
::TypeAndMut { ty: a, .. }
),
473 &ty
::TyRawPtr(ty
::TypeAndMut { ty: b, .. }
)) |
474 (&ty
::TyRawPtr(ty
::TypeAndMut { ty: a, .. }
),
475 &ty
::TyRawPtr(ty
::TypeAndMut { ty: b, .. }
)) => {
476 let (inner_source
, inner_target
) = (a
, b
);
478 let (base
, old_info
) = if !type_is_sized(bcx
.tcx(), inner_source
) {
479 // Normally, the source is a thin pointer and we are
480 // adding extra info to make a fat pointer. The exception
481 // is when we are upcasting an existing object fat pointer
482 // to use a different vtable. In that case, we want to
483 // load out the original data pointer so we can repackage
485 (Load(bcx
, get_dataptr(bcx
, source
.val
)),
486 Some(Load(bcx
, get_meta(bcx
, source
.val
))))
488 let val
= if source
.kind
.is_by_ref() {
489 load_ty(bcx
, source
.val
, source
.ty
)
496 let info
= unsized_info(bcx
.ccx(), inner_source
, inner_target
,
497 old_info
, bcx
.fcx
.param_substs
);
499 // Compute the base pointer. This doesn't change the pointer value,
500 // but merely its type.
501 let ptr_ty
= type_of
::in_memory_type_of(bcx
.ccx(), inner_target
).ptr_to();
502 let base
= PointerCast(bcx
, base
, ptr_ty
);
504 Store(bcx
, base
, get_dataptr(bcx
, target
.val
));
505 Store(bcx
, info
, get_meta(bcx
, target
.val
));
508 // This can be extended to enums and tuples in the future.
509 // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
510 (&ty
::TyStruct(def_id_a
, _
), &ty
::TyStruct(def_id_b
, _
)) => {
511 assert_eq
!(def_id_a
, def_id_b
);
513 // The target is already by-ref because it's to be written to.
514 let source
= unpack_datum
!(bcx
, source
.to_ref_datum(bcx
));
515 assert
!(target
.kind
.is_by_ref());
517 let kind
= custom_coerce_unsize_info(bcx
.ccx(), source
.ty
, target
.ty
);
519 let repr_source
= adt
::represent_type(bcx
.ccx(), source
.ty
);
520 let src_fields
= match &*repr_source
{
521 &adt
::Repr
::Univariant(ref s
, _
) => &s
.fields
,
522 _
=> bcx
.sess().span_bug(span
,
523 &format
!("Non univariant struct? (repr_source: {:?})",
526 let repr_target
= adt
::represent_type(bcx
.ccx(), target
.ty
);
527 let target_fields
= match &*repr_target
{
528 &adt
::Repr
::Univariant(ref s
, _
) => &s
.fields
,
529 _
=> bcx
.sess().span_bug(span
,
530 &format
!("Non univariant struct? (repr_target: {:?})",
534 let coerce_index
= match kind
{
535 CustomCoerceUnsized
::Struct(i
) => i
537 assert
!(coerce_index
< src_fields
.len() && src_fields
.len() == target_fields
.len());
539 let source_val
= adt
::MaybeSizedValue
::sized(source
.val
);
540 let target_val
= adt
::MaybeSizedValue
::sized(target
.val
);
542 let iter
= src_fields
.iter().zip(target_fields
).enumerate();
543 for (i
, (src_ty
, target_ty
)) in iter
{
544 let ll_source
= adt
::trans_field_ptr(bcx
, &repr_source
, source_val
, Disr(0), i
);
545 let ll_target
= adt
::trans_field_ptr(bcx
, &repr_target
, target_val
, Disr(0), i
);
547 // If this is the field we need to coerce, recurse on it.
548 if i
== coerce_index
{
549 coerce_unsized(bcx
, span
,
550 Datum
::new(ll_source
, src_ty
,
552 Datum
::new(ll_target
, target_ty
,
553 Rvalue
::new(ByRef
)));
555 // Otherwise, simply copy the data from the source.
556 assert
!(src_ty
.is_phantom_data() || src_ty
== target_ty
);
557 memcpy_ty(bcx
, ll_target
, ll_source
, src_ty
);
561 _
=> bcx
.sess().bug(&format
!("coerce_unsized: invalid coercion {:?} -> {:?}",
568 /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
569 /// that the expr represents.
571 /// If this expression is an rvalue, this implies introducing a temporary. In other words,
572 /// something like `x().f` is translated into roughly the equivalent of
574 /// { tmp = x(); tmp.f }
// Delegates to `trans()` and then converts the resulting datum into an
// lvalue datum, which ensures cleanup is scheduled for any temporary
// introduced (`name` labels that temporary for diagnostics).
// NOTE(review): the parameter lines (original 576-577, carrying `expr`
// and `name`) and the closing brace are elided in this extract.
575 pub fn trans_to_lvalue
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
578 -> DatumBlock
<'blk
, 'tcx
, Lvalue
> {
580 let datum
= unpack_datum
!(bcx
, trans(bcx
, expr
));
581 return datum
.to_lvalue_datum(bcx
, name
, expr
.id
);
584 /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
586 fn trans_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
588 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
591 debug
!("trans_unadjusted(expr={:?})", expr
);
592 let _indenter
= indenter();
594 debuginfo
::set_source_location(bcx
.fcx
, expr
.id
, expr
.span
);
596 return match expr_kind(bcx
.tcx(), expr
) {
597 ExprKind
::Lvalue
| ExprKind
::RvalueDatum
=> {
598 let datum
= unpack_datum
!(bcx
, {
599 trans_datum_unadjusted(bcx
, expr
)
602 DatumBlock {bcx: bcx, datum: datum}
605 ExprKind
::RvalueStmt
=> {
606 bcx
= trans_rvalue_stmt_unadjusted(bcx
, expr
);
607 nil(bcx
, expr_ty(bcx
, expr
))
610 ExprKind
::RvalueDps
=> {
611 let ty
= expr_ty(bcx
, expr
);
612 if type_is_zero_size(bcx
.ccx(), ty
) {
613 bcx
= trans_rvalue_dps_unadjusted(bcx
, expr
, Ignore
);
616 let scratch
= rvalue_scratch_datum(bcx
, ty
, "");
617 bcx
= trans_rvalue_dps_unadjusted(
618 bcx
, expr
, SaveIn(scratch
.val
));
620 // Note: this is not obviously a good idea. It causes
621 // immediate values to be loaded immediately after a
622 // return from a call or other similar expression,
623 // which in turn leads to alloca's having shorter
624 // lifetimes and hence larger stack frames. However,
625 // in turn it can lead to more register pressure.
626 // Still, in practice it seems to increase
627 // performance, since we have fewer problems with
629 let scratch
= unpack_datum
!(
630 bcx
, scratch
.to_appropriate_datum(bcx
));
632 DatumBlock
::new(bcx
, scratch
.to_expr_datum())
637 fn nil
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, ty
: Ty
<'tcx
>)
638 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
639 let llval
= C_undef(type_of
::type_of(bcx
.ccx(), ty
));
640 let datum
= immediate_rvalue(llval
, ty
);
641 DatumBlock
::new(bcx
, datum
.to_expr_datum())
645 fn trans_datum_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
647 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
650 let _icx
= push_ctxt("trans_datum_unadjusted");
653 hir
::ExprType(ref e
, _
) => {
656 hir
::ExprPath(..) => {
657 trans_def(bcx
, expr
, bcx
.def(expr
.id
))
659 hir
::ExprField(ref base
, name
) => {
660 trans_rec_field(bcx
, &base
, name
.node
)
662 hir
::ExprTupField(ref base
, idx
) => {
663 trans_rec_tup_field(bcx
, &base
, idx
.node
)
665 hir
::ExprIndex(ref base
, ref idx
) => {
666 trans_index(bcx
, expr
, &base
, &idx
, MethodCall
::expr(expr
.id
))
668 hir
::ExprBox(ref contents
) => {
669 // Special case for `Box<T>`
670 let box_ty
= expr_ty(bcx
, expr
);
671 let contents_ty
= expr_ty(bcx
, &contents
);
674 trans_uniq_expr(bcx
, expr
, box_ty
, &contents
, contents_ty
)
676 _
=> bcx
.sess().span_bug(expr
.span
,
677 "expected unique box")
681 hir
::ExprLit(ref lit
) => trans_immediate_lit(bcx
, expr
, &lit
),
682 hir
::ExprBinary(op
, ref lhs
, ref rhs
) => {
683 trans_binary(bcx
, expr
, op
, &lhs
, &rhs
)
685 hir
::ExprUnary(op
, ref x
) => {
686 trans_unary(bcx
, expr
, op
, &x
)
688 hir
::ExprAddrOf(_
, ref x
) => {
690 hir
::ExprRepeat(..) | hir
::ExprVec(..) => {
691 // Special case for slices.
692 let cleanup_debug_loc
=
693 debuginfo
::get_cleanup_debug_loc_for_ast_node(bcx
.ccx(),
697 fcx
.push_ast_cleanup_scope(cleanup_debug_loc
);
698 let datum
= unpack_datum
!(
699 bcx
, tvec
::trans_slice_vec(bcx
, expr
, &x
));
700 bcx
= fcx
.pop_and_trans_ast_cleanup_scope(bcx
, x
.id
);
701 DatumBlock
::new(bcx
, datum
)
704 trans_addr_of(bcx
, expr
, &x
)
708 hir
::ExprCast(ref val
, _
) => {
709 // Datum output mode means this is a scalar cast:
710 trans_imm_cast(bcx
, &val
, expr
.id
)
713 bcx
.tcx().sess
.span_bug(
715 &format
!("trans_rvalue_datum_unadjusted reached \
716 fall-through case: {:?}",
722 fn trans_field
<'blk
, 'tcx
, F
>(bcx
: Block
<'blk
, 'tcx
>,
725 -> DatumBlock
<'blk
, 'tcx
, Expr
> where
726 F
: FnOnce(&'blk ty
::ctxt
<'tcx
>, &VariantInfo
<'tcx
>) -> usize,
729 let _icx
= push_ctxt("trans_rec_field");
731 let base_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, base
, "field"));
732 let bare_ty
= base_datum
.ty
;
733 let repr
= adt
::represent_type(bcx
.ccx(), bare_ty
);
734 let vinfo
= VariantInfo
::from_ty(bcx
.tcx(), bare_ty
, None
);
736 let ix
= get_idx(bcx
.tcx(), &vinfo
);
737 let d
= base_datum
.get_element(
741 adt
::trans_field_ptr(bcx
, &repr
, srcval
, vinfo
.discr
, ix
)
744 if type_is_sized(bcx
.tcx(), d
.ty
) {
745 DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
747 let scratch
= rvalue_scratch_datum(bcx
, d
.ty
, "");
748 Store(bcx
, d
.val
, get_dataptr(bcx
, scratch
.val
));
749 let info
= Load(bcx
, get_meta(bcx
, base_datum
.val
));
750 Store(bcx
, info
, get_meta(bcx
, scratch
.val
));
752 // Always generate an lvalue datum, because this pointer doesn't own
753 // the data and cleanup is scheduled elsewhere.
754 DatumBlock
::new(bcx
, Datum
::new(scratch
.val
, scratch
.ty
, LvalueExpr(d
.kind
)))
758 /// Translates `base.field`.
// Thin wrapper over `trans_field`: the closure resolves the named field
// to its positional index via `VariantInfo::field_index`.
// NOTE(review): the parameter lines (original 760-761, carrying `base`
// and `field`) and the closing brace are elided in this extract.
759 fn trans_rec_field
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
762 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
763 trans_field(bcx
, base
, |_
, vinfo
| vinfo
.field_index(field
))
766 /// Translates `base.<idx>`.
// Thin wrapper over `trans_field`: tuple fields are already positional,
// so the closure ignores the variant info and returns `idx` directly.
// NOTE(review): the parameter lines (original 768-769, carrying `base`
// and `idx`) and the closing brace are elided in this extract.
767 fn trans_rec_tup_field
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
770 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
771 trans_field(bcx
, base
, |_
, _
| idx
)
774 fn trans_index
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
775 index_expr
: &hir
::Expr
,
778 method_call
: MethodCall
)
779 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
780 //! Translates `base[idx]`.
782 let _icx
= push_ctxt("trans_index");
786 let index_expr_debug_loc
= index_expr
.debug_loc();
788 // Check for overloaded index.
789 let method_ty
= ccx
.tcx()
794 .map(|method
| method
.ty
);
795 let elt_datum
= match method_ty
{
797 let method_ty
= monomorphize_type(bcx
, method_ty
);
799 let base_datum
= unpack_datum
!(bcx
, trans(bcx
, base
));
801 // Translate index expression.
802 let ix_datum
= unpack_datum
!(bcx
, trans(bcx
, idx
));
804 let ref_ty
= // invoked methods have LB regions instantiated:
805 bcx
.tcx().no_late_bound_regions(&method_ty
.fn_ret()).unwrap().unwrap();
806 let elt_ty
= match ref_ty
.builtin_deref(true, ty
::NoPreference
) {
808 bcx
.tcx().sess
.span_bug(index_expr
.span
,
809 "index method didn't return a \
810 dereferenceable type?!")
812 Some(elt_tm
) => elt_tm
.ty
,
815 // Overloaded. Evaluate `trans_overloaded_op`, which will
816 // invoke the user's index() method, which basically yields
817 // a `&T` pointer. We can then proceed down the normal
818 // path (below) to dereference that `&T`.
819 let scratch
= rvalue_scratch_datum(bcx
, ref_ty
, "overloaded_index_elt");
821 trans_overloaded_op(bcx
,
825 Some((ix_datum
, idx
.id
)),
826 Some(SaveIn(scratch
.val
)),
828 let datum
= scratch
.to_expr_datum();
829 let lval
= Lvalue
::new("expr::trans_index overload");
830 if type_is_sized(bcx
.tcx(), elt_ty
) {
831 Datum
::new(datum
.to_llscalarish(bcx
), elt_ty
, LvalueExpr(lval
))
833 Datum
::new(datum
.val
, elt_ty
, LvalueExpr(lval
))
837 let base_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
,
841 // Translate index expression and cast to a suitable LLVM integer.
842 // Rust is less strict than LLVM in this regard.
843 let ix_datum
= unpack_datum
!(bcx
, trans(bcx
, idx
));
844 let ix_val
= ix_datum
.to_llscalarish(bcx
);
845 let ix_size
= machine
::llbitsize_of_real(bcx
.ccx(),
847 let int_size
= machine
::llbitsize_of_real(bcx
.ccx(),
850 if ix_size
< int_size
{
851 if expr_ty(bcx
, idx
).is_signed() {
852 SExt(bcx
, ix_val
, ccx
.int_type())
853 } else { ZExt(bcx, ix_val, ccx.int_type()) }
854 } else if ix_size
> int_size
{
855 Trunc(bcx
, ix_val
, ccx
.int_type())
861 let unit_ty
= base_datum
.ty
.sequence_element_type(bcx
.tcx());
863 let (base
, len
) = base_datum
.get_vec_base_and_len(bcx
);
865 debug
!("trans_index: base {}", bcx
.val_to_string(base
));
866 debug
!("trans_index: len {}", bcx
.val_to_string(len
));
868 let bounds_check
= ICmp(bcx
,
872 index_expr_debug_loc
);
873 let expect
= ccx
.get_intrinsic(&("llvm.expect.i1"));
874 let expected
= Call(bcx
,
876 &[bounds_check
, C_bool(ccx
, false)],
878 index_expr_debug_loc
);
879 bcx
= with_cond(bcx
, expected
, |bcx
| {
880 controlflow
::trans_fail_bounds_check(bcx
,
881 expr_info(index_expr
),
885 let elt
= InBoundsGEP(bcx
, base
, &[ix_val
]);
886 let elt
= PointerCast(bcx
, elt
, type_of
::type_of(ccx
, unit_ty
).ptr_to());
887 let lval
= Lvalue
::new("expr::trans_index fallback");
888 Datum
::new(elt
, unit_ty
, LvalueExpr(lval
))
892 DatumBlock
::new(bcx
, elt_datum
)
// Translates a path reference into a datum, dispatching on what the path
// resolves to: fn-like items (fns, methods, struct/variant constructors),
// statics, consts (a compiler bug if reached here), or — in the
// fall-through — local variables/upvars via `trans_local_var`.
// NOTE(review): the `def` parameter line (original 897), the `match def`
// header (original 902) and several arm/closing braces are elided in this
// extract, so the match structure below is incomplete as shown.
895 fn trans_def
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
896 ref_expr
: &hir
::Expr
,
898 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
899 //! Translates a reference to a path.
901 let _icx
= push_ctxt("trans_def_lvalue");
903 Def
::Fn(..) | Def
::Method(..) |
904 Def
::Struct(..) | Def
::Variant(..) => {
905 let datum
= trans_def_fn_unadjusted(bcx
.ccx(), ref_expr
, def
,
906 bcx
.fcx
.param_substs
);
907 DatumBlock
::new(bcx
, datum
.to_expr_datum())
// Statics are lvalues backed by a global value.
909 Def
::Static(did
, _
) => {
910 let const_ty
= expr_ty(bcx
, ref_expr
);
911 let val
= get_static_val(bcx
.ccx(), did
, const_ty
);
912 let lval
= Lvalue
::new("expr::trans_def");
913 DatumBlock
::new(bcx
, Datum
::new(val
, const_ty
, LvalueExpr(lval
)))
// Constant expressions are handled before reaching this function;
// hitting this arm indicates a compiler bug.
915 Def
::Const(_
) | Def
::AssociatedConst(_
) => {
916 bcx
.sess().span_bug(ref_expr
.span
,
917 "constant expression should not reach expr::trans_def")
920 DatumBlock
::new(bcx
, trans_local_var(bcx
, def
).to_expr_datum())
925 fn trans_rvalue_stmt_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
927 -> Block
<'blk
, 'tcx
> {
929 let _icx
= push_ctxt("trans_rvalue_stmt");
931 if bcx
.unreachable
.get() {
935 debuginfo
::set_source_location(bcx
.fcx
, expr
.id
, expr
.span
);
938 hir
::ExprBreak(label_opt
) => {
939 controlflow
::trans_break(bcx
, expr
, label_opt
.map(|l
| l
.node
.name
))
941 hir
::ExprType(ref e
, _
) => {
942 trans_into(bcx
, &e
, Ignore
)
944 hir
::ExprAgain(label_opt
) => {
945 controlflow
::trans_cont(bcx
, expr
, label_opt
.map(|l
| l
.node
.name
))
947 hir
::ExprRet(ref ex
) => {
948 // Check to see if the return expression itself is reachable.
949 // This can occur when the inner expression contains a return
950 let reachable
= if let Some(ref cfg
) = bcx
.fcx
.cfg
{
951 cfg
.node_is_reachable(expr
.id
)
957 controlflow
::trans_ret(bcx
, expr
, ex
.as_ref().map(|e
| &**e
))
959 // If it's not reachable, just translate the inner expression
960 // directly. This avoids having to manage a return slot when
961 // it won't actually be used anyway.
962 if let &Some(ref x
) = ex
{
963 bcx
= trans_into(bcx
, &x
, Ignore
);
965 // Mark the end of the block as unreachable. Once we get to
966 // a return expression, there's no more we should be doing
972 hir
::ExprWhile(ref cond
, ref body
, _
) => {
973 controlflow
::trans_while(bcx
, expr
, &cond
, &body
)
975 hir
::ExprLoop(ref body
, _
) => {
976 controlflow
::trans_loop(bcx
, expr
, &body
)
978 hir
::ExprAssign(ref dst
, ref src
) => {
979 let src_datum
= unpack_datum
!(bcx
, trans(bcx
, &src
));
980 let dst_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, &dst
, "assign"));
982 if bcx
.fcx
.type_needs_drop(dst_datum
.ty
) {
983 // If there are destructors involved, make sure we
984 // are copying from an rvalue, since that cannot possible
985 // alias an lvalue. We are concerned about code like:
993 // where e.g. a : Option<Foo> and a.b :
994 // Option<Foo>. In that case, freeing `a` before the
995 // assignment may also free `a.b`!
997 // We could avoid this intermediary with some analysis
998 // to determine whether `dst` may possibly own `src`.
999 debuginfo
::set_source_location(bcx
.fcx
, expr
.id
, expr
.span
);
1000 let src_datum
= unpack_datum
!(
1001 bcx
, src_datum
.to_rvalue_datum(bcx
, "ExprAssign"));
1002 let opt_hint_datum
= dst_datum
.kind
.drop_flag_info
.hint_datum(bcx
);
1003 let opt_hint_val
= opt_hint_datum
.map(|d
|d
.to_value());
1005 // 1. Drop the data at the destination, passing the
1006 // drop-hint in case the lvalue has already been
1007 // dropped or moved.
1008 bcx
= glue
::drop_ty_core(bcx
,
1015 // 2. We are overwriting the destination; ensure that
1016 // its drop-hint (if any) says "initialized."
1017 if let Some(hint_val
) = opt_hint_val
{
1018 let hint_llval
= hint_val
.value();
1019 let drop_needed
= C_u8(bcx
.fcx
.ccx
, adt
::DTOR_NEEDED_HINT
);
1020 Store(bcx
, drop_needed
, hint_llval
);
1022 src_datum
.store_to(bcx
, dst_datum
.val
)
1024 src_datum
.store_to(bcx
, dst_datum
.val
)
1027 hir
::ExprAssignOp(op
, ref dst
, ref src
) => {
1028 let has_method_map
= bcx
.tcx()
1032 .contains_key(&MethodCall
::expr(expr
.id
));
1035 let dst
= unpack_datum
!(bcx
, trans(bcx
, &dst
));
1036 let src_datum
= unpack_datum
!(bcx
, trans(bcx
, &src
));
1037 trans_overloaded_op(bcx
, expr
, MethodCall
::expr(expr
.id
), dst
,
1038 Some((src_datum
, src
.id
)), None
, false).bcx
1040 trans_assign_op(bcx
, expr
, op
, &dst
, &src
)
1043 hir
::ExprInlineAsm(ref a
) => {
1044 asm
::trans_inline_asm(bcx
, a
)
1047 bcx
.tcx().sess
.span_bug(
1049 &format
!("trans_rvalue_stmt_unadjusted reached \
1050 fall-through case: {:?}",
1056 fn trans_rvalue_dps_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1059 -> Block
<'blk
, 'tcx
> {
1060 let _icx
= push_ctxt("trans_rvalue_dps_unadjusted");
1062 let tcx
= bcx
.tcx();
1064 debuginfo
::set_source_location(bcx
.fcx
, expr
.id
, expr
.span
);
1067 hir
::ExprType(ref e
, _
) => {
1068 trans_into(bcx
, &e
, dest
)
1070 hir
::ExprPath(..) => {
1071 trans_def_dps_unadjusted(bcx
, expr
, bcx
.def(expr
.id
), dest
)
1073 hir
::ExprIf(ref cond
, ref thn
, ref els
) => {
1074 controlflow
::trans_if(bcx
, expr
.id
, &cond
, &thn
, els
.as_ref().map(|e
| &**e
), dest
)
1076 hir
::ExprMatch(ref discr
, ref arms
, _
) => {
1077 _match
::trans_match(bcx
, expr
, &discr
, &arms
[..], dest
)
1079 hir
::ExprBlock(ref blk
) => {
1080 controlflow
::trans_block(bcx
, &blk
, dest
)
1082 hir
::ExprStruct(_
, ref fields
, ref base
) => {
1085 base
.as_ref().map(|e
| &**e
),
1088 node_id_type(bcx
, expr
.id
),
1091 hir
::ExprRange(ref start
, ref end
) => {
1092 // FIXME it is just not right that we are synthesising ast nodes in
1094 fn make_field(field_name
: &str, expr
: P
<hir
::Expr
>) -> hir
::Field
{
1096 name
: codemap
::dummy_spanned(token
::intern(field_name
)),
1098 span
: codemap
::DUMMY_SP
,
1102 // A range just desugars into a struct.
1103 // Note that the type of the start and end may not be the same, but
1104 // they should only differ in their lifetime, which should not matter
1106 let (did
, fields
, ty_params
) = match (start
, end
) {
1107 (&Some(ref start
), &Some(ref end
)) => {
1109 let fields
= vec
![make_field("start", start
.clone()),
1110 make_field("end", end
.clone())];
1111 (tcx
.lang_items
.range_struct(), fields
, vec
![node_id_type(bcx
, start
.id
)])
1113 (&Some(ref start
), &None
) => {
1114 // Desugar to RangeFrom
1115 let fields
= vec
![make_field("start", start
.clone())];
1116 (tcx
.lang_items
.range_from_struct(), fields
, vec
![node_id_type(bcx
, start
.id
)])
1118 (&None
, &Some(ref end
)) => {
1119 // Desugar to RangeTo
1120 let fields
= vec
![make_field("end", end
.clone())];
1121 (tcx
.lang_items
.range_to_struct(), fields
, vec
![node_id_type(bcx
, end
.id
)])
1124 // Desugar to RangeFull
1125 (tcx
.lang_items
.range_full_struct(), vec
![], vec
![])
1129 if let Some(did
) = did
{
1130 let substs
= Substs
::new_type(ty_params
, vec
![]);
1136 tcx
.mk_struct(tcx
.lookup_adt_def(did
),
1137 tcx
.mk_substs(substs
)),
1140 tcx
.sess
.span_bug(expr
.span
,
1141 "No lang item for ranges (how did we get this far?)")
1144 hir
::ExprTup(ref args
) => {
1145 let numbered_fields
: Vec
<(usize, &hir
::Expr
)> =
1146 args
.iter().enumerate().map(|(i
, arg
)| (i
, &**arg
)).collect();
1150 &numbered_fields
[..],
1155 hir
::ExprLit(ref lit
) => {
1157 ast
::LitKind
::Str(ref s
, _
) => {
1158 tvec
::trans_lit_str(bcx
, expr
, (*s
).clone(), dest
)
1163 .span_bug(expr
.span
,
1164 "trans_rvalue_dps_unadjusted shouldn't be \
1165 translating this type of literal")
1169 hir
::ExprVec(..) | hir
::ExprRepeat(..) => {
1170 tvec
::trans_fixed_vstore(bcx
, expr
, dest
)
1172 hir
::ExprClosure(_
, ref decl
, ref body
) => {
1173 let dest
= match dest
{
1174 SaveIn(lldest
) => closure
::Dest
::SaveIn(bcx
, lldest
),
1175 Ignore
=> closure
::Dest
::Ignore(bcx
.ccx())
1178 // NB. To get the id of the closure, we don't use
1179 // `local_def_id(id)`, but rather we extract the closure
1180 // def-id from the expr's type. This is because this may
1181 // be an inlined expression from another crate, and we
1182 // want to get the ORIGINAL closure def-id, since that is
1183 // the key we need to find the closure-kind and
1184 // closure-type etc.
1185 let (def_id
, substs
) = match expr_ty(bcx
, expr
).sty
{
1186 ty
::TyClosure(def_id
, ref substs
) => (def_id
, substs
),
1188 bcx
.tcx().sess
.span_bug(
1190 &format
!("closure expr without closure type: {:?}", t
)),
1193 closure
::trans_closure_expr(dest
,
1199 &expr
.attrs
).unwrap_or(bcx
)
1201 hir
::ExprCall(ref f
, ref args
) => {
1202 if bcx
.tcx().is_method_call(expr
.id
) {
1203 trans_overloaded_call(bcx
,
1209 callee
::trans_call(bcx
,
1212 callee
::ArgExprs(&args
[..]),
1216 hir
::ExprMethodCall(_
, _
, ref args
) => {
1217 callee
::trans_method_call(bcx
,
1220 callee
::ArgExprs(&args
[..]),
1223 hir
::ExprBinary(op
, ref lhs
, ref rhs
) => {
1224 // if not overloaded, would be RvalueDatumExpr
1225 let lhs
= unpack_datum
!(bcx
, trans(bcx
, &lhs
));
1226 let rhs_datum
= unpack_datum
!(bcx
, trans(bcx
, &rhs
));
1227 trans_overloaded_op(bcx
, expr
, MethodCall
::expr(expr
.id
), lhs
,
1228 Some((rhs_datum
, rhs
.id
)), Some(dest
),
1229 !rustc_front
::util
::is_by_value_binop(op
.node
)).bcx
1231 hir
::ExprUnary(op
, ref subexpr
) => {
1232 // if not overloaded, would be RvalueDatumExpr
1233 let arg
= unpack_datum
!(bcx
, trans(bcx
, &subexpr
));
1234 trans_overloaded_op(bcx
, expr
, MethodCall
::expr(expr
.id
),
1235 arg
, None
, Some(dest
), !rustc_front
::util
::is_by_value_unop(op
)).bcx
1237 hir
::ExprIndex(ref base
, ref idx
) => {
1238 // if not overloaded, would be RvalueDatumExpr
1239 let base
= unpack_datum
!(bcx
, trans(bcx
, &base
));
1240 let idx_datum
= unpack_datum
!(bcx
, trans(bcx
, &idx
));
1241 trans_overloaded_op(bcx
, expr
, MethodCall
::expr(expr
.id
), base
,
1242 Some((idx_datum
, idx
.id
)), Some(dest
), true).bcx
1244 hir
::ExprCast(..) => {
1245 // Trait casts used to come this way, now they should be coercions.
1246 bcx
.tcx().sess
.span_bug(expr
.span
, "DPS expr_cast (residual trait cast?)")
1248 hir
::ExprAssignOp(op
, _
, _
) => {
1249 bcx
.tcx().sess
.span_bug(
1251 &format
!("augmented assignment `{}=` should always be a rvalue_stmt",
1252 rustc_front
::util
::binop_to_string(op
.node
)))
1255 bcx
.tcx().sess
.span_bug(
1257 &format
!("trans_rvalue_dps_unadjusted reached fall-through \
1264 fn trans_def_dps_unadjusted
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1265 ref_expr
: &hir
::Expr
,
1268 -> Block
<'blk
, 'tcx
> {
1269 let _icx
= push_ctxt("trans_def_dps_unadjusted");
1271 let lldest
= match dest
{
1272 SaveIn(lldest
) => lldest
,
1273 Ignore
=> { return bcx; }
1277 Def
::Variant(tid
, vid
) => {
1278 let variant
= bcx
.tcx().lookup_adt_def(tid
).variant_with_id(vid
);
1279 if let ty
::VariantKind
::Tuple
= variant
.kind() {
1281 let llfn
= callee
::trans_fn_ref(bcx
.ccx(), vid
,
1282 ExprId(ref_expr
.id
),
1283 bcx
.fcx
.param_substs
).val
;
1284 Store(bcx
, llfn
, lldest
);
1288 let ty
= expr_ty(bcx
, ref_expr
);
1289 let repr
= adt
::represent_type(bcx
.ccx(), ty
);
1290 adt
::trans_set_discr(bcx
, &repr
, lldest
, Disr
::from(variant
.disr_val
));
1294 Def
::Struct(..) => {
1295 let ty
= expr_ty(bcx
, ref_expr
);
1297 ty
::TyStruct(def
, _
) if def
.has_dtor() => {
1298 let repr
= adt
::represent_type(bcx
.ccx(), ty
);
1299 adt
::trans_set_discr(bcx
, &repr
, lldest
, Disr(0));
1306 bcx
.tcx().sess
.span_bug(ref_expr
.span
, &format
!(
1307 "Non-DPS def {:?} referened by {}",
1308 def
, bcx
.node_id_to_string(ref_expr
.id
)));
1313 pub fn trans_def_fn_unadjusted
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1314 ref_expr
: &hir
::Expr
,
1316 param_substs
: &'tcx Substs
<'tcx
>)
1317 -> Datum
<'tcx
, Rvalue
> {
1318 let _icx
= push_ctxt("trans_def_datum_unadjusted");
1322 Def
::Struct(did
) | Def
::Variant(_
, did
) => {
1323 callee
::trans_fn_ref(ccx
, did
, ExprId(ref_expr
.id
), param_substs
)
1325 Def
::Method(method_did
) => {
1326 match ccx
.tcx().impl_or_trait_item(method_did
).container() {
1327 ty
::ImplContainer(_
) => {
1328 callee
::trans_fn_ref(ccx
, method_did
,
1329 ExprId(ref_expr
.id
),
1332 ty
::TraitContainer(trait_did
) => {
1333 meth
::trans_static_method_callee(ccx
, method_did
,
1334 trait_did
, ref_expr
.id
,
1340 ccx
.tcx().sess
.span_bug(ref_expr
.span
, &format
!(
1341 "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
1348 /// Translates a reference to a local variable or argument. This always results in an lvalue datum.
1349 pub fn trans_local_var
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1351 -> Datum
<'tcx
, Lvalue
> {
1352 let _icx
= push_ctxt("trans_local_var");
1355 Def
::Upvar(_
, nid
, _
, _
) => {
1356 // Can't move upvars, so this is never a ZeroMemLastUse.
1357 let local_ty
= node_id_type(bcx
, nid
);
1358 let lval
= Lvalue
::new_with_hint("expr::trans_local_var (upvar)",
1359 bcx
, nid
, HintKind
::ZeroAndMaintain
);
1360 match bcx
.fcx
.llupvars
.borrow().get(&nid
) {
1361 Some(&val
) => Datum
::new(val
, local_ty
, lval
),
1363 bcx
.sess().bug(&format
!(
1364 "trans_local_var: no llval for upvar {} found",
1369 Def
::Local(_
, nid
) => {
1370 let datum
= match bcx
.fcx
.lllocals
.borrow().get(&nid
) {
1373 bcx
.sess().bug(&format
!(
1374 "trans_local_var: no datum for local/arg {} found",
1378 debug
!("take_local(nid={}, v={}, ty={})",
1379 nid
, bcx
.val_to_string(datum
.val
), datum
.ty
);
1383 bcx
.sess().unimpl(&format
!(
1384 "unsupported def type in trans_local_var: {:?}",
1390 fn trans_struct
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1391 fields
: &[hir
::Field
],
1392 base
: Option
<&hir
::Expr
>,
1393 expr_span
: codemap
::Span
,
1394 expr_id
: ast
::NodeId
,
1396 dest
: Dest
) -> Block
<'blk
, 'tcx
> {
1397 let _icx
= push_ctxt("trans_rec");
1399 let tcx
= bcx
.tcx();
1400 let vinfo
= VariantInfo
::of_node(tcx
, ty
, expr_id
);
1402 let mut need_base
= vec
![true; vinfo
.fields
.len()];
1404 let numbered_fields
= fields
.iter().map(|field
| {
1405 let pos
= vinfo
.field_index(field
.name
.node
);
1406 need_base
[pos
] = false;
1408 }).collect
::<Vec
<_
>>();
1410 let optbase
= match base
{
1411 Some(base_expr
) => {
1412 let mut leftovers
= Vec
::new();
1413 for (i
, b
) in need_base
.iter().enumerate() {
1415 leftovers
.push((i
, vinfo
.fields
[i
].1));
1418 Some(StructBaseInfo
{expr
: base_expr
,
1419 fields
: leftovers
})
1422 if need_base
.iter().any(|b
| *b
) {
1423 tcx
.sess
.span_bug(expr_span
, "missing fields and no base expr")
1435 DebugLoc
::At(expr_id
, expr_span
))
1438 /// Information that `trans_adt` needs in order to fill in the fields
1439 /// of a struct copied from a base struct (e.g., from an expression
1440 /// like `Foo { a: b, ..base }`.
1442 /// Note that `fields` may be empty; the base expression must always be
1443 /// evaluated for side-effects.
1444 pub struct StructBaseInfo
<'a
, 'tcx
> {
1445 /// The base expression; will be evaluated after all explicit fields.
1446 expr
: &'a hir
::Expr
,
1447 /// The indices of fields to copy paired with their types.
1448 fields
: Vec
<(usize, Ty
<'tcx
>)>
1451 /// Constructs an ADT instance:
1453 /// - `fields` should be a list of field indices paired with the
1454 /// expression to store into that field. The initializers will be
1455 /// evaluated in the order specified by `fields`.
1457 /// - `optbase` contains information on the base struct (if any) from
1458 /// which remaining fields are copied; see comments on `StructBaseInfo`.
1459 pub fn trans_adt
<'a
, 'blk
, 'tcx
>(mut bcx
: Block
<'blk
, 'tcx
>,
1462 fields
: &[(usize, &hir
::Expr
)],
1463 optbase
: Option
<StructBaseInfo
<'a
, 'tcx
>>,
1465 debug_location
: DebugLoc
)
1466 -> Block
<'blk
, 'tcx
> {
1467 let _icx
= push_ctxt("trans_adt");
1469 let repr
= adt
::represent_type(bcx
.ccx(), ty
);
1471 debug_location
.apply(bcx
.fcx
);
1473 // If we don't care about the result, just make a
1474 // temporary stack slot
1475 let addr
= match dest
{
1478 let llresult
= alloc_ty(bcx
, ty
, "temp");
1479 call_lifetime_start(bcx
, llresult
);
1484 debug
!("trans_adt");
1486 // This scope holds intermediates that must be cleaned should
1487 // panic occur before the ADT as a whole is ready.
1488 let custom_cleanup_scope
= fcx
.push_custom_cleanup_scope();
1491 // Issue 23112: The original logic appeared vulnerable to same
1492 // order-of-eval bug. But, SIMD values are tuple-structs;
1493 // i.e. functional record update (FRU) syntax is unavailable.
1495 // To be safe, double-check that we did not get here via FRU.
1496 assert
!(optbase
.is_none());
1498 // This is the constructor of a SIMD type, such types are
1499 // always primitive machine types and so do not have a
1500 // destructor or require any clean-up.
1501 let llty
= type_of
::type_of(bcx
.ccx(), ty
);
1503 // keep a vector as a register, and running through the field
1504 // `insertelement`ing them directly into that register
1505 // (i.e. avoid GEPi and `store`s to an alloca) .
1506 let mut vec_val
= C_undef(llty
);
1508 for &(i
, ref e
) in fields
{
1509 let block_datum
= trans(bcx
, &e
);
1510 bcx
= block_datum
.bcx
;
1511 let position
= C_uint(bcx
.ccx(), i
);
1512 let value
= block_datum
.datum
.to_llscalarish(bcx
);
1513 vec_val
= InsertElement(bcx
, vec_val
, value
, position
);
1515 Store(bcx
, vec_val
, addr
);
1516 } else if let Some(base
) = optbase
{
1517 // Issue 23112: If there is a base, then order-of-eval
1518 // requires field expressions eval'ed before base expression.
1520 // First, trans field expressions to temporary scratch values.
1521 let scratch_vals
: Vec
<_
> = fields
.iter().map(|&(i
, ref e
)| {
1522 let datum
= unpack_datum
!(bcx
, trans(bcx
, &e
));
1526 debug_location
.apply(bcx
.fcx
);
1528 // Second, trans the base to the dest.
1529 assert_eq
!(discr
, Disr(0));
1531 let addr
= adt
::MaybeSizedValue
::sized(addr
);
1532 match expr_kind(bcx
.tcx(), &base
.expr
) {
1533 ExprKind
::RvalueDps
| ExprKind
::RvalueDatum
if !bcx
.fcx
.type_needs_drop(ty
) => {
1534 bcx
= trans_into(bcx
, &base
.expr
, SaveIn(addr
.value
));
1536 ExprKind
::RvalueStmt
=> {
1537 bcx
.tcx().sess
.bug("unexpected expr kind for struct base expr")
1540 let base_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, &base
.expr
, "base"));
1541 for &(i
, t
) in &base
.fields
{
1542 let datum
= base_datum
.get_element(
1543 bcx
, t
, |srcval
| adt
::trans_field_ptr(bcx
, &repr
, srcval
, discr
, i
));
1544 assert
!(type_is_sized(bcx
.tcx(), datum
.ty
));
1545 let dest
= adt
::trans_field_ptr(bcx
, &repr
, addr
, discr
, i
);
1546 bcx
= datum
.store_to(bcx
, dest
);
1551 // Finally, move scratch field values into actual field locations
1552 for (i
, datum
) in scratch_vals
{
1553 let dest
= adt
::trans_field_ptr(bcx
, &repr
, addr
, discr
, i
);
1554 bcx
= datum
.store_to(bcx
, dest
);
1557 // No base means we can write all fields directly in place.
1558 let addr
= adt
::MaybeSizedValue
::sized(addr
);
1559 for &(i
, ref e
) in fields
{
1560 let dest
= adt
::trans_field_ptr(bcx
, &repr
, addr
, discr
, i
);
1561 let e_ty
= expr_ty_adjusted(bcx
, &e
);
1562 bcx
= trans_into(bcx
, &e
, SaveIn(dest
));
1563 let scope
= cleanup
::CustomScope(custom_cleanup_scope
);
1564 fcx
.schedule_lifetime_end(scope
, dest
);
1565 // FIXME: nonzeroing move should generalize to fields
1566 fcx
.schedule_drop_mem(scope
, dest
, e_ty
, None
);
1570 adt
::trans_set_discr(bcx
, &repr
, addr
, discr
);
1572 fcx
.pop_custom_cleanup_scope(custom_cleanup_scope
);
1574 // If we don't care about the result drop the temporary we made
1578 bcx
= glue
::drop_ty(bcx
, addr
, ty
, debug_location
);
1579 base
::call_lifetime_end(bcx
, addr
);
1586 fn trans_immediate_lit
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1589 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1590 // must not be a string constant, that is a RvalueDpsExpr
1591 let _icx
= push_ctxt("trans_immediate_lit");
1592 let ty
= expr_ty(bcx
, expr
);
1593 let v
= consts
::const_lit(bcx
.ccx(), expr
, lit
);
1594 immediate_rvalue_bcx(bcx
, v
, ty
).to_expr_datumblock()
1597 fn trans_unary
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1600 sub_expr
: &hir
::Expr
)
1601 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1602 let ccx
= bcx
.ccx();
1604 let _icx
= push_ctxt("trans_unary_datum");
1606 let method_call
= MethodCall
::expr(expr
.id
);
1608 // The only overloaded operator that is translated to a datum
1609     // is an overloaded deref, since it always yields a `&T`.
1610 // Otherwise, we should be in the RvalueDpsExpr path.
1611 assert
!(op
== hir
::UnDeref
|| !ccx
.tcx().is_method_call(expr
.id
));
1613 let un_ty
= expr_ty(bcx
, expr
);
1615 let debug_loc
= expr
.debug_loc();
1619 let datum
= unpack_datum
!(bcx
, trans(bcx
, sub_expr
));
1620 let llresult
= Not(bcx
, datum
.to_llscalarish(bcx
), debug_loc
);
1621 immediate_rvalue_bcx(bcx
, llresult
, un_ty
).to_expr_datumblock()
1624 let datum
= unpack_datum
!(bcx
, trans(bcx
, sub_expr
));
1625 let val
= datum
.to_llscalarish(bcx
);
1626 let (bcx
, llneg
) = {
1628 let result
= FNeg(bcx
, val
, debug_loc
);
1631 let is_signed
= un_ty
.is_signed();
1632 let result
= Neg(bcx
, val
, debug_loc
);
1633 let bcx
= if bcx
.ccx().check_overflow() && is_signed
{
1634 let (llty
, min
) = base
::llty_and_min_for_signed_ty(bcx
, un_ty
);
1635 let is_min
= ICmp(bcx
, llvm
::IntEQ
, val
,
1636 C_integral(llty
, min
, true), debug_loc
);
1637 with_cond(bcx
, is_min
, |bcx
| {
1638 let msg
= InternedString
::new(
1639 "attempted to negate with overflow");
1640 controlflow
::trans_fail(bcx
, expr_info(expr
), msg
)
1648 immediate_rvalue_bcx(bcx
, llneg
, un_ty
).to_expr_datumblock()
1651 let datum
= unpack_datum
!(bcx
, trans(bcx
, sub_expr
));
1652 deref_once(bcx
, expr
, datum
, method_call
)
1657 fn trans_uniq_expr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1658 box_expr
: &hir
::Expr
,
1660 contents
: &hir
::Expr
,
1661 contents_ty
: Ty
<'tcx
>)
1662 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1663 let _icx
= push_ctxt("trans_uniq_expr");
1665 assert
!(type_is_sized(bcx
.tcx(), contents_ty
));
1666 let llty
= type_of
::type_of(bcx
.ccx(), contents_ty
);
1667 let size
= llsize_of(bcx
.ccx(), llty
);
1668 let align
= C_uint(bcx
.ccx(), type_of
::align_of(bcx
.ccx(), contents_ty
));
1669 let llty_ptr
= llty
.ptr_to();
1670 let Result { bcx, val }
= malloc_raw_dyn(bcx
,
1675 box_expr
.debug_loc());
1676 // Unique boxes do not allocate for zero-size types. The standard library
1677 // may assume that `free` is never called on the pointer returned for
1678 // `Box<ZeroSizeType>`.
1679 let bcx
= if llsize_of_alloc(bcx
.ccx(), llty
) == 0 {
1680 trans_into(bcx
, contents
, SaveIn(val
))
1682 let custom_cleanup_scope
= fcx
.push_custom_cleanup_scope();
1683 fcx
.schedule_free_value(cleanup
::CustomScope(custom_cleanup_scope
),
1684 val
, cleanup
::HeapExchange
, contents_ty
);
1685 let bcx
= trans_into(bcx
, contents
, SaveIn(val
));
1686 fcx
.pop_custom_cleanup_scope(custom_cleanup_scope
);
1689 immediate_rvalue_bcx(bcx
, val
, box_ty
).to_expr_datumblock()
1692 fn trans_addr_of
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1694 subexpr
: &hir
::Expr
)
1695 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1696 let _icx
= push_ctxt("trans_addr_of");
1698 let sub_datum
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, subexpr
, "addr_of"));
1699 let ty
= expr_ty(bcx
, expr
);
1700 if !type_is_sized(bcx
.tcx(), sub_datum
.ty
) {
1701 // Always generate an lvalue datum, because this pointer doesn't own
1702 // the data and cleanup is scheduled elsewhere.
1703 DatumBlock
::new(bcx
, Datum
::new(sub_datum
.val
, ty
, LvalueExpr(sub_datum
.kind
)))
1705 // Sized value, ref to a thin pointer
1706 immediate_rvalue_bcx(bcx
, sub_datum
.val
, ty
).to_expr_datumblock()
1710 fn trans_scalar_binop
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1711 binop_expr
: &hir
::Expr
,
1714 lhs
: Datum
<'tcx
, Rvalue
>,
1715 rhs
: Datum
<'tcx
, Rvalue
>)
1716 -> DatumBlock
<'blk
, 'tcx
, Expr
>
1718 let _icx
= push_ctxt("trans_scalar_binop");
1720 let tcx
= bcx
.tcx();
1722 assert
!(!lhs_t
.is_simd());
1723 let is_float
= lhs_t
.is_fp();
1724 let is_signed
= lhs_t
.is_signed();
1725 let info
= expr_info(binop_expr
);
1727 let binop_debug_loc
= binop_expr
.debug_loc();
1730 let lhs
= lhs
.to_llscalarish(bcx
);
1731 let rhs
= rhs
.to_llscalarish(bcx
);
1732 let val
= match op
.node
{
1735 FAdd(bcx
, lhs
, rhs
, binop_debug_loc
)
1737 let (newbcx
, res
) = with_overflow_check(
1738 bcx
, OverflowOp
::Add
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1745 FSub(bcx
, lhs
, rhs
, binop_debug_loc
)
1747 let (newbcx
, res
) = with_overflow_check(
1748 bcx
, OverflowOp
::Sub
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1755 FMul(bcx
, lhs
, rhs
, binop_debug_loc
)
1757 let (newbcx
, res
) = with_overflow_check(
1758 bcx
, OverflowOp
::Mul
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1765 FDiv(bcx
, lhs
, rhs
, binop_debug_loc
)
1767 // Only zero-check integers; fp /0 is NaN
1768 bcx
= base
::fail_if_zero_or_overflows(bcx
,
1769 expr_info(binop_expr
),
1775 SDiv(bcx
, lhs
, rhs
, binop_debug_loc
)
1777 UDiv(bcx
, lhs
, rhs
, binop_debug_loc
)
1783                     // LLVM currently always lowers the `frem` instructions to the appropriate
1784 // library calls typically found in libm. Notably f64 gets wired up
1785 // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
1786 // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
1787 // instead just an inline function in a header that goes up to a
1788 // f64, uses `fmod`, and then comes back down to a f32.
1790 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
1791 // still unconditionally lower frem instructions over 32-bit floats
1792 // to a call to `fmodf`. To work around this we special case MSVC
1793 // 32-bit float rem instructions and instead do the call out to
1794 // `fmod` ourselves.
1796 // Note that this is currently duplicated with src/libcore/ops.rs
1797 // which does the same thing, and it would be nice to perhaps unify
1798                     // these two implementations one day! Also note that we call `fmod`
1799 // for both 32 and 64-bit floats because if we emit any FRem
1800 // instruction at all then LLVM is capable of optimizing it into a
1801 // 32-bit FRem (which we're trying to avoid).
1802 let use_fmod
= tcx
.sess
.target
.target
.options
.is_like_msvc
&&
1803 tcx
.sess
.target
.target
.arch
== "x86";
1805 let f64t
= Type
::f64(bcx
.ccx());
1806 let fty
= Type
::func(&[f64t
, f64t
], &f64t
);
1807 let llfn
= declare
::declare_cfn(bcx
.ccx(), "fmod", fty
,
1809 if lhs_t
== tcx
.types
.f32 {
1810 let lhs
= FPExt(bcx
, lhs
, f64t
);
1811 let rhs
= FPExt(bcx
, rhs
, f64t
);
1812 let res
= Call(bcx
, llfn
, &[lhs
, rhs
], None
, binop_debug_loc
);
1813 FPTrunc(bcx
, res
, Type
::f32(bcx
.ccx()))
1815 Call(bcx
, llfn
, &[lhs
, rhs
], None
, binop_debug_loc
)
1818 FRem(bcx
, lhs
, rhs
, binop_debug_loc
)
1821 // Only zero-check integers; fp %0 is NaN
1822 bcx
= base
::fail_if_zero_or_overflows(bcx
,
1823 expr_info(binop_expr
),
1824 op
, lhs
, rhs
, lhs_t
);
1826 SRem(bcx
, lhs
, rhs
, binop_debug_loc
)
1828 URem(bcx
, lhs
, rhs
, binop_debug_loc
)
1832 hir
::BiBitOr
=> Or(bcx
, lhs
, rhs
, binop_debug_loc
),
1833 hir
::BiBitAnd
=> And(bcx
, lhs
, rhs
, binop_debug_loc
),
1834 hir
::BiBitXor
=> Xor(bcx
, lhs
, rhs
, binop_debug_loc
),
1836 let (newbcx
, res
) = with_overflow_check(
1837 bcx
, OverflowOp
::Shl
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1842 let (newbcx
, res
) = with_overflow_check(
1843 bcx
, OverflowOp
::Shr
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
);
1847 hir
::BiEq
| hir
::BiNe
| hir
::BiLt
| hir
::BiGe
| hir
::BiLe
| hir
::BiGt
=> {
1848 base
::compare_scalar_types(bcx
, lhs
, rhs
, lhs_t
, op
.node
, binop_debug_loc
)
1851 bcx
.tcx().sess
.span_bug(binop_expr
.span
, "unexpected binop");
1855 immediate_rvalue_bcx(bcx
, val
, binop_ty
).to_expr_datumblock()
1858 // refinement types would obviate the need for this
1859 enum lazy_binop_ty
{
1864 fn trans_lazy_binop
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1865 binop_expr
: &hir
::Expr
,
1869 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1870 let _icx
= push_ctxt("trans_lazy_binop");
1871 let binop_ty
= expr_ty(bcx
, binop_expr
);
1874 let DatumBlock {bcx: past_lhs, datum: lhs}
= trans(bcx
, a
);
1875 let lhs
= lhs
.to_llscalarish(past_lhs
);
1877 if past_lhs
.unreachable
.get() {
1878 return immediate_rvalue_bcx(past_lhs
, lhs
, binop_ty
).to_expr_datumblock();
1881 let join
= fcx
.new_id_block("join", binop_expr
.id
);
1882 let before_rhs
= fcx
.new_id_block("before_rhs", b
.id
);
1885 lazy_and
=> CondBr(past_lhs
, lhs
, before_rhs
.llbb
, join
.llbb
, DebugLoc
::None
),
1886 lazy_or
=> CondBr(past_lhs
, lhs
, join
.llbb
, before_rhs
.llbb
, DebugLoc
::None
)
1889 let DatumBlock {bcx: past_rhs, datum: rhs}
= trans(before_rhs
, b
);
1890 let rhs
= rhs
.to_llscalarish(past_rhs
);
1892 if past_rhs
.unreachable
.get() {
1893 return immediate_rvalue_bcx(join
, lhs
, binop_ty
).to_expr_datumblock();
1896 Br(past_rhs
, join
.llbb
, DebugLoc
::None
);
1897 let phi
= Phi(join
, Type
::i1(bcx
.ccx()), &[lhs
, rhs
],
1898 &[past_lhs
.llbb
, past_rhs
.llbb
]);
1900 return immediate_rvalue_bcx(join
, phi
, binop_ty
).to_expr_datumblock();
1903 fn trans_binary
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1908 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
1909 let _icx
= push_ctxt("trans_binary");
1910 let ccx
= bcx
.ccx();
1912 // if overloaded, would be RvalueDpsExpr
1913 assert
!(!ccx
.tcx().is_method_call(expr
.id
));
1917 trans_lazy_binop(bcx
, expr
, lazy_and
, lhs
, rhs
)
1920 trans_lazy_binop(bcx
, expr
, lazy_or
, lhs
, rhs
)
1924 let binop_ty
= expr_ty(bcx
, expr
);
1926 let lhs
= unpack_datum
!(bcx
, trans(bcx
, lhs
));
1927 let lhs
= unpack_datum
!(bcx
, lhs
.to_rvalue_datum(bcx
, "binop_lhs"));
1928 debug
!("trans_binary (expr {}): lhs={}",
1929 expr
.id
, lhs
.to_string(ccx
));
1930 let rhs
= unpack_datum
!(bcx
, trans(bcx
, rhs
));
1931 let rhs
= unpack_datum
!(bcx
, rhs
.to_rvalue_datum(bcx
, "binop_rhs"));
1932 debug
!("trans_binary (expr {}): rhs={}",
1933 expr
.id
, rhs
.to_string(ccx
));
1935 if type_is_fat_ptr(ccx
.tcx(), lhs
.ty
) {
1936 assert
!(type_is_fat_ptr(ccx
.tcx(), rhs
.ty
),
1937 "built-in binary operators on fat pointers are homogeneous");
1938 assert_eq
!(binop_ty
, bcx
.tcx().types
.bool
);
1939 let val
= base
::compare_scalar_types(
1946 immediate_rvalue_bcx(bcx
, val
, binop_ty
).to_expr_datumblock()
1948 assert
!(!type_is_fat_ptr(ccx
.tcx(), rhs
.ty
),
1949 "built-in binary operators on fat pointers are homogeneous");
1950 trans_scalar_binop(bcx
, expr
, binop_ty
, op
, lhs
, rhs
)
1956 fn trans_overloaded_op
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1958 method_call
: MethodCall
,
1959 lhs
: Datum
<'tcx
, Expr
>,
1960 rhs
: Option
<(Datum
<'tcx
, Expr
>, ast
::NodeId
)>,
1963 -> Result
<'blk
, 'tcx
> {
1964 callee
::trans_call_inner(bcx
,
1966 |bcx
, arg_cleanup_scope
| {
1967 meth
::trans_method_callee(bcx
,
1972 callee
::ArgOverloadedOp(lhs
, rhs
, autoref
),
1976 fn trans_overloaded_call
<'a
, 'blk
, 'tcx
>(mut bcx
: Block
<'blk
, 'tcx
>,
1978 callee
: &'a hir
::Expr
,
1979 args
: &'a
[P
<hir
::Expr
>],
1981 -> Block
<'blk
, 'tcx
> {
1982 debug
!("trans_overloaded_call {}", expr
.id
);
1983 let method_call
= MethodCall
::expr(expr
.id
);
1984 let mut all_args
= vec
!(callee
);
1985 all_args
.extend(args
.iter().map(|e
| &**e
));
1987 callee
::trans_call_inner(bcx
,
1989 |bcx
, arg_cleanup_scope
| {
1990 meth
::trans_method_callee(
1996 callee
::ArgOverloadedCall(all_args
),
2001 pub fn cast_is_noop
<'tcx
>(tcx
: &ty
::ctxt
<'tcx
>,
2006 if let Some(&CastKind
::CoercionCast
) = tcx
.cast_kinds
.borrow().get(&expr
.id
) {
2010 match (t_in
.builtin_deref(true, ty
::NoPreference
),
2011 t_out
.builtin_deref(true, ty
::NoPreference
)) {
2012 (Some(ty
::TypeAndMut{ ty: t_in, .. }
), Some(ty
::TypeAndMut{ ty: t_out, .. }
)) => {
2016 // This condition isn't redundant with the check for CoercionCast:
2017 // different types can be substituted into the same type, and
2018 // == equality can be overconservative if there are regions.
2024 fn trans_imm_cast
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2027 -> DatumBlock
<'blk
, 'tcx
, Expr
>
2029 use middle
::ty
::cast
::CastTy
::*;
2030 use middle
::ty
::cast
::IntTy
::*;
2032 fn int_cast(bcx
: Block
,
2039 let _icx
= push_ctxt("int_cast");
2040 let srcsz
= llsrctype
.int_width();
2041 let dstsz
= lldsttype
.int_width();
2042 return if dstsz
== srcsz
{
2043 BitCast(bcx
, llsrc
, lldsttype
)
2044 } else if srcsz
> dstsz
{
2045 TruncOrBitCast(bcx
, llsrc
, lldsttype
)
2047 SExtOrBitCast(bcx
, llsrc
, lldsttype
)
2049 ZExtOrBitCast(bcx
, llsrc
, lldsttype
)
2053 fn float_cast(bcx
: Block
,
2059 let _icx
= push_ctxt("float_cast");
2060 let srcsz
= llsrctype
.float_width();
2061 let dstsz
= lldsttype
.float_width();
2062 return if dstsz
> srcsz
{
2063 FPExt(bcx
, llsrc
, lldsttype
)
2064 } else if srcsz
> dstsz
{
2065 FPTrunc(bcx
, llsrc
, lldsttype
)
2069 let _icx
= push_ctxt("trans_cast");
2071 let ccx
= bcx
.ccx();
2073 let t_in
= expr_ty_adjusted(bcx
, expr
);
2074 let t_out
= node_id_type(bcx
, id
);
2076 debug
!("trans_cast({:?} as {:?})", t_in
, t_out
);
2077 let mut ll_t_in
= type_of
::arg_type_of(ccx
, t_in
);
2078 let ll_t_out
= type_of
::arg_type_of(ccx
, t_out
);
2079 // Convert the value to be cast into a ValueRef, either by-ref or
2080 // by-value as appropriate given its type:
2081 let mut datum
= unpack_datum
!(bcx
, trans(bcx
, expr
));
2083 let datum_ty
= monomorphize_type(bcx
, datum
.ty
);
2085 if cast_is_noop(bcx
.tcx(), expr
, datum_ty
, t_out
) {
2087 return DatumBlock
::new(bcx
, datum
);
2090 if type_is_fat_ptr(bcx
.tcx(), t_in
) {
2091 assert
!(datum
.kind
.is_by_ref());
2092 if type_is_fat_ptr(bcx
.tcx(), t_out
) {
2093 return DatumBlock
::new(bcx
, Datum
::new(
2094 PointerCast(bcx
, datum
.val
, ll_t_out
.ptr_to()),
2097 )).to_expr_datumblock();
2099 // Return the address
2100 return immediate_rvalue_bcx(bcx
,
2102 Load(bcx
, get_dataptr(bcx
, datum
.val
)),
2104 t_out
).to_expr_datumblock();
2108 let r_t_in
= CastTy
::from_ty(t_in
).expect("bad input type for cast");
2109 let r_t_out
= CastTy
::from_ty(t_out
).expect("bad output type for cast");
2111 let (llexpr
, signed
) = if let Int(CEnum
) = r_t_in
{
2112 let repr
= adt
::represent_type(ccx
, t_in
);
2113 let datum
= unpack_datum
!(
2114 bcx
, datum
.to_lvalue_datum(bcx
, "trans_imm_cast", expr
.id
));
2115 let llexpr_ptr
= datum
.to_llref();
2116 let discr
= adt
::trans_get_discr(bcx
, &repr
, llexpr_ptr
,
2117 Some(Type
::i64(ccx
)), true);
2118 ll_t_in
= val_ty(discr
);
2119 (discr
, adt
::is_discr_signed(&repr
))
2121 (datum
.to_llscalarish(bcx
), t_in
.is_signed())
2124 let newval
= match (r_t_in
, r_t_out
) {
2125 (Ptr(_
), Ptr(_
)) | (FnPtr
, Ptr(_
)) | (RPtr(_
), Ptr(_
)) => {
2126 PointerCast(bcx
, llexpr
, ll_t_out
)
2128 (Ptr(_
), Int(_
)) | (FnPtr
, Int(_
)) => PtrToInt(bcx
, llexpr
, ll_t_out
),
2129 (Int(_
), Ptr(_
)) => IntToPtr(bcx
, llexpr
, ll_t_out
),
2131 (Int(_
), Int(_
)) => int_cast(bcx
, ll_t_out
, ll_t_in
, llexpr
, signed
),
2132 (Float
, Float
) => float_cast(bcx
, ll_t_out
, ll_t_in
, llexpr
),
2133 (Int(_
), Float
) if signed
=> SIToFP(bcx
, llexpr
, ll_t_out
),
2134 (Int(_
), Float
) => UIToFP(bcx
, llexpr
, ll_t_out
),
2135 (Float
, Int(I
)) => FPToSI(bcx
, llexpr
, ll_t_out
),
2136 (Float
, Int(_
)) => FPToUI(bcx
, llexpr
, ll_t_out
),
2138 _
=> ccx
.sess().span_bug(expr
.span
,
2139 &format
!("translating unsupported cast: \
2145 return immediate_rvalue_bcx(bcx
, newval
, t_out
).to_expr_datumblock();
// Translates a compound assignment (`dst op= src`, e.g. `x += y`):
// loads the current value from the LHS lvalue, evaluates the RHS,
// applies the built-in scalar binop, and stores the result back into
// the LHS location.
// NOTE(review): several original lines are missing from this excerpt
// (e.g. the remainder of the parameter list, orig. 2149-2152); `expr`,
// `op`, `dst` and `src` are parameters declared on lines not visible here.
2148 fn trans_assign_op
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2153 -> Block
<'blk
, 'tcx
> {
2154 let _icx
= push_ctxt("trans_assign_op");
2157 debug
!("trans_assign_op(expr={:?})", expr
);
2159 // User-defined operator methods cannot be used with `+=` etc right now
// An overloaded `op=` would be recorded as a method call on `expr.id`;
// this path only handles the built-in scalar case, so assert none was.
2160 assert
!(!bcx
.tcx().is_method_call(expr
.id
));
2162 // Evaluate LHS (destination), which should be an lvalue
2163 let dst
= unpack_datum
!(bcx
, trans_to_lvalue(bcx
, dst
, "assign_op"));
// The destination type must not need drop glue: we overwrite it in
// place below without running a destructor on the old value.
2164 assert
!(!bcx
.fcx
.type_needs_drop(dst
.ty
));
// Load the current LHS value and wrap it as an immediate rvalue datum
// so it can be fed to the scalar binop helper.
2165 let lhs
= load_ty(bcx
, dst
.val
, dst
.ty
);
2166 let lhs
= immediate_rvalue(lhs
, dst
.ty
);
2168 // Evaluate RHS - FIXME(#28160) this sucks
2169 let rhs
= unpack_datum
!(bcx
, trans(bcx
, &src
));
2170 let rhs
= unpack_datum
!(bcx
, rhs
.to_rvalue_datum(bcx
, "assign_op_rhs"));
2172 // Perform computation and store the result
2173 let result_datum
= unpack_datum
!(
2174 bcx
, trans_scalar_binop(bcx
, expr
, dst
.ty
, op
, lhs
, rhs
));
// Write the computed value back into the destination lvalue.
2175 return result_datum
.store_to(bcx
, dst
.val
);
// Builds a `&T`-style datum pointing at the memory occupied by `datum`,
// first forcing it to an lvalue so cleanup is scheduled. The region and
// mutability of the resulting pointer type are deliberately imprecise
// (ReStatic / immutable) because trans does not care about them.
// NOTE(review): some original lines (orig. 2180, 2199, 2201-2203 region)
// are missing from this excerpt, including the `else` line of the
// sized/unsized branch and the end of the `if` expression.
2178 fn auto_ref
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2179 datum
: Datum
<'tcx
, Expr
>,
2181 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
2184 // Ensure cleanup of `datum` if not already scheduled and obtain
2185 // a "by ref" pointer.
2186 let lv_datum
= unpack_datum
!(bcx
, datum
.to_lvalue_datum(bcx
, "autoref", expr
.id
));
2188 // Compute final type. Note that we are loose with the region and
2189 // mutability, since those things don't matter in trans.
2190 let referent_ty
= lv_datum
.ty
;
2191 let ptr_ty
= bcx
.tcx().mk_imm_ref(bcx
.tcx().mk_region(ty
::ReStatic
), referent_ty
);
2193 // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
2194 // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers
2195 // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of
2196 // indirection and for thin pointers, this has no ill effects.
2197 let kind
= if type_is_sized(bcx
.tcx(), referent_ty
) {
// Sized (thin-pointer) case: by-value rvalue, per the micro-optimization above.
2198 RvalueExpr(Rvalue
::new(ByValue
))
// Unsized case (fat pointer): keep the lvalue kind of the source datum.
2200 LvalueExpr(lv_datum
.kind
)
// Take the address of the lvalue's storage as the pointer value.
2204 let llref
= lv_datum
.to_llref();
2205 DatumBlock
::new(bcx
, Datum
::new(llref
, ptr_ty
, kind
))
// Applies `deref_once` repeatedly to `datum`, threading the result
// through each step; each step looks up the (possibly overloaded)
// autoderef method recorded for `expr.id` at that depth.
// NOTE(review): the loop header itself (orig. 2215 region) is missing
// from this excerpt; `i` is presumably the loop counter and `times`
// (orig. 2211) the iteration count — confirm against the full source.
2208 fn deref_multiple
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2210 datum
: Datum
<'tcx
, Expr
>,
2212 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
// Rebind mutably so each deref step can replace the datum.
2214 let mut datum
= datum
;
// Key for looking up the autoderef method instance at depth `i`.
2216 let method_call
= MethodCall
::autoderef(expr
.id
, i
as u32);
2217 datum
= unpack_datum
!(bcx
, deref_once(bcx
, expr
, datum
, method_call
));
2219 DatumBlock { bcx: bcx, datum: datum }
// Performs a single dereference of `datum`. If `method_call` resolves to
// an overloaded `Deref` impl, that impl is invoked first to obtain a
// plain `&T`; then the built-in deref logic below handles `TyBox`,
// `TyRawPtr` and `TyRef`. Any other type is a compiler bug (span_bug).
// NOTE(review): several original lines are missing from this excerpt
// (e.g. orig. 2228, 2230, 2232-2235, 2238-2240, 2242, parts of the match
// arms and the final `r` return), so some control-flow punctuation is
// not visible here.
2222 fn deref_once
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2224 datum
: Datum
<'tcx
, Expr
>,
2225 method_call
: MethodCall
)
2226 -> DatumBlock
<'blk
, 'tcx
, Expr
> {
2227 let ccx
= bcx
.ccx();
2229 debug
!("deref_once(expr={:?}, datum={}, method_call={:?})",
2231 datum
.to_string(ccx
),
2236 // Check for overloaded deref.
// Look up the monomorphic method type recorded for this deref, if any.
2237 let method_ty
= ccx
.tcx()
2241 .get(&method_call
).map(|method
| method
.ty
);
2243 let datum
= match method_ty
{
2244 Some(method_ty
) => {
2245 let method_ty
= monomorphize_type(bcx
, method_ty
);
2247 // Overloaded. Evaluate `trans_overloaded_op`, which will
2248 // invoke the user's deref() method, which basically
2249 // converts from the `Smaht<T>` pointer that we have into
2250 // a `&T` pointer. We can then proceed down the normal
2251 // path (below) to dereference that `&T`.
2252 let datum
= if method_call
.autoderef
== 0 {
2255 // Always perform an AutoPtr when applying an overloaded auto-deref
2256 unpack_datum
!(bcx
, auto_ref(bcx
, datum
, expr
))
// The method's return type (`&T`) with late-bound regions erased;
// a scratch slot of that type receives the overloaded call's result.
2259 let ref_ty
= // invoked methods have their LB regions instantiated
2260 ccx
.tcx().no_late_bound_regions(&method_ty
.fn_ret()).unwrap().unwrap();
2261 let scratch
= rvalue_scratch_datum(bcx
, ref_ty
, "overloaded_deref");
2263 unpack_result
!(bcx
, trans_overloaded_op(bcx
, expr
, method_call
,
2264 datum
, None
, Some(SaveIn(scratch
.val
)),
2266 scratch
.to_expr_datum()
2269 // Not overloaded. We already have a pointer we know how to deref.
// Built-in deref on the (possibly method-produced) pointer datum.
2274 let r
= match datum
.ty
.sty
{
2275 ty
::TyBox(content_ty
) => {
2276 // Make sure we have an lvalue datum here to get the
2277 // proper cleanups scheduled
2278 let datum
= unpack_datum
!(
2279 bcx
, datum
.to_lvalue_datum(bcx
, "deref", expr
.id
));
2281 if type_is_sized(bcx
.tcx(), content_ty
) {
// Sized contents: load the thin pointer out of the box slot.
2282 let ptr
= load_ty(bcx
, datum
.val
, datum
.ty
);
2283 DatumBlock
::new(bcx
, Datum
::new(ptr
, content_ty
, LvalueExpr(datum
.kind
)))
2285 // A fat pointer and a DST lvalue have the same representation
2286 // just different types. Since there is no temporary for `*e`
2287 // here (because it is unsized), we cannot emulate the sized
2288 // object code path for running drop glue and free. Instead,
2289 // we schedule cleanup for `e`, turning it into an lvalue.
2291 let lval
= Lvalue
::new("expr::deref_once ty_uniq");
2292 let datum
= Datum
::new(datum
.val
, content_ty
, LvalueExpr(lval
));
2293 DatumBlock
::new(bcx
, datum
)
2297 ty
::TyRawPtr(ty
::TypeAndMut { ty: content_ty, .. }
) |
2298 ty
::TyRef(_
, ty
::TypeAndMut { ty: content_ty, .. }
) => {
2299 let lval
= Lvalue
::new("expr::deref_once ptr");
2300 if type_is_sized(bcx
.tcx(), content_ty
) {
2301 let ptr
= datum
.to_llscalarish(bcx
);
2303 // Always generate an lvalue datum, even if datum.mode is
2304 // an rvalue. This is because datum.mode is only an
2305 // rvalue for non-owning pointers like &T or *T, in which
2306 // case cleanup *is* scheduled elsewhere, by the true
2307 // owner (or, in the case of *T, by the user).
2308 DatumBlock
::new(bcx
, Datum
::new(ptr
, content_ty
, LvalueExpr(lval
)))
2310 // A fat pointer and a DST lvalue have the same representation
2311 // just different types.
2312 DatumBlock
::new(bcx
, Datum
::new(datum
.val
, content_ty
, LvalueExpr(lval
)))
// Any other type reaching deref is a type-checking bug upstream.
2317 bcx
.tcx().sess
.span_bug(
2319 &format
!("deref invoked on expr of invalid type {:?}",
2324 debug
!("deref_once(expr={}, method_call={:?}, result={})",
2325 expr
.id
, method_call
, r
.datum
.to_string(ccx
));
// Selects how each overflow-checked operation is lowered: add/sub/mul
// use LLVM's `*.with.overflow` intrinsics, while shifts are checked by
// masking the shift amount's out-of-range bits before the operation.
// NOTE(review): the `match` header line (orig. 2342) and the enclosing
// `impl` header are not visible in this excerpt; this is presumably a
// method on `OverflowOp` matching on `*self` — confirm in full source.
2340 fn codegen_strategy(&self) -> OverflowCodegen
{
2341 use self::OverflowCodegen
::{ViaIntrinsic, ViaInputCheck}
;
2343 OverflowOp
::Add
=> ViaIntrinsic(OverflowOpViaIntrinsic
::Add
),
2344 OverflowOp
::Sub
=> ViaIntrinsic(OverflowOpViaIntrinsic
::Sub
),
2345 OverflowOp
::Mul
=> ViaIntrinsic(OverflowOpViaIntrinsic
::Mul
),
2347 OverflowOp
::Shl
=> ViaInputCheck(OverflowOpViaInputCheck
::Shl
),
2348 OverflowOp
::Shr
=> ViaInputCheck(OverflowOpViaInputCheck
::Shr
),
// The two lowering strategies for overflow-checked arithmetic: via an
// LLVM `*.with.overflow` intrinsic, or via an explicit input check
// (used for shift amounts).
2353 enum OverflowCodegen
{
2354 ViaIntrinsic(OverflowOpViaIntrinsic
),
2355 ViaInputCheck(OverflowOpViaInputCheck
),
// Shift operations whose overflow is detected by checking the shift
// amount against the bit-width of the LHS before shifting.
2358 enum OverflowOpViaInputCheck { Shl, Shr, }
// Arithmetic operations lowered to LLVM `*.with.overflow` intrinsics.
2361 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
// Lowering of add/sub/mul with overflow detection via LLVM's
// `llvm.{s,u}{add,sub,mul}.with.overflow.iN` intrinsics, which return
// an `{iN, i1}` pair of (result, overflowed).
// NOTE(review): various original lines are missing from this excerpt
// (e.g. orig. 2375-2376 for the "Is" pointer-width arms, closing braces,
// and the `rhs` parameter line of build_intrinsic_call).
2363 impl OverflowOpViaIntrinsic
{
// Resolves the intrinsic name for this op and LHS type, then fetches
// the corresponding LLVM function declaration from the context.
2364 fn to_intrinsic
<'blk
, 'tcx
>(&self, bcx
: Block
<'blk
, 'tcx
>, lhs_ty
: Ty
) -> ValueRef
{
2365 let name
= self.to_intrinsic_name(bcx
.tcx(), lhs_ty
);
2366 bcx
.ccx().get_intrinsic(&name
)
// Maps (op, integer type) to the LLVM intrinsic name. `isize`/`usize`
// are first normalized to a fixed-width type using the target's
// pointer width; non-integer types are a caller bug (panic).
2368 fn to_intrinsic_name(&self, tcx
: &ty
::ctxt
, ty
: Ty
) -> &'
static str {
2369 use syntax
::ast
::IntTy
::*;
2370 use syntax
::ast
::UintTy
::*;
2371 use middle
::ty
::{TyInt, TyUint}
;
2373 let new_sty
= match ty
.sty
{
2374 TyInt(Is
) => match &tcx
.sess
.target
.target
.target_pointer_width
[..] {
2377 _
=> panic
!("unsupported target word size")
2379 TyUint(Us
) => match &tcx
.sess
.target
.target
.target_pointer_width
[..] {
2380 "32" => TyUint(U32
),
2381 "64" => TyUint(U64
),
2382 _
=> panic
!("unsupported target word size")
2384 ref t @
TyUint(_
) | ref t @
TyInt(_
) => t
.clone(),
2385 _
=> panic
!("tried to get overflow intrinsic for {:?} applied to non-int type",
// Signed ops use the `s*` intrinsics; unsigned ops use `u*`.
2390 OverflowOpViaIntrinsic
::Add
=> match new_sty
{
2391 TyInt(I8
) => "llvm.sadd.with.overflow.i8",
2392 TyInt(I16
) => "llvm.sadd.with.overflow.i16",
2393 TyInt(I32
) => "llvm.sadd.with.overflow.i32",
2394 TyInt(I64
) => "llvm.sadd.with.overflow.i64",
2396 TyUint(U8
) => "llvm.uadd.with.overflow.i8",
2397 TyUint(U16
) => "llvm.uadd.with.overflow.i16",
2398 TyUint(U32
) => "llvm.uadd.with.overflow.i32",
2399 TyUint(U64
) => "llvm.uadd.with.overflow.i64",
2401 _
=> unreachable
!(),
2403 OverflowOpViaIntrinsic
::Sub
=> match new_sty
{
2404 TyInt(I8
) => "llvm.ssub.with.overflow.i8",
2405 TyInt(I16
) => "llvm.ssub.with.overflow.i16",
2406 TyInt(I32
) => "llvm.ssub.with.overflow.i32",
2407 TyInt(I64
) => "llvm.ssub.with.overflow.i64",
2409 TyUint(U8
) => "llvm.usub.with.overflow.i8",
2410 TyUint(U16
) => "llvm.usub.with.overflow.i16",
2411 TyUint(U32
) => "llvm.usub.with.overflow.i32",
2412 TyUint(U64
) => "llvm.usub.with.overflow.i64",
2414 _
=> unreachable
!(),
2416 OverflowOpViaIntrinsic
::Mul
=> match new_sty
{
2417 TyInt(I8
) => "llvm.smul.with.overflow.i8",
2418 TyInt(I16
) => "llvm.smul.with.overflow.i16",
2419 TyInt(I32
) => "llvm.smul.with.overflow.i32",
2420 TyInt(I64
) => "llvm.smul.with.overflow.i64",
2422 TyUint(U8
) => "llvm.umul.with.overflow.i8",
2423 TyUint(U16
) => "llvm.umul.with.overflow.i16",
2424 TyUint(U32
) => "llvm.umul.with.overflow.i32",
2425 TyUint(U64
) => "llvm.umul.with.overflow.i64",
2427 _
=> unreachable
!(),
// Emits the intrinsic call, unpacks the (result, overflow-flag) pair,
// hints the branch as unlikely via `llvm.expect.i1`, and emits a
// conditional panic ("arithmetic operation overflowed") on overflow.
2432 fn build_intrinsic_call
<'blk
, 'tcx
>(&self, bcx
: Block
<'blk
, 'tcx
>,
2433 info
: NodeIdAndSpan
,
2434 lhs_t
: Ty
<'tcx
>, lhs
: ValueRef
,
2436 binop_debug_loc
: DebugLoc
)
2437 -> (Block
<'blk
, 'tcx
>, ValueRef
) {
2438 let llfn
= self.to_intrinsic(bcx
, lhs_t
);
2440 let val
= Call(bcx
, llfn
, &[lhs
, rhs
], None
, binop_debug_loc
);
2441 let result
= ExtractValue(bcx
, val
, 0); // iN operation result
2442 let overflow
= ExtractValue(bcx
, val
, 1); // i1 "did it overflow?"
2444 let cond
= ICmp(bcx
, llvm
::IntEQ
, overflow
, C_integral(Type
::i1(bcx
.ccx()), 1, false),
// Tell LLVM the overflow branch is expected to be false (cold path).
2447 let expect
= bcx
.ccx().get_intrinsic(&"llvm.expect.i1");
2448 Call(bcx
, expect
, &[cond
, C_integral(Type
::i1(bcx
.ccx()), 0, false)],
2449 None
, binop_debug_loc
);
2452 base
::with_cond(bcx
, cond
, |bcx
|
2453 controlflow
::trans_fail(bcx
, info
,
2454 InternedString
::new("arithmetic operation overflowed")));
// Shift overflow checking: instead of an intrinsic, mask out the bits
// of the RHS (shift amount) that are always valid for the LHS width and
// panic if anything remains, then emit the unchecked shift.
// NOTE(review): this excerpt is missing several original lines (e.g.
// orig. 2464-2466 with the `lhs_t`/`lhs`/`rhs` parameters, 2474-2475,
// and closing braces); those names are used below but declared on lines
// not visible here.
2460 impl OverflowOpViaInputCheck
{
2461 fn build_with_input_check
<'blk
, 'tcx
>(&self,
2462 bcx
: Block
<'blk
, 'tcx
>,
2463 info
: NodeIdAndSpan
,
2467 binop_debug_loc
: DebugLoc
)
2468 -> (Block
<'blk
, 'tcx
>, ValueRef
)
2470 let lhs_llty
= val_ty(lhs
);
2471 let rhs_llty
= val_ty(rhs
);
2473 // Panic if any bits are set outside of bits that we always
2476 // Note that the mask's value is derived from the LHS type
2477 // (since that is where the 32/64 distinction is relevant) but
2478 // the mask's type must match the RHS type (since they will
2479 // both be fed into an and-binop)
2480 let invert_mask
= shift_mask_val(bcx
, lhs_llty
, rhs_llty
, true);
// Any nonzero bit here means the shift amount is out of range.
2482 let outer_bits
= And(bcx
, rhs
, invert_mask
, binop_debug_loc
);
2483 let cond
= build_nonzero_check(bcx
, outer_bits
, binop_debug_loc
);
// Emit the shift itself unconditionally; the panic below only fires
// when the input check tripped.
2484 let result
= match *self {
2485 OverflowOpViaInputCheck
::Shl
=>
2486 build_unchecked_lshift(bcx
, lhs
, rhs
, binop_debug_loc
),
2487 OverflowOpViaInputCheck
::Shr
=>
2488 build_unchecked_rshift(bcx
, lhs_t
, lhs
, rhs
, binop_debug_loc
),
2491 base
::with_cond(bcx
, cond
, |bcx
|
2492 controlflow
::trans_fail(bcx
, info
,
2493 InternedString
::new("shift operation overflowed")));
2499 // Check if an integer or vector contains a nonzero element.
// Returns an i1: integers are compared `!= 0` directly; vectors are
// bitcast to one wide integer of the same total bit-width and checked
// recursively. Any other LLVM type kind is a caller bug (panic).
// NOTE(review): the `value` parameter line (orig. 2501) and the `match
// kind` header (orig. 2505) are not visible in this excerpt.
2500 fn build_nonzero_check
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
2502 binop_debug_loc
: DebugLoc
) -> ValueRef
{
2503 let llty
= val_ty(value
)
;
2504 let kind
= llty
.kind();
2506 TypeKind
::Integer
=> ICmp(bcx
, llvm
::IntNE
, value
, C_null(llty
), binop_debug_loc
),
2507 TypeKind
::Vector
=> {
2508 // Check if any elements of the vector are nonzero by treating
2509 // it as a wide integer and checking if the integer is nonzero.
2510 let width
= llty
.vector_length() as u64 * llty
.element_type().int_width();
2511 let int_value
= BitCast(bcx
, value
, Type
::ix(bcx
.ccx(), width
));
2512 build_nonzero_check(bcx
, int_value
, binop_debug_loc
)
2514 _
=> panic
!("build_nonzero_check: expected Integer or Vector, found {:?}", kind
),
// Entry point for (possibly) overflow-checked binary ops. In an
// unreachable block it returns an undef value immediately. When the
// context enables overflow checks, it dispatches on the op's codegen
// strategy (intrinsic vs. input check); otherwise it emits the plain
// unchecked operation.
// NOTE(review): this excerpt is missing several original lines (e.g.
// the `rhs` parameter line orig. 2520, the unchecked Shl/Shr match-arm
// headers orig. 2538/2540, and closing braces).
2518 fn with_overflow_check
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, oop
: OverflowOp
, info
: NodeIdAndSpan
,
2519 lhs_t
: Ty
<'tcx
>, lhs
: ValueRef
,
2521 binop_debug_loc
: DebugLoc
)
2522 -> (Block
<'blk
, 'tcx
>, ValueRef
) {
// Dead block: emit nothing, hand back an undef of the operand type.
2523 if bcx
.unreachable
.get() { return (bcx, _Undef(lhs)); }
2524 if bcx
.ccx().check_overflow() {
// Checked path: strategy chosen per-op by codegen_strategy().
2526 match oop
.codegen_strategy() {
2527 OverflowCodegen
::ViaIntrinsic(oop
) =>
2528 oop
.build_intrinsic_call(bcx
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
),
2529 OverflowCodegen
::ViaInputCheck(oop
) =>
2530 oop
.build_with_input_check(bcx
, info
, lhs_t
, lhs
, rhs
, binop_debug_loc
),
// Unchecked path: plain LLVM arithmetic / shift instructions.
2533 let res
= match oop
{
2534 OverflowOp
::Add
=> Add(bcx
, lhs
, rhs
, binop_debug_loc
),
2535 OverflowOp
::Sub
=> Sub(bcx
, lhs
, rhs
, binop_debug_loc
),
2536 OverflowOp
::Mul
=> Mul(bcx
, lhs
, rhs
, binop_debug_loc
),
2539 build_unchecked_lshift(bcx
, lhs
, rhs
, binop_debug_loc
),
2541 build_unchecked_rshift(bcx
, lhs_t
, lhs
, rhs
, binop_debug_loc
),
2547 /// We categorize expressions into three kinds. The distinction between
2548 /// lvalue/rvalue is fundamental to the language. The distinction between the
2549 /// two kinds of rvalues is an artifact of trans which reflects how we will
2550 /// generate code for that kind of expression. See trans/expr.rs for more
2552 #[derive(Copy, Clone)]
2560 fn expr_kind(tcx
: &ty
::ctxt
, expr
: &hir
::Expr
) -> ExprKind
{
2561 if tcx
.is_method_call(expr
.id
) {
2562 // Overloaded operations are generally calls, and hence they are
2563 // generated via DPS, but there are a few exceptions:
2564 return match expr
.node
{
2565 // `a += b` has a unit result.
2566 hir
::ExprAssignOp(..) => ExprKind
::RvalueStmt
,
2568 // the deref method invoked for `*a` always yields an `&T`
2569 hir
::ExprUnary(hir
::UnDeref
, _
) => ExprKind
::Lvalue
,
2571 // the index method invoked for `a[i]` always yields an `&T`
2572 hir
::ExprIndex(..) => ExprKind
::Lvalue
,
2574 // in the general case, result could be any type, use DPS
2575 _
=> ExprKind
::RvalueDps
2580 hir
::ExprPath(..) => {
2581 match tcx
.resolve_expr(expr
) {
2582 Def
::Struct(..) | Def
::Variant(..) => {
2583 if let ty
::TyBareFn(..) = tcx
.node_id_to_type(expr
.id
).sty
{
2585 ExprKind
::RvalueDatum
2591 // Fn pointers are just scalar values.
2592 Def
::Fn(..) | Def
::Method(..) => ExprKind
::RvalueDatum
,
2594 // Note: there is actually a good case to be made that
2595 // DefArg's, particularly those of immediate type, ought to be
2596 // considered rvalues.
2599 Def
::Local(..) => ExprKind
::Lvalue
,
2602 Def
::AssociatedConst(..) => ExprKind
::RvalueDatum
,
2607 &format
!("uncategorized def for expr {}: {:?}",
2614 hir
::ExprType(ref expr
, _
) => {
2615 expr_kind(tcx
, expr
)
2618 hir
::ExprUnary(hir
::UnDeref
, _
) |
2619 hir
::ExprField(..) |
2620 hir
::ExprTupField(..) |
2621 hir
::ExprIndex(..) => {
2626 hir
::ExprMethodCall(..) |
2627 hir
::ExprStruct(..) |
2628 hir
::ExprRange(..) |
2631 hir
::ExprMatch(..) |
2632 hir
::ExprClosure(..) |
2633 hir
::ExprBlock(..) |
2634 hir
::ExprRepeat(..) |
2635 hir
::ExprVec(..) => {
2639 hir
::ExprLit(ref lit
) if lit
.node
.is_str() => {
2643 hir
::ExprBreak(..) |
2644 hir
::ExprAgain(..) |
2646 hir
::ExprWhile(..) |
2648 hir
::ExprAssign(..) |
2649 hir
::ExprInlineAsm(..) |
2650 hir
::ExprAssignOp(..) => {
2651 ExprKind
::RvalueStmt
2654 hir
::ExprLit(_
) | // Note: LitStr is carved out above
2655 hir
::ExprUnary(..) |
2657 hir
::ExprAddrOf(..) |
2658 hir
::ExprBinary(..) |
2659 hir
::ExprCast(..) => {
2660 ExprKind
::RvalueDatum