// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
12 use rustc
::middle
::ty
::{self, Ty}
;
13 use rustc
::mir
::repr
as mir
;
18 use trans
::common
::{self, Block, Result}
;
19 use trans
::debuginfo
::DebugLoc
;
23 use trans
::type_
::Type
;
27 use super::MirContext
;
28 use super::operand
::{OperandRef, OperandValue}
;
// NOTE(review): this chunk is a corrupted extraction — each logical source
// line is split across several physical lines, a stray original line number
// is embedded at the start of each logical line, and the embedded numbering
// jumps (e.g. 38 -> 42, 52 -> 56), so interior lines are MISSING: the
// `lldest` parameter of `trans_rvalue` (it is used below but never declared
// in the visible text), the `match *rvalue` opener, and several closing
// braces. Code is reproduced byte-for-byte; only comments were added.
// Restore the original text before attempting to compile.
//
// trans_rvalue: translates a MIR `Rvalue` and writes the result into a
// destination LLVM value (`lldest` — presumably a parameter on a missing
// line; TODO confirm against the original file). Visible arms handle
// Rvalue::Use, Cast(Unsize), Repeat, Aggregate, Slice and InlineAsm; the
// visible fallthrough asserts `rvalue_creates_operand`, delegates to
// `trans_rvalue_operand`, and stores the resulting operand into `lldest`.
30 impl<'bcx
, 'tcx
> MirContext
<'bcx
, 'tcx
> {
31 pub fn trans_rvalue(&mut self,
32 bcx
: Block
<'bcx
, 'tcx
>,
34 rvalue
: &mir
::Rvalue
<'tcx
>)
37 debug
!("trans_rvalue(lldest={}, rvalue={:?})",
38 bcx
.val_to_string(lldest
),
// Rvalue::Use: forward the operand straight into the destination.
42 mir
::Rvalue
::Use(ref operand
) => {
43 self.trans_operand_into(bcx
, lldest
, operand
);
// Rvalue::Cast(Unsize, ..): fat-pointer targets take the operand path;
// other targets go through a by-ref coercion (visible below).
47 mir
::Rvalue
::Cast(mir
::CastKind
::Unsize
, ref operand
, cast_ty
) => {
48 if common
::type_is_fat_ptr(bcx
.tcx(), cast_ty
) {
49 // into-coerce of a thin pointer to a fat pointer - just
50 // use the operand path.
51 let (bcx
, temp
) = self.trans_rvalue_operand(bcx
, rvalue
);
52 self.store_operand(bcx
, lldest
, temp
);
56 // Unsize of a nontrivial struct. I would prefer for
57 // this to be eliminated by MIR translation, but
58 // `CoerceUnsized` can be passed by a where-clause,
59 // so the (generic) MIR may not be able to expand it.
60 let operand
= self.trans_operand(bcx
, operand
);
62 OperandValue
::FatPtr(..) => unreachable
!(),
63 OperandValue
::Immediate(llval
) => {
64 // unsize from an immediate structure. We don't
65 // really need a temporary alloca here, but
66 // avoiding it would require us to have
67 // `coerce_unsized_into` use extractvalue to
68 // index into the struct, and this case isn't
69 // important enough for it.
70 debug
!("trans_rvalue: creating ugly alloca");
71 let lltemp
= base
::alloc_ty(bcx
, operand
.ty
, "__unsize_temp");
72 base
::store_ty(bcx
, llval
, lltemp
, operand
.ty
);
73 base
::coerce_unsized_into(bcx
,
77 OperandValue
::Ref(llref
) => {
78 base
::coerce_unsized_into(bcx
,
// Rvalue::Repeat: translate the element once, then store it into every
// slot of the destination via tvec::iter_vec_raw.
86 mir
::Rvalue
::Repeat(ref elem
, ref count
) => {
87 let elem
= self.trans_operand(bcx
, elem
);
88 let size
= self.trans_constant(bcx
, count
).immediate();
89 let base
= expr
::get_dataptr(bcx
, lldest
);
90 tvec
::iter_vec_raw(bcx
, base
, elem
.ty
, size
, |bcx
, llslot
, _
| {
91 self.store_operand(bcx
, llslot
, elem
);
// Rvalue::Aggregate: translate each operand into field i of lldest.
96 mir
::Rvalue
::Aggregate(_
, ref operands
) => {
97 for (i
, operand
) in operands
.iter().enumerate() {
98 // Note: perhaps this should be StructGep, but
99 // note that in some cases the values here will
100 // not be structs but arrays.
101 let lldest_i
= build
::GEPi(bcx
, lldest
, &[0, i
]);
102 self.trans_operand_into(bcx
, lldest_i
, operand
);
// Rvalue::Slice: compute base pointer offset by from_start and the new
// length (len - from_start - from_end), then store the (ptr, len) pair
// into the fat destination.
// NOTE(review): `ccx` on the C_uint call is not declared in the visible
// text — presumably bound on a missing line; confirm against original.
107 mir
::Rvalue
::Slice { ref input, from_start, from_end }
=> {
109 let input
= self.trans_lvalue(bcx
, input
);
110 let (llbase
, lllen
) = tvec
::get_base_and_len(bcx
,
112 input
.ty
.to_ty(bcx
.tcx()));
113 let llbase1
= build
::GEPi(bcx
, llbase
, &[from_start
]);
114 let adj
= common
::C_uint(ccx
, from_start
+ from_end
);
115 let lllen1
= build
::Sub(bcx
, lllen
, adj
, DebugLoc
::None
);
116 let lladdrdest
= expr
::get_dataptr(bcx
, lldest
);
117 build
::Store(bcx
, llbase1
, lladdrdest
);
118 let llmetadest
= expr
::get_meta(bcx
, lldest
);
119 build
::Store(bcx
, lllen1
, llmetadest
);
// Rvalue::InlineAsm: delegated wholesale to the asm module.
123 mir
::Rvalue
::InlineAsm(inline_asm
) => {
124 asm
::trans_inline_asm(bcx
, inline_asm
)
// Fallthrough (missing arm header): operand-creating rvalues are
// translated to an operand and then stored into the destination.
128 assert
!(rvalue_creates_operand(rvalue
));
129 let (bcx
, temp
) = self.trans_rvalue_operand(bcx
, rvalue
);
130 self.store_operand(bcx
, lldest
, temp
);
// trans_rvalue_operand: translates a MIR `Rvalue` that can be represented
// as an `OperandRef` (see `rvalue_creates_operand`), returning the
// (possibly advanced) block together with the operand. Panics (via the
// visible `sess.bug`) on Repeat/Aggregate/Slice/InlineAsm, which must go
// through `trans_rvalue` instead.
//
// NOTE(review): corrupted extraction — logical lines are split across
// physical lines, stray original line numbers are embedded, and the
// embedded numbering jumps (e.g. 141 -> 144, 183 -> 188), so interior
// lines (the `match *rvalue` opener, OperandRef struct literals, closing
// braces, the by-ref-operand bug message) are missing. Code kept
// byte-identical; comments only.
136 pub fn trans_rvalue_operand(&mut self,
137 bcx
: Block
<'bcx
, 'tcx
>,
138 rvalue
: &mir
::Rvalue
<'tcx
>)
139 -> (Block
<'bcx
, 'tcx
>, OperandRef
<'tcx
>)
141 assert
!(rvalue_creates_operand(rvalue
), "cannot trans {:?} to operand", rvalue
);
// Rvalue::Use: simply translate the operand.
144 mir
::Rvalue
::Use(ref operand
) => {
145 let operand
= self.trans_operand(bcx
, operand
);
// Rvalue::Cast: monomorphize the target type, then dispatch on the kind.
149 mir
::Rvalue
::Cast(ref kind
, ref operand
, cast_ty
) => {
150 let operand
= self.trans_operand(bcx
, operand
);
151 debug
!("cast operand is {}", operand
.repr(bcx
));
152 let cast_ty
= bcx
.monomorphize(&cast_ty
);
154 let val
= match *kind
{
155 mir
::CastKind
::ReifyFnPointer
|
156 mir
::CastKind
::UnsafeFnPointer
=> {
157 // these are no-ops at the LLVM level
160 mir
::CastKind
::Unsize
=> {
161 // unsize targets other than to a fat pointer currently
162 // can't be operands.
163 assert
!(common
::type_is_fat_ptr(bcx
.tcx(), cast_ty
));
166 OperandValue
::FatPtr(..) => {
167 // unsize from a fat pointer - this is a
168 // "trait-object-to-supertrait" coercion, for
170 // &'a fmt::Debug+Send => &'a fmt::Debug,
171 // and is a no-op at the LLVM level
174 OperandValue
::Immediate(lldata
) => {
176 let (lldata
, llextra
) =
177 base
::unsize_thin_ptr(bcx
, lldata
,
178 operand
.ty
, cast_ty
);
179 OperandValue
::FatPtr(lldata
, llextra
)
181 OperandValue
::Ref(_
) => {
183 &format
!("by-ref operand {} in trans_rvalue_operand",
188 mir
::CastKind
::Misc
=> unimplemented
!()
// Rvalue::Ref: translate the lvalue; because lvalues are indirect, its
// llval already is the reference. Sized referents yield an Immediate,
// unsized ones a FatPtr (visible below).
196 mir
::Rvalue
::Ref(_
, bk
, ref lvalue
) => {
197 let tr_lvalue
= self.trans_lvalue(bcx
, lvalue
);
199 let ty
= tr_lvalue
.ty
.to_ty(bcx
.tcx());
200 let ref_ty
= bcx
.tcx().mk_ref(
201 bcx
.tcx().mk_region(ty
::ReStatic
),
202 ty
::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
205 // Note: lvalues are indirect, so storing the `llval` into the
206 // destination effectively creates a reference.
207 if common
::type_is_sized(bcx
.tcx(), ty
) {
209 val
: OperandValue
::Immediate(tr_lvalue
.llval
),
214 val
: OperandValue
::FatPtr(tr_lvalue
.llval
,
// Rvalue::Len: length of the lvalue as a usize immediate.
221 mir
::Rvalue
::Len(ref lvalue
) => {
222 let tr_lvalue
= self.trans_lvalue(bcx
, lvalue
);
224 val
: OperandValue
::Immediate(self.lvalue_len(bcx
, tr_lvalue
)),
225 ty
: bcx
.tcx().types
.usize,
// Rvalue::BinaryOp: fat-pointer operands go through compare_fat_ptrs;
// scalars go through trans_scalar_binop.
229 mir
::Rvalue
::BinaryOp(op
, ref lhs
, ref rhs
) => {
230 let lhs
= self.trans_operand(bcx
, lhs
);
231 let rhs
= self.trans_operand(bcx
, rhs
);
232 let llresult
= if common
::type_is_fat_ptr(bcx
.tcx(), lhs
.ty
) {
233 match (lhs
.val
, rhs
.val
) {
234 (OperandValue
::FatPtr(lhs_addr
, lhs_extra
),
235 OperandValue
::FatPtr(rhs_addr
, rhs_extra
)) => {
236 base
::compare_fat_ptrs(bcx
,
239 lhs
.ty
, op
.to_hir_binop(),
246 self.trans_scalar_binop(bcx
, op
,
247 lhs
.immediate(), rhs
.immediate(),
248 lhs
.ty
, DebugLoc
::None
)
251 val
: OperandValue
::Immediate(llresult
),
252 ty
: self.mir
.binop_ty(bcx
.tcx(), op
, lhs
.ty
, rhs
.ty
),
// Rvalue::UnaryOp: Not -> LLVM Not; Neg -> FNeg for floats, Neg otherwise.
256 mir
::Rvalue
::UnaryOp(op
, ref operand
) => {
257 let operand
= self.trans_operand(bcx
, operand
);
258 let lloperand
= operand
.immediate();
259 let is_float
= operand
.ty
.is_fp();
260 let debug_loc
= DebugLoc
::None
;
261 let llval
= match op
{
262 mir
::UnOp
::Not
=> build
::Not(bcx
, lloperand
, debug_loc
),
263 mir
::UnOp
::Neg
=> if is_float
{
264 build
::FNeg(bcx
, lloperand
, debug_loc
)
266 build
::Neg(bcx
, lloperand
, debug_loc
)
270 val
: OperandValue
::Immediate(llval
),
// Rvalue::Box: compute size/align of the content type and call
// malloc_raw_dyn; the raw pointer becomes an Immediate operand.
275 mir
::Rvalue
::Box(content_ty
) => {
276 let content_ty
: Ty
<'tcx
> = bcx
.monomorphize(&content_ty
);
277 let llty
= type_of
::type_of(bcx
.ccx(), content_ty
);
278 let llsize
= machine
::llsize_of(bcx
.ccx(), llty
);
279 let align
= type_of
::align_of(bcx
.ccx(), content_ty
);
280 let llalign
= common
::C_uint(bcx
.ccx(), align
);
281 let llty_ptr
= llty
.ptr_to();
282 let box_ty
= bcx
.tcx().mk_box(content_ty
);
283 let Result { bcx, val: llval }
= base
::malloc_raw_dyn(bcx
,
290 val
: OperandValue
::Immediate(llval
),
// Non-operand rvalues: internal compiler error if reached here.
295 mir
::Rvalue
::Repeat(..) |
296 mir
::Rvalue
::Aggregate(..) |
297 mir
::Rvalue
::Slice { .. }
|
298 mir
::Rvalue
::InlineAsm(..) => {
299 bcx
.tcx().sess
.bug(&format
!("cannot generate operand from rvalue {:?}", rvalue
));
// trans_scalar_binop: emits the LLVM instruction for a scalar MIR binary
// op, choosing the float / signed / unsigned variant from the input type.
//
// NOTE(review): corrupted extraction — the embedded original numbering
// jumps 305 -> 310, so the parameter lines declaring `op`, `lhs`, `rhs`
// and `input_ty` (all used below) are missing, as are the `match op`
// opener, a `let tcx = ...` binding (referenced in the Rem arm), `else`
// headers, and closing braces. Code kept byte-identical; comments only.
304 pub fn trans_scalar_binop(&mut self,
305 bcx
: Block
<'bcx
, 'tcx
>,
310 debug_loc
: DebugLoc
) -> ValueRef
{
311 let is_float
= input_ty
.is_fp();
312 let is_signed
= input_ty
.is_signed();
// Add/Sub/Mul: FAdd/FSub/FMul for floats, integer form otherwise.
314 mir
::BinOp
::Add
=> if is_float
{
315 build
::FAdd(bcx
, lhs
, rhs
, debug_loc
)
317 build
::Add(bcx
, lhs
, rhs
, debug_loc
)
319 mir
::BinOp
::Sub
=> if is_float
{
320 build
::FSub(bcx
, lhs
, rhs
, debug_loc
)
322 build
::Sub(bcx
, lhs
, rhs
, debug_loc
)
324 mir
::BinOp
::Mul
=> if is_float
{
325 build
::FMul(bcx
, lhs
, rhs
, debug_loc
)
327 build
::Mul(bcx
, lhs
, rhs
, debug_loc
)
// Div: FDiv / SDiv / UDiv depending on float-ness and signedness.
329 mir
::BinOp
::Div
=> if is_float
{
330 build
::FDiv(bcx
, lhs
, rhs
, debug_loc
)
331 } else if is_signed
{
332 build
::SDiv(bcx
, lhs
, rhs
, debug_loc
)
334 build
::UDiv(bcx
, lhs
, rhs
, debug_loc
)
// Rem: float remainder special-cases 32-bit MSVC (see comment below) by
// calling out to `fmod` directly instead of emitting `frem`.
336 mir
::BinOp
::Rem
=> if is_float
{
337 // LLVM currently always lowers the `frem` instructions appropriate
338 // library calls typically found in libm. Notably f64 gets wired up
339 // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
340 // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
341 // instead just an inline function in a header that goes up to a
342 // f64, uses `fmod`, and then comes back down to a f32.
344 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
345 // still unconditionally lower frem instructions over 32-bit floats
346 // to a call to `fmodf`. To work around this we special case MSVC
347 // 32-bit float rem instructions and instead do the call out to
350 // Note that this is currently duplicated with src/libcore/ops.rs
351 // which does the same thing, and it would be nice to perhaps unify
352 // these two implementations one day! Also note that we call `fmod`
353 // for both 32 and 64-bit floats because if we emit any FRem
354 // instruction at all then LLVM is capable of optimizing it into a
355 // 32-bit FRem (which we're trying to avoid).
357 let use_fmod
= tcx
.sess
.target
.target
.options
.is_like_msvc
&&
358 tcx
.sess
.target
.target
.arch
== "x86";
360 let f64t
= Type
::f64(bcx
.ccx());
361 let fty
= Type
::func(&[f64t
, f64t
], &f64t
);
362 let llfn
= declare
::declare_cfn(bcx
.ccx(), "fmod", fty
,
// f32 operands are widened to f64 around the `fmod` call and truncated
// back afterwards.
364 if input_ty
== tcx
.types
.f32 {
365 let lllhs
= build
::FPExt(bcx
, lhs
, f64t
);
366 let llrhs
= build
::FPExt(bcx
, rhs
, f64t
);
367 let llres
= build
::Call(bcx
, llfn
, &[lllhs
, llrhs
],
369 build
::FPTrunc(bcx
, llres
, Type
::f32(bcx
.ccx()))
371 build
::Call(bcx
, llfn
, &[lhs
, rhs
],
375 build
::FRem(bcx
, lhs
, rhs
, debug_loc
)
377 } else if is_signed
{
378 build
::SRem(bcx
, lhs
, rhs
, debug_loc
)
380 build
::URem(bcx
, lhs
, rhs
, debug_loc
)
// Bitwise ops map one-to-one onto LLVM Or/And/Xor.
382 mir
::BinOp
::BitOr
=> build
::Or(bcx
, lhs
, rhs
, debug_loc
),
383 mir
::BinOp
::BitAnd
=> build
::And(bcx
, lhs
, rhs
, debug_loc
),
384 mir
::BinOp
::BitXor
=> build
::Xor(bcx
, lhs
, rhs
, debug_loc
),
// Shifts go through the `common` helpers (argument lists truncated in
// this extraction).
385 mir
::BinOp
::Shl
=> common
::build_unchecked_lshift(bcx
,
389 mir
::BinOp
::Shr
=> common
::build_unchecked_rshift(bcx
,
// Comparisons delegate to compare_scalar_types with the HIR binop.
394 mir
::BinOp
::Eq
| mir
::BinOp
::Lt
| mir
::BinOp
::Gt
|
395 mir
::BinOp
::Ne
| mir
::BinOp
::Le
| mir
::BinOp
::Ge
=> {
396 base
::compare_scalar_types(bcx
, lhs
, rhs
, input_ty
,
397 op
.to_hir_binop(), debug_loc
)
// rvalue_creates_operand: classifies which `Rvalue` variants can be
// translated to an `OperandRef` (handled by `trans_rvalue_operand`) versus
// those that must be written directly into a destination (`trans_rvalue`).
//
// NOTE(review): corrupted extraction — the match arms' result expressions
// (presumably `true` for the first group and `false` for the second; TODO
// confirm against the original) and the closing braces past the visible
// end are missing. Code kept byte-identical; comments only.
403 pub fn rvalue_creates_operand
<'tcx
>(rvalue
: &mir
::Rvalue
<'tcx
>) -> bool
{
405 mir
::Rvalue
::Use(..) | // (*)
406 mir
::Rvalue
::Ref(..) |
407 mir
::Rvalue
::Len(..) |
408 mir
::Rvalue
::Cast(..) | // (*)
409 mir
::Rvalue
::BinaryOp(..) |
410 mir
::Rvalue
::UnaryOp(..) |
411 mir
::Rvalue
::Box(..) =>
413 mir
::Rvalue
::Repeat(..) |
414 mir
::Rvalue
::Aggregate(..) |
415 mir
::Rvalue
::Slice { .. }
|
416 mir
::Rvalue
::InlineAsm(..) =>
420 // (*) this is only true if the type is suitable