// src/librustc_trans/trans/mir/rvalue.rs (rustc 1.6.0)
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::ValueRef;
use rustc::middle::ty::{self, Ty};
use rustc::mir::repr as mir;

use trans::asm;
use trans::base;
use trans::build;
use trans::common::{self, Block, Result};
use trans::debuginfo::DebugLoc;
use trans::declare;
use trans::expr;
use trans::machine;
use trans::type_::Type;
use trans::type_of;
use trans::tvec;

use super::MirContext;
use super::operand::{OperandRef, OperandValue};

impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    pub fn trans_rvalue(&mut self,
                        bcx: Block<'bcx, 'tcx>,
                        lldest: ValueRef,
                        rvalue: &mir::Rvalue<'tcx>)
                        -> Block<'bcx, 'tcx>
    {
        debug!("trans_rvalue(lldest={}, rvalue={:?})",
               bcx.val_to_string(lldest),
               rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                self.trans_operand_into(bcx, lldest, operand);
                bcx
            }

            mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => {
                if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                    // into-coerce of a thin pointer to a fat pointer - just
                    // use the operand path.
                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                    self.store_operand(bcx, lldest, temp);
                    return bcx;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR translation, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.trans_operand(bcx, operand);
                match operand.val {
                    OperandValue::FatPtr(..) => unreachable!(),
                    OperandValue::Immediate(llval) => {
                        // unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use extractvalue to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("trans_rvalue: creating ugly alloca");
                        let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp");
                        base::store_ty(bcx, llval, lltemp, operand.ty);
                        base::coerce_unsized_into(bcx,
                                                  lltemp, operand.ty,
                                                  lldest, cast_ty);
                    }
                    OperandValue::Ref(llref) => {
                        base::coerce_unsized_into(bcx,
                                                  llref, operand.ty,
                                                  lldest, cast_ty);
                    }
                }
                bcx
            }
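            // Illustration (names invented for the example): at the source
            // level, the unsize casts handled above arise from coercions
            // such as
            //
            //     let array: [i32; 3] = [1, 2, 3];
            //     let slice: &[i32] = &array;        // thin -> fat pointer
            //
            // which take the fat-pointer/operand path, while the struct path
            // covers smart pointers that implement `CoerceUnsized`, for
            // instance `Rc<[i32; 3]>` coercing to `Rc<[i32]>`.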

            mir::Rvalue::Repeat(ref elem, ref count) => {
                let elem = self.trans_operand(bcx, elem);
                let size = self.trans_constant(bcx, count).immediate();
                let base = expr::get_dataptr(bcx, lldest);
                tvec::iter_vec_raw(bcx, base, elem.ty, size, |bcx, llslot, _| {
                    self.store_operand(bcx, llslot, elem);
                    bcx
                })
            }
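            // Illustration: a repeat rvalue corresponds to an array
            // expression such as
            //
            //     let buf = [0u8; 32];
            //
            // The element operand is translated once and then stored into
            // each slot of the destination by the `iter_vec_raw` loop above.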

            mir::Rvalue::Aggregate(_, ref operands) => {
                for (i, operand) in operands.iter().enumerate() {
                    // Note: perhaps this should be StructGep, but
                    // note that in some cases the values here will
                    // not be structs but arrays.
                    let lldest_i = build::GEPi(bcx, lldest, &[0, i]);
                    self.trans_operand_into(bcx, lldest_i, operand);
                }
                bcx
            }
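            // Illustration: aggregate rvalues cover tuples, structs, and
            // arrays built from explicit parts, e.g.
            //
            //     let pair = (1i32, 2i64);    // tuple aggregate
            //     let arr = [1u8, 2, 3];      // array aggregate
            //
            // Each operand `i` is stored through the `GEPi(lldest, &[0, i])`
            // above into the corresponding field or element of the
            // destination.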

            mir::Rvalue::Slice { ref input, from_start, from_end } => {
                let ccx = bcx.ccx();
                let input = self.trans_lvalue(bcx, input);
                let (llbase, lllen) = tvec::get_base_and_len(bcx,
                                                             input.llval,
                                                             input.ty.to_ty(bcx.tcx()));
                let llbase1 = build::GEPi(bcx, llbase, &[from_start]);
                let adj = common::C_uint(ccx, from_start + from_end);
                let lllen1 = build::Sub(bcx, lllen, adj, DebugLoc::None);
                let lladdrdest = expr::get_dataptr(bcx, lldest);
                build::Store(bcx, llbase1, lladdrdest);
                let llmetadest = expr::get_meta(bcx, lldest);
                build::Store(bcx, lllen1, llmetadest);
                bcx
            }
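            // Worked example: with `from_start = 1` and `from_end = 2` on an
            // input of length `n`, the resulting slice's data pointer is
            // `base + 1` and its length is `n - 3`, matching the `GEPi` and
            // `Sub` above. (Slice rvalues of this shape show up, for
            // instance, when slice patterns peel elements off both ends.)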

            mir::Rvalue::InlineAsm(inline_asm) => {
                asm::trans_inline_asm(bcx, inline_asm)
            }

            _ => {
                assert!(rvalue_creates_operand(rvalue));
                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
                self.store_operand(bcx, lldest, temp);
                bcx
            }
        }
    }

    pub fn trans_rvalue_operand(&mut self,
                                bcx: Block<'bcx, 'tcx>,
                                rvalue: &mir::Rvalue<'tcx>)
                                -> (Block<'bcx, 'tcx>, OperandRef<'tcx>)
    {
        assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let operand = self.trans_operand(bcx, operand);
                (bcx, operand)
            }

            mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
                let operand = self.trans_operand(bcx, operand);
                debug!("cast operand is {}", operand.repr(bcx));
                let cast_ty = bcx.monomorphize(&cast_ty);

                let val = match *kind {
                    mir::CastKind::ReifyFnPointer |
                    mir::CastKind::UnsafeFnPointer => {
                        // these are no-ops at the LLVM level
                        operand.val
                    }
                    mir::CastKind::Unsize => {
                        // unsize targets other than to a fat pointer currently
                        // can't be operands.
                        assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));

                        match operand.val {
                            OperandValue::FatPtr(..) => {
                                // unsize from a fat pointer - this is a
                                // "trait-object-to-supertrait" coercion, for
                                // example,
                                //   &'a fmt::Debug+Send => &'a fmt::Debug,
                                // and is a no-op at the LLVM level
                                operand.val
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                let (lldata, llextra) =
                                    base::unsize_thin_ptr(bcx, lldata,
                                                          operand.ty, cast_ty);
                                OperandValue::FatPtr(lldata, llextra)
                            }
                            OperandValue::Ref(_) => {
                                bcx.sess().bug(
                                    &format!("by-ref operand {} in trans_rvalue_operand",
                                             operand.repr(bcx)));
                            }
                        }
                    }
                    mir::CastKind::Misc => unimplemented!()
                };
                (bcx, OperandRef {
                    val: val,
                    ty: cast_ty
                })
            }
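            // Illustration (names invented for the example): the
            // function-pointer casts handled above come from coercions
            // such as
            //
            //     fn double(x: i32) -> i32 { x * 2 }
            //     let f: fn(i32) -> i32 = double;        // ReifyFnPointer
            //     let g: unsafe fn(i32) -> i32 = f;      // UnsafeFnPointer
            //
            // neither of which changes the value's representation, which is
            // why `operand.val` is reused unchanged.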

            mir::Rvalue::Ref(_, bk, ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(bcx, lvalue);

                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
                let ref_ty = bcx.tcx().mk_ref(
                    bcx.tcx().mk_region(ty::ReStatic),
                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
                );

                // Note: lvalues are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                if common::type_is_sized(bcx.tcx(), ty) {
                    (bcx, OperandRef {
                        val: OperandValue::Immediate(tr_lvalue.llval),
                        ty: ref_ty,
                    })
                } else {
                    (bcx, OperandRef {
                        val: OperandValue::FatPtr(tr_lvalue.llval,
                                                  tr_lvalue.llextra),
                        ty: ref_ty,
                    })
                }
            }

            mir::Rvalue::Len(ref lvalue) => {
                let tr_lvalue = self.trans_lvalue(bcx, lvalue);
                (bcx, OperandRef {
                    val: OperandValue::Immediate(self.lvalue_len(bcx, tr_lvalue)),
                    ty: bcx.tcx().types.usize,
                })
            }

            mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                let lhs = self.trans_operand(bcx, lhs);
                let rhs = self.trans_operand(bcx, rhs);
                let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
                    match (lhs.val, rhs.val) {
                        (OperandValue::FatPtr(lhs_addr, lhs_extra),
                         OperandValue::FatPtr(rhs_addr, rhs_extra)) => {
                            base::compare_fat_ptrs(bcx,
                                                   lhs_addr, lhs_extra,
                                                   rhs_addr, rhs_extra,
                                                   lhs.ty, op.to_hir_binop(),
                                                   DebugLoc::None)
                        }
                        _ => unreachable!()
                    }
                } else {
                    self.trans_scalar_binop(bcx, op,
                                            lhs.immediate(), rhs.immediate(),
                                            lhs.ty, DebugLoc::None)
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llresult),
                    ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty),
                })
            }
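            // Illustration: the fat-pointer comparison above is reached when,
            // for instance, raw slice pointers are compared:
            //
            //     let xs = [1u8, 2, 3];
            //     let a: *const [u8] = &xs[..];
            //     let b: *const [u8] = &xs[1..];
            //     let same = a == b;   // compares data pointer and length
            //
            // which is why both the address and the extra word of each
            // operand are handed to `compare_fat_ptrs`.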

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.trans_operand(bcx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.ty.is_fp();
                let debug_loc = DebugLoc::None;
                let llval = match op {
                    mir::UnOp::Not => build::Not(bcx, lloperand, debug_loc),
                    mir::UnOp::Neg => if is_float {
                        build::FNeg(bcx, lloperand, debug_loc)
                    } else {
                        build::Neg(bcx, lloperand, debug_loc)
                    }
                };
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: operand.ty,
                })
            }

            mir::Rvalue::Box(content_ty) => {
                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
                let llty = type_of::type_of(bcx.ccx(), content_ty);
                let llsize = machine::llsize_of(bcx.ccx(), llty);
                let align = type_of::align_of(bcx.ccx(), content_ty);
                let llalign = common::C_uint(bcx.ccx(), align);
                let llty_ptr = llty.ptr_to();
                let box_ty = bcx.tcx().mk_box(content_ty);
                let Result { bcx, val: llval } = base::malloc_raw_dyn(bcx,
                                                                      llty_ptr,
                                                                      box_ty,
                                                                      llsize,
                                                                      llalign,
                                                                      DebugLoc::None);
                (bcx, OperandRef {
                    val: OperandValue::Immediate(llval),
                    ty: box_ty,
                })
            }
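            // Illustration: a box rvalue allocates room for `content_ty` via
            // `malloc_raw_dyn` and yields a pointer typed as
            // `Box<content_ty>`. For example, boxing a `u64` on a typical
            // 64-bit target requests an 8-byte allocation with 8-byte
            // alignment, and the returned pointer becomes an immediate
            // operand of type `Box<u64>`.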

            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) |
            mir::Rvalue::Slice { .. } |
            mir::Rvalue::InlineAsm(..) => {
                bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
            }
        }
    }

    pub fn trans_scalar_binop(&mut self,
                              bcx: Block<'bcx, 'tcx>,
                              op: mir::BinOp,
                              lhs: ValueRef,
                              rhs: ValueRef,
                              input_ty: Ty<'tcx>,
                              debug_loc: DebugLoc) -> ValueRef {
        let is_float = input_ty.is_fp();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => if is_float {
                build::FAdd(bcx, lhs, rhs, debug_loc)
            } else {
                build::Add(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Sub => if is_float {
                build::FSub(bcx, lhs, rhs, debug_loc)
            } else {
                build::Sub(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Mul => if is_float {
                build::FMul(bcx, lhs, rhs, debug_loc)
            } else {
                build::Mul(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Div => if is_float {
                build::FDiv(bcx, lhs, rhs, debug_loc)
            } else if is_signed {
                build::SDiv(bcx, lhs, rhs, debug_loc)
            } else {
                build::UDiv(bcx, lhs, rhs, debug_loc)
            },
            mir::BinOp::Rem => if is_float {
                // LLVM currently always lowers `frem` instructions to the
                // appropriate library calls typically found in libm. Notably,
                // f64 gets wired up to `fmod` and f32 gets wired up to
                // `fmodf`. Inconveniently for us, 32-bit MSVC does not
                // actually have an `fmodf` symbol; it's instead just an
                // inline function in a header that goes up to an f64, uses
                // `fmod`, and then comes back down to an f32.
                //
                // Although LLVM knows that `fmodf` doesn't exist on MSVC, it
                // will still unconditionally lower frem instructions over
                // 32-bit floats to a call to `fmodf`. To work around this we
                // special case MSVC 32-bit float rem instructions and instead
                // do the call out to `fmod` ourselves.
                //
                // Note that this is currently duplicated with src/libcore/ops.rs
                // which does the same thing, and it would be nice to perhaps
                // unify these two implementations one day! Also note that we
                // call `fmod` for both 32 and 64-bit floats because if we emit
                // any FRem instruction at all then LLVM is capable of
                // optimizing it into a 32-bit FRem (which we're trying to
                // avoid).
                let tcx = bcx.tcx();
                let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
                               tcx.sess.target.target.arch == "x86";
                if use_fmod {
                    let f64t = Type::f64(bcx.ccx());
                    let fty = Type::func(&[f64t, f64t], &f64t);
                    let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
                                                    tcx.types.f64);
                    if input_ty == tcx.types.f32 {
                        let lllhs = build::FPExt(bcx, lhs, f64t);
                        let llrhs = build::FPExt(bcx, rhs, f64t);
                        let llres = build::Call(bcx, llfn, &[lllhs, llrhs],
                                                None, debug_loc);
                        build::FPTrunc(bcx, llres, Type::f32(bcx.ccx()))
                    } else {
                        build::Call(bcx, llfn, &[lhs, rhs],
                                    None, debug_loc)
                    }
                } else {
                    build::FRem(bcx, lhs, rhs, debug_loc)
                }
            } else if is_signed {
                build::SRem(bcx, lhs, rhs, debug_loc)
            } else {
                build::URem(bcx, lhs, rhs, debug_loc)
            },
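            // Illustrative sketch (function name invented for the example):
            // on 32-bit MSVC the `f32 % f32` case above behaves as if the
            // source had been written as
            //
            //     fn rem_f32_msvc(a: f32, b: f32) -> f32 {
            //         ((a as f64) % (b as f64)) as f32
            //     }
            //
            // i.e. extend both operands to f64, call `fmod`, and truncate the
            // result back to f32, sidestepping the missing `fmodf` symbol.
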
            mir::BinOp::BitOr => build::Or(bcx, lhs, rhs, debug_loc),
            mir::BinOp::BitAnd => build::And(bcx, lhs, rhs, debug_loc),
            mir::BinOp::BitXor => build::Xor(bcx, lhs, rhs, debug_loc),
            mir::BinOp::Shl => common::build_unchecked_lshift(bcx,
                                                              lhs,
                                                              rhs,
                                                              debug_loc),
            mir::BinOp::Shr => common::build_unchecked_rshift(bcx,
                                                              input_ty,
                                                              lhs,
                                                              rhs,
                                                              debug_loc),
            mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
            mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
                base::compare_scalar_types(bcx, lhs, rhs, input_ty,
                                           op.to_hir_binop(), debug_loc)
            }
        }
    }
}

pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
    match *rvalue {
        mir::Rvalue::Use(..) | // (*)
        mir::Rvalue::Ref(..) |
        mir::Rvalue::Len(..) |
        mir::Rvalue::Cast(..) | // (*)
        mir::Rvalue::BinaryOp(..) |
        mir::Rvalue::UnaryOp(..) |
        mir::Rvalue::Box(..) =>
            true,
        mir::Rvalue::Repeat(..) |
        mir::Rvalue::Aggregate(..) |
        mir::Rvalue::Slice { .. } |
        mir::Rvalue::InlineAsm(..) =>
            false,
    }

    // (*) this is only true if the type is suitable
}
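
// Illustration: the `false` arms mirror what `trans_rvalue` does above:
// repeat expressions, aggregates, and slices are written field-by-field (or
// element-by-element) into a destination in memory, and inline asm is emitted
// for its side effects, so none of them can be summarized as a single
// operand. The (*) caveat appears to mean that `Use` and `Cast` only yield a
// usable operand when the value's type fits the operand representation
// (`Immediate`, `FatPtr`, or `Ref`).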