// Provenance: git.proxmox.com Git - rustc.git (Imported Upstream version 1.9.0+dfsg1)
// File: src/librustc_trans/mir/lvalue.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use llvm::ValueRef;
12 use rustc::ty::{self, Ty, TypeFoldable};
13 use rustc::mir::repr as mir;
14 use rustc::mir::tcx::LvalueTy;
15 use abi;
16 use adt;
17 use base;
18 use builder::Builder;
19 use common::{self, BlockAndBuilder, C_uint};
20 use consts;
21 use machine;
22 use mir::drop;
23 use llvm;
24 use Disr;
25
26 use std::ptr;
27
28 use super::{MirContext, TempRef};
29
/// The result of translating an lvalue: a pointer to the lvalue's
/// storage, plus (for unsized lvalues) its extra data word, plus the
/// monomorphized type the pointer refers to.
#[derive(Copy, Clone)]
pub struct LvalueRef<'tcx> {
    /// Pointer to the contents of the lvalue
    pub llval: ValueRef,

    /// This lvalue's extra data if it is unsized, or null
    pub llextra: ValueRef,

    /// Monomorphized type of this lvalue, including variant information
    pub ty: LvalueTy<'tcx>,
}
41
42 impl<'tcx> LvalueRef<'tcx> {
43 pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
44 LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
45 }
46
47 pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
48 ty: Ty<'tcx>,
49 name: &str)
50 -> LvalueRef<'tcx>
51 {
52 assert!(!ty.has_erasable_regions());
53 let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
54 if bcx.fcx().type_needs_drop(ty) {
55 drop::drop_fill(bcx, lltemp, ty);
56 }
57 LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
58 }
59 }
60
61 pub fn get_meta(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
62 b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
63 }
64
65 pub fn get_dataptr(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
66 b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
67 }
68
69 pub fn load_fat_ptr(b: &Builder, fat_ptr: ValueRef) -> (ValueRef, ValueRef) {
70 (b.load(get_dataptr(b, fat_ptr)), b.load(get_meta(b, fat_ptr)))
71 }
72
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    /// Returns the element count of an indexable `lvalue`: a constant
    /// for fixed-size arrays; for slices and `str`, the fat-pointer
    /// extra word (which must therefore be non-null).
    pub fn lvalue_len(&mut self,
                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                      lvalue: LvalueRef<'tcx>)
                      -> ValueRef {
        match lvalue.ty.to_ty(bcx.tcx()).sty {
            ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n),
            ty::TySlice(_) | ty::TyStr => {
                // Unsized values carry their length in llextra.
                assert!(lvalue.llextra != ptr::null_mut());
                lvalue.llextra
            }
            _ => bug!("unexpected type in lvalue_len"),
        }
    }

    /// Translates a MIR `Lvalue` into an `LvalueRef`: a pointer to the
    /// lvalue's storage plus, for unsized lvalues, its extra data word.
    pub fn trans_lvalue(&mut self,
                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
                        lvalue: &mir::Lvalue<'tcx>)
                        -> LvalueRef<'tcx> {
        debug!("trans_lvalue(lvalue={:?})", lvalue);

        let fcx = bcx.fcx();
        let ccx = bcx.ccx();
        let tcx = bcx.tcx();
        match *lvalue {
            // Vars, args and lvalue-temps already have a home: look it up.
            mir::Lvalue::Var(index) => self.vars[index as usize],
            mir::Lvalue::Temp(index) => match self.temps[index as usize] {
                TempRef::Lvalue(lvalue) =>
                    lvalue,
                TempRef::Operand(..) =>
                    bug!("using operand temp {:?} as lvalue", lvalue),
            },
            mir::Lvalue::Arg(index) => self.args[index as usize],
            mir::Lvalue::Static(def_id) => {
                let const_ty = self.mir.lvalue_ty(tcx, lvalue);
                LvalueRef::new_sized(consts::get_static(ccx, def_id).val, const_ty)
            },
            mir::Lvalue::ReturnPointer => {
                let llval = if !fcx.fn_ty.ret.is_ignore() {
                    bcx.with_block(|bcx| {
                        fcx.get_ret_slot(bcx, "")
                    })
                } else {
                    // This is a void return; that is, there’s no place to store the value and
                    // there cannot really be one (or storing into it doesn’t make sense, anyway).
                    // Ergo, we return an undef ValueRef, so we do not have to special-case every
                    // place using lvalues, and could use it the same way you use a regular
                    // ReturnPointer LValue (i.e. store into it, load from it etc).
                    let llty = fcx.fn_ty.ret.original_ty.ptr_to();
                    unsafe {
                        llvm::LLVMGetUndef(llty.to_ref())
                    }
                };
                // NOTE(review): `return_ty` here is an unwrap of the
                // monomorphized FnOutput — presumably diverging functions
                // never reach this arm; confirm against callers.
                let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
                let return_ty = fn_return_ty.unwrap();
                LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
            },
            mir::Lvalue::Projection(ref projection) => {
                // Translate the base lvalue, then layer the projection
                // element on top of it.
                let tr_base = self.trans_lvalue(bcx, &projection.base);
                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
                let projected_ty = bcx.monomorphize(&projected_ty);

                // Shared helper for the three index-style projections:
                // computes the element pointer; the result is always sized
                // (null extra data).
                let project_index = |llindex| {
                    let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
                        // Slices already point to the array element type.
                        bcx.inbounds_gep(tr_base.llval, &[llindex])
                    } else {
                        let zero = common::C_uint(bcx.ccx(), 0u64);
                        bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
                    };
                    (element, ptr::null_mut())
                };

                let (llprojected, llextra) = match projection.elem {
                    mir::ProjectionElem::Deref => {
                        // Dereferencing a pointer to a sized value is a plain
                        // load; a pointer to an unsized value is fat, and both
                        // halves are loaded.
                        let base_ty = tr_base.ty.to_ty(tcx);
                        if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
                            (base::load_ty_builder(bcx, tr_base.llval, base_ty),
                             ptr::null_mut())
                        } else {
                            load_fat_ptr(bcx, tr_base.llval)
                        }
                    }
                    mir::ProjectionElem::Field(ref field, _) => {
                        let base_ty = tr_base.ty.to_ty(tcx);
                        let base_repr = adt::represent_type(ccx, base_ty);
                        // If the base was downcast to an enum variant, the
                        // field pointer is computed within that variant;
                        // otherwise variant 0 is used.
                        let discr = match tr_base.ty {
                            LvalueTy::Ty { .. } => 0,
                            LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
                        };
                        let discr = discr as u64;
                        let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
                        let base = if is_sized {
                            adt::MaybeSizedValue::sized(tr_base.llval)
                        } else {
                            adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
                        };
                        let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base,
                                                                       Disr(discr), field.index());
                        // An unsized field inherits the base's extra data.
                        let llextra = if is_sized {
                            ptr::null_mut()
                        } else {
                            tr_base.llextra
                        };
                        (llprojected, llextra)
                    }
                    mir::ProjectionElem::Index(ref index) => {
                        let index = self.trans_operand(bcx, index);
                        project_index(self.prepare_index(bcx, index.immediate()))
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = C_uint(bcx.ccx(), offset);
                        project_index(self.prepare_index(bcx, lloffset))
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        // Indexing from the end: the effective index is
                        // `len - offset`.
                        let lloffset = C_uint(bcx.ccx(), offset);
                        let lllen = self.lvalue_len(bcx, tr_base);
                        let llindex = bcx.sub(lllen, lloffset);
                        project_index(self.prepare_index(bcx, llindex))
                    }
                    mir::ProjectionElem::Downcast(..) => {
                        // A downcast only refines the type (handled above via
                        // projection_ty); the pointer and extra data pass
                        // through unchanged.
                        (tr_base.llval, tr_base.llextra)
                    }
                };
                LvalueRef {
                    llval: llprojected,
                    llextra: llextra,
                    ty: projected_ty,
                }
            }
        }
    }

    // Perform an action using the given Lvalue.
    // If the Lvalue is an empty TempRef::Operand, then a temporary stack slot
    // is created first, then used as an operand to update the Lvalue.
    pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                 lvalue: &mir::Lvalue<'tcx>, f: F) -> U
        where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
    {
        match *lvalue {
            mir::Lvalue::Temp(idx) => {
                match self.temps[idx as usize] {
                    TempRef::Lvalue(lvalue) => f(self, lvalue),
                    TempRef::Operand(None) => {
                        // Spill to a fresh stack slot, run `f` against it,
                        // then load the written value back and record it as
                        // the temp's operand.
                        let lvalue_ty = self.mir.lvalue_ty(bcx.tcx(), lvalue);
                        let lvalue_ty = bcx.monomorphize(&lvalue_ty);
                        let lvalue = LvalueRef::alloca(bcx,
                                                       lvalue_ty.to_ty(bcx.tcx()),
                                                       "lvalue_temp");
                        let ret = f(self, lvalue);
                        let op = self.trans_load(bcx, lvalue.llval, lvalue_ty.to_ty(bcx.tcx()));
                        self.temps[idx as usize] = TempRef::Operand(Some(op));
                        ret
                    }
                    TempRef::Operand(Some(_)) => {
                        bug!("Lvalue temp already set");
                    }
                }
            }
            _ => {
                // Anything else already has storage; translate it directly.
                let lvalue = self.trans_lvalue(bcx, lvalue);
                f(self, lvalue)
            }
        }
    }

    /// Adjust the bitwidth of an index since LLVM is less forgiving
    /// than we are.
    ///
    /// nmatsakis: is this still necessary? Not sure.
    fn prepare_index(&mut self,
                     bcx: &BlockAndBuilder<'bcx, 'tcx>,
                     llindex: ValueRef)
                     -> ValueRef
    {
        let ccx = bcx.ccx();
        let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
        let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
        // Zero-extend narrow indices and truncate wide ones so the index
        // matches the target's native int width.
        if index_size < int_size {
            bcx.zext(llindex, ccx.int_type())
        } else if index_size > int_size {
            bcx.trunc(llindex, ccx.int_type())
        } else {
            llindex
        }
    }
}