]> git.proxmox.com Git - rustc.git/blob - src/librustc_trans/trans/mir/lvalue.rs
Imported Upstream version 1.8.0+dfsg1
[rustc.git] / src / librustc_trans / trans / mir / lvalue.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use llvm::ValueRef;
12 use rustc::middle::ty::{self, Ty, TypeFoldable};
13 use rustc::mir::repr as mir;
14 use rustc::mir::tcx::LvalueTy;
15 use trans::adt;
16 use trans::base;
17 use trans::common::{self, BlockAndBuilder};
18 use trans::machine;
19 use trans::type_of;
20 use llvm;
21 use trans::Disr;
22
23 use std::ptr;
24
25 use super::{MirContext, TempRef};
26
/// The translated form of a MIR lvalue: an LLVM pointer to the
/// lvalue's contents, plus the (monomorphized) type information needed
/// to interpret what that pointer points at.
#[derive(Copy, Clone)]
pub struct LvalueRef<'tcx> {
    /// Pointer to the contents of the lvalue
    pub llval: ValueRef,

    /// This lvalue's extra data if it is unsized, or null.
    /// For slices/str this holds the length (see `lvalue_len`); for
    /// other unsized types it is whatever the fat pointer's second
    /// word carries (presumably a vtable for trait objects — inferred
    /// from `load_fat_ptr` usage, confirm against trans::base).
    pub llextra: ValueRef,

    /// Monomorphized type of this lvalue, including variant information
    pub ty: LvalueTy<'tcx>,
}
38
39 impl<'tcx> LvalueRef<'tcx> {
40 pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
41 LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
42 }
43
44 pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
45 ty: Ty<'tcx>,
46 name: &str)
47 -> LvalueRef<'tcx>
48 {
49 assert!(!ty.has_erasable_regions());
50 let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
51 LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
52 }
53 }
54
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    /// Returns the dynamic length of `lvalue` as an LLVM value.
    ///
    /// For fixed-size arrays the length is a compile-time constant; for
    /// slices and `str` it is the fat-pointer extra word stored in
    /// `llextra`. Any other type is a compiler bug here.
    pub fn lvalue_len(&mut self,
                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                      lvalue: LvalueRef<'tcx>)
                      -> ValueRef {
        match lvalue.ty.to_ty(bcx.tcx()).sty {
            ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n),
            ty::TySlice(_) | ty::TyStr => {
                // Unsized lvalues must have been built with their extra
                // data; a null llextra means a sized LvalueRef leaked in.
                assert!(lvalue.llextra != ptr::null_mut());
                lvalue.llextra
            }
            _ => bcx.sess().bug("unexpected type in lvalue_len"),
        }
    }

    /// Translates a MIR lvalue into an `LvalueRef` (pointer + type).
    ///
    /// Vars, temps, and args are looked up in the per-function tables
    /// populated elsewhere in MirContext; statics and the return
    /// pointer are materialized on demand; projections recurse on their
    /// base and then apply the projection element.
    pub fn trans_lvalue(&mut self,
                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
                        lvalue: &mir::Lvalue<'tcx>)
                        -> LvalueRef<'tcx> {
        debug!("trans_lvalue(lvalue={:?})", lvalue);

        let fcx = bcx.fcx();
        let ccx = bcx.ccx();
        let tcx = bcx.tcx();
        match *lvalue {
            mir::Lvalue::Var(index) => self.vars[index as usize],
            mir::Lvalue::Temp(index) => match self.temps[index as usize] {
                TempRef::Lvalue(lvalue) =>
                    lvalue,
                // Temps held as operands (SSA-like values) have no
                // memory location, so using one as an lvalue is a bug.
                TempRef::Operand(..) =>
                    tcx.sess.bug(&format!("using operand temp {:?} as lvalue", lvalue)),
            },
            mir::Lvalue::Arg(index) => self.args[index as usize],
            mir::Lvalue::Static(def_id) => {
                let const_ty = self.mir.lvalue_ty(tcx, lvalue);
                LvalueRef::new_sized(
                    common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)),
                    const_ty)
            },
            mir::Lvalue::ReturnPointer => {
                let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
                let return_ty = fn_return_ty.unwrap();
                let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) {
                    bcx.with_block(|bcx| {
                        fcx.get_ret_slot(bcx, fn_return_ty, "")
                    })
                } else {
                    // This is a void return; that is, there’s no place to store the value and
                    // there cannot really be one (or storing into it doesn’t make sense, anyway).
                    // Ergo, we return an undef ValueRef, so we do not have to special-case every
                    // place using lvalues, and could use it the same way you use a regular
                    // ReturnPointer LValue (i.e. store into it, load from it etc).
                    let llty = type_of::type_of(bcx.ccx(), return_ty).ptr_to();
                    unsafe {
                        llvm::LLVMGetUndef(llty.to_ref())
                    }
                };
                LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
            },
            mir::Lvalue::Projection(ref projection) => {
                let tr_base = self.trans_lvalue(bcx, &projection.base);
                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
                let (llprojected, llextra) = match projection.elem {
                    mir::ProjectionElem::Deref => {
                        let base_ty = tr_base.ty.to_ty(tcx);
                        bcx.with_block(|bcx| {
                            // A deref of a pointer to a sized value is a
                            // thin load; unsized targets need the fat
                            // pointer split into (data, extra).
                            if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
                                (base::load_ty(bcx, tr_base.llval, base_ty),
                                 ptr::null_mut())
                            } else {
                                base::load_fat_ptr(bcx, tr_base.llval, base_ty)
                            }
                        })
                    }
                    mir::ProjectionElem::Field(ref field, _) => {
                        let base_ty = tr_base.ty.to_ty(tcx);
                        let base_repr = adt::represent_type(ccx, base_ty);
                        // For a downcast base we must index fields within
                        // the chosen variant; plain types use variant 0.
                        let discr = match tr_base.ty {
                            LvalueTy::Ty { .. } => 0,
                            LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
                        };
                        let discr = discr as u64;
                        let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
                        let base = if is_sized {
                            adt::MaybeSizedValue::sized(tr_base.llval)
                        } else {
                            adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
                        };
                        let llprojected = bcx.with_block(|bcx| {
                            adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index())
                        });
                        // An unsized field inherits the base's extra data
                        // (the tail of the struct is what is unsized).
                        let llextra = if is_sized {
                            ptr::null_mut()
                        } else {
                            tr_base.llextra
                        };
                        (llprojected, llextra)
                    }
                    mir::ProjectionElem::Index(ref index) => {
                        let index = self.trans_operand(bcx, index);
                        let llindex = self.prepare_index(bcx, index.immediate());
                        // GEP through the array: [0, index]. The leading
                        // zero steps through the pointer itself.
                        let zero = common::C_uint(bcx.ccx(), 0u64);
                        (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
                         ptr::null_mut())
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        // Constant index counted from the front.
                        let lloffset = common::C_u32(bcx.ccx(), offset);
                        let llindex = self.prepare_index(bcx, lloffset);
                        let zero = common::C_uint(bcx.ccx(), 0u64);
                        (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
                         ptr::null_mut())
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        // Constant index counted from the end:
                        // effective index = len - offset.
                        let lloffset = common::C_u32(bcx.ccx(), offset);
                        let lllen = self.lvalue_len(bcx, tr_base);
                        let llindex = bcx.sub(lllen, lloffset);
                        let llindex = self.prepare_index(bcx, llindex);
                        let zero = common::C_uint(bcx.ccx(), 0u64);
                        (bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
                         ptr::null_mut())
                    }
                    // A downcast changes only the type (variant info in
                    // projected_ty); the memory location is unchanged.
                    mir::ProjectionElem::Downcast(..) => {
                        (tr_base.llval, tr_base.llextra)
                    }
                };
                LvalueRef {
                    llval: llprojected,
                    llextra: llextra,
                    ty: projected_ty,
                }
            }
        }
    }

    /// Adjust the bitwidth of an index since LLVM is less forgiving
    /// than we are.
    ///
    /// Zero-extends or truncates `llindex` to the target's native int
    /// width so GEP operands are uniformly typed.
    ///
    /// nmatsakis: is this still necessary? Not sure.
    fn prepare_index(&mut self,
                     bcx: &BlockAndBuilder<'bcx, 'tcx>,
                     llindex: ValueRef)
                     -> ValueRef
    {
        let ccx = bcx.ccx();
        let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
        let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
        if index_size < int_size {
            bcx.zext(llindex, ccx.int_type())
        } else if index_size > int_size {
            bcx.trunc(llindex, ccx.int_type())
        } else {
            llindex
        }
    }
}