// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use abi;
use adt;
use base;
use builder::Builder;
use common::{self, BlockAndBuilder, C_uint};
use consts;
use drop;
use machine;
use Disr;

use std::ptr;

use super::{MirContext, TempRef};
#[derive(Copy, Clone)]
pub struct LvalueRef<'tcx> {
    /// Pointer to the contents of the lvalue
    pub llval: ValueRef,

    /// This lvalue's extra data if it is unsized, or null
    pub llextra: ValueRef,

    /// Monomorphized type of this lvalue, including variant information
    pub ty: LvalueTy<'tcx>,
}
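// For example: for a sized lvalue such as an `i32` temporary, `llval` points
// directly at the value and `llextra` is null. For an unsized lvalue such as
// a `[i32]` slice or a trait object, `llextra` carries the missing metadata:
// the slice length or the vtable pointer, respectively.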
impl<'tcx> LvalueRef<'tcx> {
    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
    }
    pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
                        ty: Ty<'tcx>,
                        name: &str)
                        -> LvalueRef<'tcx> {
        assert!(!ty.has_erasable_regions());
        let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
        if bcx.fcx().type_needs_drop(ty) {
            drop::drop_fill(bcx, lltemp, ty);
        }
        LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
    }
}
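// Helpers for accessing the two components of a fat (data pointer plus extra
// metadata) pointer.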
pub fn get_meta(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
    b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
}

pub fn get_dataptr(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
    b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
}

pub fn load_fat_ptr(b: &Builder, fat_ptr: ValueRef) -> (ValueRef, ValueRef) {
    (b.load(get_dataptr(b, fat_ptr)), b.load(get_meta(b, fat_ptr)))
}
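// A rough source-level sketch of the layout these helpers assume (the
// `FatPtr` type below is hypothetical, for illustration only):
//
//     #[repr(C)]
//     struct FatPtr {
//         data: *mut u8,  // field abi::FAT_PTR_ADDR, read via get_dataptr()
//         extra: usize,   // field abi::FAT_PTR_EXTRA, read via get_meta();
//     }                   // holds a slice length or a vtable pointer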
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    pub fn lvalue_len(&mut self,
                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                      lvalue: LvalueRef<'tcx>)
                      -> ValueRef {
        match lvalue.ty.to_ty(bcx.tcx()).sty {
            ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n),
            ty::TySlice(_) | ty::TyStr => {
                assert!(lvalue.llextra != ptr::null_mut());
                lvalue.llextra
            }
            _ => bug!("unexpected type in lvalue_len"),
        }
    }
    pub fn trans_lvalue(&mut self,
                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
                        lvalue: &mir::Lvalue<'tcx>)
                        -> LvalueRef<'tcx> {
        debug!("trans_lvalue(lvalue={:?})", lvalue);

        let fcx = bcx.fcx();
        let ccx = bcx.ccx();
        let tcx = bcx.tcx();
        match *lvalue {
            mir::Lvalue::Var(index) => self.vars[index as usize],
            mir::Lvalue::Temp(index) => match self.temps[index as usize] {
                TempRef::Lvalue(lvalue) =>
                    lvalue,
                TempRef::Operand(..) =>
                    bug!("using operand temp {:?} as lvalue", lvalue),
            },
            mir::Lvalue::Arg(index) => self.args[index as usize],
            mir::Lvalue::Static(def_id) => {
                let const_ty = self.mir.lvalue_ty(tcx, lvalue);
                LvalueRef::new_sized(consts::get_static(ccx, def_id).val, const_ty)
            },
            mir::Lvalue::ReturnPointer => {
                let llval = if !fcx.fn_ty.ret.is_ignore() {
                    bcx.with_block(|bcx| {
                        fcx.get_ret_slot(bcx, "")
                    })
                } else {
                    // This is a void return; that is, there’s no place to store the value and
                    // there cannot really be one (or storing into it doesn’t make sense, anyway).
                    // Ergo, we return an undef ValueRef, so we do not have to special-case every
                    // place using lvalues, and can use it the same way you use a regular
                    // ReturnPointer lvalue (i.e. store into it, load from it, etc.).
                    let llty = fcx.fn_ty.ret.original_ty.ptr_to();
                    unsafe {
                        llvm::LLVMGetUndef(llty.to_ref())
                    }
                };
                let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
                let return_ty = fn_return_ty.unwrap();
                LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
            },
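            // A source expression like `(*x).f` arrives here as a chain of
            // projections, and trans_lvalue recurses through the base first.
            // A rough sketch of the MIR shape involved (illustrative only):
            //
            //     Lvalue::Projection(Projection {
            //         base: Lvalue::Projection(Projection {
            //             base: Lvalue::Var(x),
            //             elem: ProjectionElem::Deref,
            //         }),
            //         elem: ProjectionElem::Field(f, ty),
            //     })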
            mir::Lvalue::Projection(ref projection) => {
                let tr_base = self.trans_lvalue(bcx, &projection.base);
                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
                let projected_ty = bcx.monomorphize(&projected_ty);

                let project_index = |llindex| {
                    let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
                        // Slices already point to the array element type.
                        bcx.inbounds_gep(tr_base.llval, &[llindex])
                    } else {
                        let zero = common::C_uint(bcx.ccx(), 0u64);
                        bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
                    };
                    (element, ptr::null_mut())
                };

                let (llprojected, llextra) = match projection.elem {
                    mir::ProjectionElem::Deref => {
                        let base_ty = tr_base.ty.to_ty(tcx);
                        if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
                            (base::load_ty_builder(bcx, tr_base.llval, base_ty),
                             ptr::null_mut())
                        } else {
                            load_fat_ptr(bcx, tr_base.llval)
                        }
                    }
                    mir::ProjectionElem::Field(ref field, _) => {
                        let base_ty = tr_base.ty.to_ty(tcx);
                        let base_repr = adt::represent_type(ccx, base_ty);
                        let discr = match tr_base.ty {
                            LvalueTy::Ty { .. } => 0,
                            LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
                        };
                        let discr = discr as u64;
                        let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
                        let base = if is_sized {
                            adt::MaybeSizedValue::sized(tr_base.llval)
                        } else {
                            adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
                        };
                        let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base,
                                                                       Disr(discr), field.index());
                        let llextra = if is_sized {
                            ptr::null_mut()
                        } else {
                            tr_base.llextra
                        };
                        (llprojected, llextra)
                    }
                    mir::ProjectionElem::Index(ref index) => {
                        let index = self.trans_operand(bcx, index);
                        project_index(self.prepare_index(bcx, index.immediate()))
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = C_uint(bcx.ccx(), offset);
                        project_index(self.prepare_index(bcx, lloffset))
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        // Counting from the end: the actual index is `len - offset`.
                        let lloffset = C_uint(bcx.ccx(), offset);
                        let lllen = self.lvalue_len(bcx, tr_base);
                        let llindex = bcx.sub(lllen, lloffset);
                        project_index(self.prepare_index(bcx, llindex))
                    }
                    mir::ProjectionElem::Downcast(..) => {
                        (tr_base.llval, tr_base.llextra)
                    }
                };
                LvalueRef {
                    llval: llprojected,
                    llextra: llextra,
                    ty: projected_ty,
                }
            }
        }
    }
    // Perform an action using the given Lvalue.
    // If the Lvalue is an empty TempRef::Operand, then a temporary stack slot
    // is created first, then used as an operand to update the Lvalue.
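    // The result of `f` is returned unchanged; the loaded operand is only
    // cached back into `self.temps` so later uses see the updated value.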
    pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                 lvalue: &mir::Lvalue<'tcx>, f: F) -> U
        where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
    {
        match *lvalue {
            mir::Lvalue::Temp(idx) => {
                match self.temps[idx as usize] {
                    TempRef::Lvalue(lvalue) => f(self, lvalue),
                    TempRef::Operand(None) => {
                        let lvalue_ty = self.mir.lvalue_ty(bcx.tcx(), lvalue);
                        let lvalue_ty = bcx.monomorphize(&lvalue_ty);
                        let lvalue = LvalueRef::alloca(bcx,
                                                       lvalue_ty.to_ty(bcx.tcx()),
                                                       "lvalue_temp");
                        let ret = f(self, lvalue);
                        let op = self.trans_load(bcx, lvalue.llval, lvalue_ty.to_ty(bcx.tcx()));
                        self.temps[idx as usize] = TempRef::Operand(Some(op));
                        ret
                    }
                    TempRef::Operand(Some(_)) => {
                        bug!("Lvalue temp already set");
                    }
                }
            }
            _ => {
                let lvalue = self.trans_lvalue(bcx, lvalue);
                f(self, lvalue)
            }
        }
    }
    /// Adjust the bitwidth of an index since LLVM is less forgiving
    /// than we are.
    ///
    /// nmatsakis: is this still necessary? Not sure.
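    /// (Concretely: an index narrower than the crate context's int type is
    /// zero-extended, and a wider one is truncated.)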
    fn prepare_index(&mut self,
                     bcx: &BlockAndBuilder<'bcx, 'tcx>,
                     llindex: ValueRef)
                     -> ValueRef {
        let ccx = bcx.ccx();
        let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex));
        let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type());
        if index_size < int_size {
            bcx.zext(llindex, ccx.int_type())
        } else if index_size > int_size {
            bcx.trunc(llindex, ccx.int_type())
        } else {
            llindex
        }
    }
}