1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 use rustc
::middle
::ty
::{self, Ty, TypeFoldable}
;
13 use rustc
::mir
::repr
as mir
;
14 use rustc
::mir
::tcx
::LvalueTy
;
17 use trans
::common
::{self, BlockAndBuilder}
;
25 use super::{MirContext, TempRef}
;
// NOTE(review): extraction-damaged chunk — each original source line is split
// across several physical lines and carries its original line number as a
// stray leading token. Several original lines are missing from this view:
// the field described by the "Pointer to the contents" doc comment
// (presumably `pub llval: ValueRef` — confirm against upstream) and the
// struct's closing brace. Code is left byte-identical; only comments added.
//
// An LvalueRef describes a translated MIR lvalue: a pointer to its storage,
// optional fat-pointer extra data, and its monomorphized type.
27 #[derive(Copy, Clone)]
28 pub struct LvalueRef
<'tcx
> {
29 /// Pointer to the contents of the lvalue
32 /// This lvalue's extra data if it is unsized, or null
// (Sized lvalues store `ptr::null_mut()` here — see `new_sized`. For unsized
// lvalues this is presumably a length or vtable pointer — TODO confirm.)
33 pub llextra
: ValueRef
,
35 /// Monomorphized type of this lvalue, including variant information
36 pub ty
: LvalueTy
<'tcx
>,
// NOTE(review): extraction-damaged impl — stray leading numbers are original
// source line numbers, and several original lines are missing from this view
// (closing braces, and the remaining parameters/return type of `alloca`).
// Code is left byte-identical; only comments are added.
39 impl<'tcx
> LvalueRef
<'tcx
> {
// Constructs an LvalueRef for a sized value: `llextra` is set to
// `ptr::null_mut()`, matching the "null if sized" contract documented on the
// struct's `llextra` field.
40 pub fn new_sized(llval
: ValueRef
, lvalue_ty
: LvalueTy
<'tcx
>) -> LvalueRef
<'tcx
> {
41 LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
// Allocates stack space for a value of type `ty` (via `base::alloc_ty`) and
// wraps the resulting pointer in a sized LvalueRef.
// NOTE(review): original lines 45-48 — the `ty`/`name` parameters the body
// uses, and the return type — are missing from this view; confirm upstream.
44 pub fn alloca
<'bcx
>(bcx
: &BlockAndBuilder
<'bcx
, 'tcx
>,
// The type must be fully monomorphized (no erasable regions) before we
// can compute its layout for the allocation.
49 assert
!(!ty
.has_erasable_regions());
50 let lltemp
= bcx
.with_block(|bcx
| base
::alloc_ty(bcx
, ty
, name
));
51 LvalueRef
::new_sized(lltemp
, LvalueTy
::from_ty(ty
))
// NOTE(review): extraction-damaged impl block — the stray leading numbers are
// original source line numbers, and many original lines are missing from this
// view (closing braces, the bindings for `tcx`/`ccx`/`fcx` used below, the
// opening `match` of `trans_lvalue`, and several arm bodies). Code is left
// byte-identical; only comments are added.
55 impl<'bcx
, 'tcx
> MirContext
<'bcx
, 'tcx
> {
// Returns the length of an array/slice/str lvalue as an LLVM value.
// (The return type is on a missing original line — presumably ValueRef;
// confirm upstream.)
56 pub fn lvalue_len(&mut self,
57 bcx
: &BlockAndBuilder
<'bcx
, 'tcx
>,
58 lvalue
: LvalueRef
<'tcx
>)
60 match lvalue
.ty
.to_ty(bcx
.tcx()).sty
{
// Fixed-size array: the length `n` is a compile-time constant.
61 ty
::TyArray(_
, n
) => common
::C_uint(bcx
.ccx(), n
),
// Slice or str: dynamically sized, so the length travels with the lvalue.
// The arm's result (original lines 64-65) is missing from this view —
// presumably it returns `lvalue.llextra`; confirm upstream.
62 ty
::TySlice(_
) | ty
::TyStr
=> {
// An unsized lvalue must have been built with non-null extra data.
63 assert
!(lvalue
.llextra
!= ptr
::null_mut());
// Any other type has no notion of length here — compiler bug if reached.
66 _
=> bcx
.sess().bug("unexpected type in lvalue_len"),
// Translates a MIR lvalue into an LvalueRef (pointer + optional extra data +
// monomorphized type). NOTE(review): the opening `match` on the lvalue kind
// (original lines 75-79, which presumably also bind the `ccx`/`tcx`/`fcx`
// used in the arms) is missing from this view.
70 pub fn trans_lvalue(&mut self,
71 bcx
: &BlockAndBuilder
<'bcx
, 'tcx
>,
72 lvalue
: &mir
::Lvalue
<'tcx
>)
74 debug
!("trans_lvalue(lvalue={:?})", lvalue
);
// User variables, temporaries and arguments were pre-translated; just
// look them up in the per-function tables.
80 mir
::Lvalue
::Var(index
) => self.vars
[index
as usize],
81 mir
::Lvalue
::Temp(index
) => match self.temps
[index
as usize] {
82 TempRef
::Lvalue(lvalue
) =>
// A temp held in operand (SSA) form has no memory location to hand out.
84 TempRef
::Operand(..) =>
85 tcx
.sess
.bug(&format
!("using operand temp {:?} as lvalue", lvalue
)),
87 mir
::Lvalue
::Arg(index
) => self.args
[index
as usize],
// Static item: fetch the global for `def_id` at its monomorphized type.
88 mir
::Lvalue
::Static(def_id
) => {
89 let const_ty
= self.mir
.lvalue_ty(tcx
, lvalue
);
91 common
::get_static_val(ccx
, def_id
, const_ty
.to_ty(tcx
)),
// Return pointer: the function's real return slot, or — for a void
// return — an undef pointer (rationale in the original comment below).
94 mir
::Lvalue
::ReturnPointer
=> {
95 let fn_return_ty
= bcx
.monomorphize(&self.mir
.return_ty
);
96 let return_ty
= fn_return_ty
.unwrap();
97 let llval
= if !common
::return_type_is_void(bcx
.ccx(), return_ty
) {
98 bcx
.with_block(|bcx
| {
99 fcx
.get_ret_slot(bcx
, fn_return_ty
, "")
102 // This is a void return; that is, there’s no place to store the value and
103 // there cannot really be one (or storing into it doesn’t make sense, anyway).
104 // Ergo, we return an undef ValueRef, so we do not have to special-case every
105 // place using lvalues, and could use it the same way you use a regular
106 // ReturnPointer LValue (i.e. store into it, load from it etc).
107 let llty
= type_of
::type_of(bcx
.ccx(), return_ty
).ptr_to();
109 llvm
::LLVMGetUndef(llty
.to_ref())
112 LvalueRef
::new_sized(llval
, LvalueTy
::from_ty(return_ty
))
// Projection: translate the base lvalue recursively, then compute the
// projected pointer (and fat-pointer extra data, if any) per element kind.
114 mir
::Lvalue
::Projection(ref projection
) => {
115 let tr_base
= self.trans_lvalue(bcx
, &projection
.base
);
116 let projected_ty
= tr_base
.ty
.projection_ty(tcx
, &projection
.elem
);
117 let (llprojected
, llextra
) = match projection
.elem
{
// Deref: load the pointer stored in the base. A sized pointee loads a
// thin pointer; an unsized one loads a fat pointer (data + extra).
118 mir
::ProjectionElem
::Deref
=> {
119 let base_ty
= tr_base
.ty
.to_ty(tcx
);
120 bcx
.with_block(|bcx
| {
121 if common
::type_is_sized(tcx
, projected_ty
.to_ty(tcx
)) {
122 (base
::load_ty(bcx
, tr_base
.llval
, base_ty
),
125 base
::load_fat_ptr(bcx
, tr_base
.llval
, base_ty
)
// Field access: use the ADT representation to compute the field pointer,
// honoring any enum-variant downcast recorded in the base's LvalueTy.
129 mir
::ProjectionElem
::Field(ref field
, _
) => {
130 let base_ty
= tr_base
.ty
.to_ty(tcx
);
131 let base_repr
= adt
::represent_type(ccx
, base_ty
);
// A plain type addresses variant 0; a downcast addresses variant `v`.
132 let discr
= match tr_base
.ty
{
133 LvalueTy
::Ty { .. }
=> 0,
134 LvalueTy
::Downcast { adt_def: _, substs: _, variant_index: v }
=> v
,
136 let discr
= discr
as u64;
137 let is_sized
= common
::type_is_sized(tcx
, projected_ty
.to_ty(tcx
));
138 let base
= if is_sized
{
139 adt
::MaybeSizedValue
::sized(tr_base
.llval
)
141 adt
::MaybeSizedValue
::unsized_(tr_base
.llval
, tr_base
.llextra
)
143 let llprojected
= bcx
.with_block(|bcx
| {
144 adt
::trans_field_ptr(bcx
, &base_repr
, base
, Disr(discr
), field
.index())
// NOTE(review): the `else` branch of this `if` (original lines 147-150,
// presumably propagating `tr_base.llextra`) is missing from this view.
146 let llextra
= if is_sized
{
151 (llprojected
, llextra
)
// Dynamic indexing: translate the index operand, normalize its width via
// `prepare_index`, then GEP through a leading zero index.
153 mir
::ProjectionElem
::Index(ref index
) => {
154 let index
= self.trans_operand(bcx
, index
);
155 let llindex
= self.prepare_index(bcx
, index
.immediate());
156 let zero
= common
::C_uint(bcx
.ccx(), 0u64);
157 (bcx
.inbounds_gep(tr_base
.llval
, &[zero
, llindex
]),
// Constant index measured from the front (presumably `from_end: false` —
// the rest of this pattern is on missing original lines 161-162).
160 mir
::ProjectionElem
::ConstantIndex
{ offset
,
163 let lloffset
= common
::C_u32(bcx
.ccx(), offset
);
164 let llindex
= self.prepare_index(bcx
, lloffset
);
165 let zero
= common
::C_uint(bcx
.ccx(), 0u64);
166 (bcx
.inbounds_gep(tr_base
.llval
, &[zero
, llindex
]),
// Constant index measured from the end: subtract the offset from the
// dynamic length (rest of the pattern is on missing original lines 170-171).
169 mir
::ProjectionElem
::ConstantIndex
{ offset
,
172 let lloffset
= common
::C_u32(bcx
.ccx(), offset
);
173 let lllen
= self.lvalue_len(bcx
, tr_base
);
174 let llindex
= bcx
.sub(lllen
, lloffset
);
175 let llindex
= self.prepare_index(bcx
, llindex
);
176 let zero
= common
::C_uint(bcx
.ccx(), 0u64);
177 (bcx
.inbounds_gep(tr_base
.llval
, &[zero
, llindex
]),
// Downcast changes only the type (variant info), not the pointer or extra.
180 mir
::ProjectionElem
::Downcast(..) => {
181 (tr_base
.llval
, tr_base
.llextra
)
193 /// Adjust the bitwidth of an index since LLVM is less forgiving
196 /// nmatsakis: is this still necessary? Not sure.
// Zero-extends or truncates `llindex` (parameter declared on missing original
// lines 199-202) to the target's native int width so GEP indices are
// uniformly sized. NOTE(review): the equal-width fallthrough (original line
// 209 onward, presumably returning `llindex` unchanged) is past this view.
197 fn prepare_index(&mut self,
198 bcx
: &BlockAndBuilder
<'bcx
, 'tcx
>,
203 let index_size
= machine
::llbitsize_of_real(bcx
.ccx(), common
::val_ty(llindex
));
204 let int_size
= machine
::llbitsize_of_real(bcx
.ccx(), ccx
.int_type());
205 if index_size
< int_size
{
206 bcx
.zext(llindex
, ccx
.int_type())
207 } else if index_size
> int_size
{
208 bcx
.trunc(llindex
, ccx
.int_type())