use builder::Builder;
use callee;
use common::{self, val_ty, C_bool, C_null, C_uint};
-use common::{C_integral};
+use common::{C_integral, C_i32};
use adt;
use machine;
use monomorphize;
use tvec;
use value::Value;
-use super::MirContext;
+use super::{MirContext, LocalRef};
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::LvalueRef;
}
mir::Rvalue::Repeat(ref elem, ref count) => {
+ let dest_ty = dest.ty.to_ty(bcx.tcx());
+
+ // No need to initialize memory of a zero-sized slice
+ if common::type_is_zero_size(bcx.ccx, dest_ty) {
+ return bcx;
+ }
+
let tr_elem = self.trans_operand(&bcx, elem);
let size = count.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
+ let align = dest.alignment.to_align();
+
+ if let OperandValue::Immediate(v) = tr_elem.val {
+ // Use llvm.memset.p0i8.* to initialize all zero arrays
+ if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
+ let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
+ let align = C_i32(bcx.ccx, align as i32);
+ let ty = type_of::type_of(bcx.ccx, dest_ty);
+ let size = machine::llsize_of(bcx.ccx, ty);
+ let fill = C_integral(Type::i8(bcx.ccx), 0, false);
+ base::call_memset(&bcx, base, fill, size, align, false);
+ return bcx;
+ }
+
+ // Use llvm.memset.p0i8.* to initialize byte arrays
+ if common::val_ty(v) == Type::i8(bcx.ccx) {
+ let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
+ let align = C_i32(bcx.ccx, align as i32);
+ base::call_memset(&bcx, base, v, size, align, false);
+ return bcx;
+ }
+ }
+
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
- self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
+ self.store_operand(bcx, llslot, align, tr_elem);
bcx.br(loop_bb);
})
}
dest.llval, dest.ty, dest.alignment);
let field_index = active_field_index.unwrap_or(i);
val.ty = LvalueTy::Downcast {
- adt_def: adt_def,
+ adt_def,
substs: self.monomorphize(&substs),
- variant_index: variant_index,
+ variant_index,
};
let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
self.store_operand(&bcx, lldest_i, align.to_align(), op);
}
};
let operand = OperandRef {
- val: val,
+ val,
ty: cast_ty
};
(bcx, operand)
}
mir::Rvalue::Len(ref lvalue) => {
- let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
+ let size = self.evaluate_array_len(&bcx, lvalue);
let operand = OperandRef {
- val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
+ val: OperandValue::Immediate(size),
ty: bcx.tcx().types.usize,
};
(bcx, operand)
}
}
+ /// Returns the length of the array behind `lvalue` as a `usize`-typed
+ /// `ValueRef` (the caller uses it as the operand of `mir::Rvalue::Len`).
+ fn evaluate_array_len(&mut self,
+ bcx: &Builder<'a, 'tcx>,
+ lvalue: &mir::Lvalue<'tcx>) -> ValueRef
+ {
+ // ZST are passed as operands and require special handling
+ // because trans_lvalue() panics if Local is operand.
+ if let mir::Lvalue::Local(index) = *lvalue {
+ if let LocalRef::Operand(Some(op)) = self.locals[index] {
+ if common::type_is_zero_size(bcx.ccx, op.ty) {
+ if let ty::TyArray(_, n) = op.ty.sty {
+ // Fixed-size array: read the constant length straight
+ // from the type instead of materializing the lvalue.
+ return common::C_uint(bcx.ccx, n);
+ }
+ }
+ }
+ }
+ // use common size calculation for non zero-sized types
+ let tr_value = self.trans_lvalue(&bcx, lvalue);
+ return tr_value.len(bcx.ccx);
+ }
+
pub fn trans_scalar_binop(&mut self,
bcx: &Builder<'a, 'tcx>,
op: mir::BinOp,