-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
-use rustc::mir;
-use rustc::mir::tcx::PlaceTy;
-use crate::MemFlags;
+use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
+
use crate::common::IntPredicate;
use crate::glue;
-
use crate::traits::*;
+use crate::MemFlags;
-use super::{FunctionCx, LocalRef};
-use super::operand::OperandValue;
+use rustc::mir;
+use rustc::mir::tcx::PlaceTy;
+use rustc::ty::layout::{self, Align, HasTyCtxt, LayoutOf, TyLayout, VariantIdx};
+use rustc::ty::{self, Ty};
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
- /// Pointer to the contents of the place.
+ /// A pointer to the contents of the place.
pub llval: V,
- /// This place's extra data if it is unsized, or null.
+    /// This place's extra data if it is unsized, or `None` if the place is sized.
pub llextra: Option<V>,
- /// Monomorphized type of this place, including variant information.
+ /// The monomorphized type of this place, including variant information.
pub layout: TyLayout<'tcx>,
- /// What alignment we know for this place.
+ /// The alignment we know for this place.
pub align: Align,
}
-impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
- pub fn new_sized(
- llval: V,
- layout: TyLayout<'tcx>,
- align: Align,
- ) -> PlaceRef<'tcx, V> {
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ pub fn new_sized(llval: V, layout: TyLayout<'tcx>) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
- PlaceRef {
- llval,
- llextra: None,
- layout,
- align
- }
+ PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
}
- fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
- bx: &mut Bx,
- llval: V,
- layout: TyLayout<'tcx>,
- align: Align,
- ) -> PlaceRef<'tcx, V> {
- assert!(!bx.cx().type_has_metadata(layout.ty));
- PlaceRef {
- llval,
- llextra: None,
- layout,
- align
- }
+ pub fn new_sized_aligned(llval: V, layout: TyLayout<'tcx>, align: Align) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align }
}
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
layout: TyLayout<'tcx>,
- name: &str
) -> Self {
- debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
- let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
- Self::new_sized(tmp, layout, layout.align.abi)
+ let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+ Self::new_sized(tmp, layout)
}
/// Returns a place for an indirect reference to an unsized place.
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
layout: TyLayout<'tcx>,
- name: &str,
) -> Self {
- debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
let ptr_layout = bx.cx().layout_of(ptr_ty);
- Self::alloca(bx, ptr_layout, name)
+ Self::alloca(bx, ptr_layout)
}
- pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
- &self,
- cx: &Cx
- ) -> V {
+ pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
-
}
-impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
/// Access a field, at a point when the value's case is known.
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
- self, bx: &mut Bx,
+ self,
+ bx: &mut Bx,
ix: usize,
) -> Self {
let field = self.layout.field(bx.cx(), ix);
bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
};
PlaceRef {
- // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
- llextra: if bx.cx().type_has_metadata(field.ty) {
- self.llextra
- } else {
- None
- },
+ llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
layout: field,
align: effective_field_align,
}
// Simple cases, which don't need DST adjustment:
// * no metadata available - just log the case
- // * known alignment - sized types, [T], str or a foreign type
+ // * known alignment - sized types, `[T]`, `str` or a foreign type
// * packed struct - there is no alignment padding
- match field.ty.sty {
+ match field.ty.kind {
_ if self.llextra.is_none() => {
- debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
- ix, self.llval);
+ debug!(
+                    "unsized field `{}` of `{:?}` has no metadata for adjustment",
+ ix, self.llval
+ );
return simple();
}
_ if !field.is_unsized() => return simple(),
}
// We need to get the pointer manually now.
- // We do this by casting to a *i8, then offsetting it by the appropriate amount.
+ // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
// We do this instead of, say, simply adjusting the pointer from the result of a GEP
// because the field may have an arbitrary alignment in the LLVM representation
// anyway.
//
// To demonstrate:
- // struct Foo<T: ?Sized> {
- // x: u16,
- // y: T
- // }
//
- // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
// the `y` field has 16-bit alignment.
        let meta = self.llextra;
        let unaligned_offset = bx.cx().const_usize(offset.bytes());
        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
// Bump the unaligned offset up to the appropriate alignment using the
// following expression:
//
- // (unaligned offset + (align - 1)) & -align
+ // (unaligned offset + (align - 1)) & -align
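+        //
+        // For example (an illustrative sketch, not from the original source):
+        // with `align = 4` and an unaligned offset of `13`, this computes
+        // `(13 + 3) & -4 = 16`, the next multiple of 4.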
- // Calculate offset
+ // Calculate offset.
let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
let and_lhs = bx.add(unaligned_offset, align_sub_1);
let and_rhs = bx.neg(unsized_align);
debug!("struct_field_ptr: DST field offset: {:?}", offset);
- // Cast and adjust pointer
+ // Cast and adjust pointer.
let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
let byte_ptr = bx.gep(byte_ptr, &[offset]);
- // Finally, cast back to the type expected
+ // Finally, cast back to the type expected.
let ll_fty = bx.cx().backend_type(field);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &mut Bx,
- cast_to: Ty<'tcx>
+ cast_to: Ty<'tcx>,
) -> V {
let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
layout::Variants::Single { index } => {
- let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
+ let discr_val = self
+ .layout
+ .ty
+ .discriminant_for_variant(bx.cx().tcx(), index)
.map_or(index.as_u32() as u128, |discr| discr.val);
return bx.cx().const_uint_big(cast_to, discr_val);
}
            layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
                (discr, discr_kind, discr_index)
            }
};
- let discr = self.project_field(bx, discr_index);
- let lldiscr = bx.load_operand(discr).immediate();
+ // Read the tag/niche-encoded discriminant from memory.
+ let encoded_discr = self.project_field(bx, discr_index);
+ let encoded_discr = bx.load_operand(encoded_discr);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
match *discr_kind {
layout::DiscriminantKind::Tag => {
let signed = match discr_scalar.value {
// We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because
- // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
+ // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
- _ => false
+ _ => false,
};
- bx.intcast(lldiscr, cast_to, signed)
+ bx.intcast(encoded_discr.immediate(), cast_to, signed)
}
layout::DiscriminantKind::Niche {
dataful_variant,
ref niche_variants,
niche_start,
} => {
- let niche_llty = bx.cx().immediate_backend_type(discr.layout);
- if niche_variants.start() == niche_variants.end() {
- // FIXME(eddyb) Check the actual primitive type here.
- let niche_llval = if niche_start == 0 {
- // HACK(eddyb) Using `c_null` as it works on all types.
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+ let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
+ let encoded_discr = encoded_discr.immediate();
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
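+            // For example (illustrative values, not from the original source):
+            // with a `u8` niche, `niche_start = 254` and `n = 3`, the encoded
+            // values `254, 255, 0, 1` (wrapping around) are remapped to `0..=3`
+            // by the single wrapping subtraction below.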
+ let relative_discr = if niche_start == 0 {
+ // Avoid subtracting `0`, which wouldn't work for pointers.
+ // FIXME(eddyb) check the actual primitive type here.
+ encoded_discr
+ } else {
+ bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = {
+ let relative_max = if relative_max == 0 {
+ // Avoid calling `const_uint`, which wouldn't work for pointers.
+ // FIXME(eddyb) check the actual primitive type here.
bx.cx().const_null(niche_llty)
} else {
- bx.cx().const_uint_big(niche_llty, niche_start)
+ bx.cx().const_uint(niche_llty, relative_max as u64)
};
- let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval);
- bx.select(select_arg,
+ bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
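+            // For example (illustrative values): a `u8` niche encoding
+            // `niche_variants = 300..=302` has `relative_max = 2`, which fits
+            // in the `u8`, while the final discriminants `300..=302` only fit
+            // in the wider `cast_to` type.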
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ bx.cx().const_uint(cast_to, 0)
+ } else {
+ bx.intcast(relative_discr, cast_to, false)
+ };
+ bx.add(
+ relative_discr,
bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
- bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
- } else {
- // Rebase from niche values to discriminant values.
- let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
- let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
- let lldiscr_max =
- bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
- let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max);
- let cast = bx.intcast(lldiscr, cast_to, false);
- bx.select(select_arg,
- cast,
- bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
- }
+ )
+ };
+
+ bx.select(
+ is_niche,
+ niche_discr,
+ bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+ )
}
}
}
pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
- variant_index: VariantIdx
+ variant_index: VariantIdx,
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
return;
}
match self.layout.variants {
bx.store(
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
ptr.llval,
- ptr.align);
+ ptr.align,
+ );
}
layout::Variants::Multiple {
- discr_kind: layout::DiscriminantKind::Niche {
- dataful_variant,
- ref niche_variants,
- niche_start,
- },
+ discr_kind:
+ layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start },
discr_index,
..
} => {
if variant_index != dataful_variant {
- if bx.cx().sess().target.target.arch == "arm" ||
- bx.cx().sess().target.target.arch == "aarch64" {
- // Issue #34427: As workaround for LLVM bug on ARM,
+ if bx.cx().sess().target.target.arch == "arm"
+ || bx.cx().sess().target.target.arch == "aarch64"
+ {
+                    // FIXME(#34427): as a workaround for an LLVM bug on ARM,
+                    // use a memset of 0 before assigning the niche value.
let fill_byte = bx.cx().const_u8(0);
let size = bx.cx().const_usize(self.layout.size.bytes());
let niche = self.project_field(bx, discr_index);
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
- let niche_value = (niche_value as u128)
- .wrapping_add(niche_start);
- // FIXME(eddyb) Check the actual primitive type here.
+ let niche_value = (niche_value as u128).wrapping_add(niche_start);
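+                    // For example (illustrative values): with `niche_start = 254`,
+                    // a relative value of `2` becomes `256`, which truncates to
+                    // an encoded `u8` niche value of `0`, inverting the decoding
+                    // done in `codegen_get_discr`.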
+ // FIXME(eddyb): check the actual primitive type here.
let niche_llval = if niche_value == 0 {
- // HACK(eddyb) Using `c_null` as it works on all types.
+ // HACK(eddyb): using `c_null` as it works on all types.
bx.cx().const_null(niche_llty)
} else {
bx.cx().const_uint_big(niche_llty, niche_value)
pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
- llindex: V
+ llindex: V,
) -> Self {
// Statically compute the offset if we can, otherwise just use the element size,
// as this will yield the lowest alignment.
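+        // For example (illustrative): indexing a `[u64]` at a dynamic index
+        // moves the pointer by a whole number of 8-byte elements, so the
+        // element size is always a sound bound to restrict the alignment by.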
let layout = self.layout.field(bx, 0);
- let offset = if bx.is_const_integral(llindex) {
- layout.size.checked_mul(bx.const_to_uint(llindex), bx).unwrap_or(layout.size)
+ let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
+ layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
} else {
layout.size
};
pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
- variant_index: VariantIdx
+ variant_index: VariantIdx,
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
}
}
-impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_place(
&mut self,
bx: &mut Bx,
- place: &mir::Place<'tcx>
+ place_ref: mir::PlaceRef<'tcx>,
) -> PlaceRef<'tcx, Bx::Value> {
- debug!("codegen_place(place={:?})", place);
-
+ debug!("codegen_place(place_ref={:?})", place_ref);
let cx = self.cx;
let tcx = self.cx.tcx();
- if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place {
- match self.locals[index] {
+ let result = match place_ref {
+ mir::PlaceRef { local, projection: [] } => match self.locals[local] {
LocalRef::Place(place) => {
return place;
}
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
}
LocalRef::Operand(..) => {
- bug!("using operand local {:?} as place", place);
+ bug!("using operand local {:?} as place", place_ref);
}
- }
- }
-
- let result = match *place {
- mir::Place::Base(mir::PlaceBase::Local(_)) => bug!(), // handled above
- mir::Place::Base(
- mir::PlaceBase::Static(
- box mir::Static { ty, kind: mir::StaticKind::Promoted(promoted) }
- )
- ) => {
- let param_env = ty::ParamEnv::reveal_all();
- let cid = mir::interpret::GlobalId {
- instance: self.instance,
- promoted: Some(promoted),
- };
- let layout = cx.layout_of(self.monomorphize(&ty));
- match bx.tcx().const_eval(param_env.and(cid)) {
- Ok(val) => match val.val {
- mir::interpret::ConstValue::ByRef(ptr, alloc) => {
- bx.cx().from_const_alloc(layout, alloc, ptr.offset)
- }
- _ => bug!("promoteds should have an allocation: {:?}", val),
- },
- Err(_) => {
- // this is unreachable as long as runtime
- // and compile-time agree on values
- // With floats that won't always be true
- // so we generate an abort
- bx.abort();
- let llval = bx.cx().const_undef(
- bx.cx().type_ptr_to(bx.cx().backend_type(layout))
- );
- PlaceRef::new_sized(llval, layout, layout.align.abi)
- }
- }
- }
- mir::Place::Base(
- mir::PlaceBase::Static(
- box mir::Static { ty, kind: mir::StaticKind::Static(def_id) }
- )
- ) => {
- // NB: The layout of a static may be unsized as is the case when working
- // with a static that is an extern_type.
- let layout = cx.layout_of(self.monomorphize(&ty));
- let static_ = bx.get_static(def_id);
- PlaceRef::new_thin_place(bx, static_, layout, layout.align.abi)
},
- mir::Place::Projection(box mir::Projection {
- ref base,
- elem: mir::ProjectionElem::Deref
- }) => {
+ mir::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
// Load the pointer from its location.
- self.codegen_consume(bx, base).deref(bx.cx())
+ self.codegen_consume(bx, mir::PlaceRef { local, projection: proj_base })
+ .deref(bx.cx())
}
- mir::Place::Projection(ref projection) => {
- let cg_base = self.codegen_place(bx, &projection.base);
+ mir::PlaceRef { local, projection: [proj_base @ .., elem] } => {
+                // FIXME: turn this recursion into iteration.
+ let cg_base =
+ self.codegen_place(bx, mir::PlaceRef { local, projection: proj_base });
- match projection.elem {
+ match elem {
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
cg_base.project_field(bx, field.index())
}
mir::ProjectionElem::Index(index) => {
- let index = &mir::Operand::Copy(
- mir::Place::Base(mir::PlaceBase::Local(index))
- );
+ let index = &mir::Operand::Copy(mir::Place::from(*index));
let index = self.codegen_operand(bx, index);
let llindex = index.immediate();
cg_base.project_index(bx, llindex)
}
- mir::ProjectionElem::ConstantIndex { offset,
- from_end: false,
- min_length: _ } => {
- let lloffset = bx.cx().const_usize(offset as u64);
+ mir::ProjectionElem::ConstantIndex {
+ offset,
+ from_end: false,
+ min_length: _,
+ } => {
+ let lloffset = bx.cx().const_usize(*offset as u64);
cg_base.project_index(bx, lloffset)
}
- mir::ProjectionElem::ConstantIndex { offset,
- from_end: true,
- min_length: _ } => {
- let lloffset = bx.cx().const_usize(offset as u64);
+ mir::ProjectionElem::ConstantIndex {
+ offset,
+ from_end: true,
+ min_length: _,
+ } => {
+ let lloffset = bx.cx().const_usize(*offset as u64);
let lllen = cg_base.len(bx.cx());
let llindex = bx.sub(lllen, lloffset);
cg_base.project_index(bx, llindex)
}
- mir::ProjectionElem::Subslice { from, to } => {
- let mut subslice = cg_base.project_index(bx,
- bx.cx().const_usize(from as u64));
- let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
- .projection_ty(tcx, &projection.elem).ty;
+ mir::ProjectionElem::Subslice { from, to, from_end } => {
+ let mut subslice =
+ cg_base.project_index(bx, bx.cx().const_usize(*from as u64));
+ let projected_ty =
+ PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
- subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
- bx.cx().const_usize((from as u64) + (to as u64))));
+ assert!(from_end, "slice subslices should be `from_end`");
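+                            // For example (illustrative): `from: 2, to: 3` on a
+                            // slice of length `len` leaves `len - 5` elements,
+                            // computed below from the old `llextra`.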
+ subslice.llextra = Some(bx.sub(
+ cg_base.llextra.unwrap(),
+ bx.cx().const_usize((*from as u64) + (*to as u64)),
+ ));
}
// Cast the place pointer type to the new
- // array or slice type (*[%_; new_len]).
- subslice.llval = bx.pointercast(subslice.llval,
- bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
+ // array or slice type (`*[%_; new_len]`).
+ subslice.llval = bx.pointercast(
+ subslice.llval,
+ bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+ );
subslice
}
- mir::ProjectionElem::Downcast(_, v) => {
- cg_base.project_downcast(bx, v)
- }
+ mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, *v),
}
}
};
- debug!("codegen_place(place={:?}) => {:?}", place, result);
+        debug!("codegen_place(place_ref={:?}) => {:?}", place_ref, result);
result
}
- pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
+ pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx();
- let place_ty = place.ty(self.mir, tcx);
+ let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, *self.mir, tcx);
self.monomorphize(&place_ty.ty)
}
}