ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}
+// Compile-time guard: keep `Immediate` from silently growing on the primary
+// 64-bit x86 target (it is copied around frequently in the interpreter).
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Immediate, 56);
+
impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
#[inline(always)]
fn from(val: ScalarMaybeUninit<Tag>) -> Self {
pub layout: TyAndLayout<'tcx>,
}
+// Compile-time guard: catch accidental size regressions of `ImmTy` on the
+// primary 64-bit x86 target.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
+
impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter
}
ScalarMaybeUninit::Uninit => cx.typed_value(
|mut this| {
- this.write_str("{uninit ")?;
+ this.write_str("uninit ")?;
Ok(this)
},
|this| this.print_type(ty),
pub layout: TyAndLayout<'tcx>,
}
+// Compile-time guard: catch accidental size regressions of `OpTy` on the
+// primary 64-bit x86 target.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80);
+
impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>;
#[inline(always)]
}
}
+// A memory place is always a valid (indirect) operand, so a cheap by-reference
+// conversion is provided: `**mplace` copies out the inner place value
+// (presumably `MPlaceTy` derefs to a `Copy` `MemPlace` — confirm against its
+// definition) and the layout is copied alongside it.
+impl<'tcx, Tag: Copy> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+ #[inline(always)]
+ fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
+ OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
+ }
+}
+
impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
#[inline(always)]
fn from(val: ImmTy<'tcx, Tag>) -> Self {
#[inline]
pub fn force_op_ptr(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
match op.try_as_mplace(self) {
Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
/// Returns `None` if the layout does not permit loading this as a value.
fn try_read_immediate_from_mplace(
&self,
- mplace: MPlaceTy<'tcx, M::PointerTag>,
+ mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
/// in a `Immediate`, not on which data is stored there currently.
pub(crate) fn try_read_immediate(
&self,
- src: OpTy<'tcx, M::PointerTag>,
+ src: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
Ok(match src.try_as_mplace(self) {
- Ok(mplace) => {
+ Ok(ref mplace) => {
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
Ok(val)
} else {
- Err(mplace)
+ Err(*mplace)
}
}
Err(val) => Ok(val),
#[inline(always)]
pub fn read_immediate(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
if let Ok(imm) = self.try_read_immediate(op)? {
Ok(imm)
/// Read a scalar from a place
pub fn read_scalar(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
Ok(self.read_immediate(op)?.to_scalar_or_uninit())
}
// Turn the wide MPlace into a string (must already be dereferenced!)
- pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
+ pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
/// Projection functions
pub fn operand_field(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace(self) {
- Ok(mplace) => {
+ Ok(ref mplace) => {
// We can reuse the mplace field computation logic for indirect operands.
let field = self.mplace_field(mplace, field)?;
return Ok(field.into());
pub fn operand_index(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
index: u64,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
if let Ok(index) = usize::try_from(index) {
} else {
// Indexing into a big array. This must be an mplace.
let mplace = op.assert_mem_place(self);
- Ok(self.mplace_index(mplace, index)?.into())
+ Ok(self.mplace_index(&mplace, index)?.into())
}
}
pub fn operand_downcast(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
variant: VariantIdx,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout
Ok(match op.try_as_mplace(self) {
- Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
+ Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
Err(..) => {
let layout = op.layout.for_variant(self, variant);
- OpTy { layout, ..op }
+ OpTy { layout, ..*op }
}
})
}
pub fn operand_projection(
&self,
- base: OpTy<'tcx, M::PointerTag>,
+ base: &OpTy<'tcx, M::PointerTag>,
proj_elem: mir::PlaceElem<'tcx>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc_middle::mir::ProjectionElem::*;
// The rest should only occur as mplace, we do not use Immediates for types
// allowing such operations. This matches place_projection forcing an allocation.
let mplace = base.assert_mem_place(self);
- self.mplace_projection(mplace, proj_elem)?.into()
+ self.mplace_projection(&mplace, proj_elem)?.into()
}
})
}
#[inline(always)]
pub fn place_to_op(
&self,
- place: PlaceTy<'tcx, M::PointerTag>,
+ place: &PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
- let op = match *place {
+ let op = match **place {
Place::Ptr(mplace) => Operand::Indirect(mplace),
Place::Local { frame, local } => {
*self.access_local(&self.stack()[frame], local, None)?
let op = place
.projection
.iter()
- .try_fold(base_op, |op, elem| self.operand_projection(op, elem))?;
+ .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;
trace!("eval_place_to_op: got {:?}", *op);
// Sanity-check the type we ended up with.
/// Evaluate the operand, returning a place where you can then find the data.
/// If you already know the layout, you can save two table lookups
/// by passing it in here.
+ #[inline]
pub fn eval_operand(
&self,
mir_op: &mir::Operand<'tcx>,
// * During ConstProp, with `TooGeneric` or since the `requried_consts` were not all
// checked yet.
// * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
- self.const_to_op(val, layout)?
+
+ self.mir_const_to_op(&val, layout)?
}
};
trace!("{:?}: {:?}", mir_op, *op);
val: &ty::Const<'tcx>,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
- let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
- Ok(match scalar {
- Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
- Scalar::Int(int) => Scalar::Int(int),
- })
- };
- // Early-return cases.
- let val_val = match val.val {
+ match val.val {
ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
ty::ConstKind::Unevaluated(def, substs, promoted) => {
let instance = self.resolve(def, substs)?;
- return Ok(self.eval_to_allocation(GlobalId { instance, promoted })?.into());
+ Ok(self.eval_to_allocation(GlobalId { instance, promoted })?.into())
}
ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
}
- ty::ConstKind::Value(val_val) => val_val,
- };
+ ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
+ }
+ }
+
+ /// Turn a `mir::ConstantKind` into an operand: a still-type-level constant
+ /// (`Ty`) is routed through `const_to_op`, while an already-evaluated value
+ /// (`Val`) goes straight to `const_val_to_op`.
+ // NOTE(review): the `Val` arm discards the caller-provided `layout` and
+ // passes `None`, forcing `const_val_to_op` to recompute the layout from
+ // `ty` — confirm this is intentional rather than a lost fast path.
+ crate fn mir_const_to_op(
+ &self,
+ val: &mir::ConstantKind<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+ match val {
+ mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
+ mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, None),
+ }
+ }
+
+ crate fn const_val_to_op(
+ &self,
+ val_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Other cases need layout.
- let layout =
- from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(val.ty))?;
+ let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
+ Ok(match scalar {
+ Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
+ Scalar::Int(int) => Scalar::Int(int),
+ })
+ };
+ let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let op = match val_val {
ConstValue::ByRef { alloc, offset } => {
let id = self.tcx.create_memory_alloc(alloc);
/// Read discriminant, return the runtime value as well as the variant index.
pub fn read_discriminant(
&self,
- op: OpTy<'tcx, M::PointerTag>,
+ op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
trace!("read_discriminant_value {:#?}", op.layout);
// Get type and layout of the discriminant.
let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
// Read tag and sanity-check `tag_layout`.
- let tag_val = self.read_immediate(self.operand_field(op, tag_field)?)?;
+ let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
let tag_val = tag_val.to_scalar()?;
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
- self.binary_op(mir::BinOp::Sub, tag_val, niche_start_val)?;
+ self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative = variant_index_relative_val
.to_scalar()?
.assert_bits(tag_val.layout.size);