use std::hash::Hash;
use super::{
- alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
- MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
+ alloc_range, CheckInAllocMsg, GlobalAlloc, Immediate, InterpCx, InterpResult, MPlaceTy,
+ Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
};
macro_rules! throw_validation_failure {
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
match tail.kind() {
ty::Dynamic(..) => {
- let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
+ let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta())?;
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
- self.ecx.memory.check_ptr_access_align(
+ self.ecx.check_ptr_access_align(
vtable,
3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
self.ecx.tcx.data_layout.pointer_align.abi,
.unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
- self.ecx.memory.check_ptr_access_align(
+ self.ecx.check_ptr_access_align(
place.ptr,
size,
align,
if let Some(ref mut ref_tracking) = self.ref_tracking {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
- if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
+ if let Ok((alloc_id, _offset, _tag)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
// Special handling for pointers to statics (irrespective of their type).
let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
))
}
+ fn read_immediate_forced(
+ &self,
+ op: &OpTy<'tcx, M::PointerTag>,
+ ) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
+ Ok(*try_validation!(
+ self.ecx.read_immediate_raw(op, /*force*/ true),
+ self.path,
+ err_unsup!(ReadPointerAsBytes) => { "(potentially part of) a pointer" } expected { "plain (non-pointer) bytes" },
+ ).unwrap())
+ }
+
/// Check if this is a value of primitive type, and if yes check the validity of the value
/// at that type. Return `true` if the type is indeed primitive.
fn try_visit_primitive(
// If we check references recursively, also check that this points to a function.
if let Some(_) = self.ref_tracking {
- let ptr = self.ecx.scalar_to_ptr(value);
+ let ptr = self.ecx.scalar_to_ptr(value)?;
let _fn = try_validation!(
- self.ecx.memory.get_fn(ptr),
+ self.ecx.get_ptr_fn(ptr),
self.path,
err_ub!(DanglingIntPointer(0, _)) =>
{ "a null function pointer" },
// FIXME: Check if the signature matches
} else {
// Otherwise (for standalone Miri), we have to still check it to be non-null.
- if self.ecx.scalar_may_be_null(value) {
+ if self.ecx.scalar_may_be_null(value)? {
throw_validation_failure!(self.path, { "a null function pointer" });
}
}
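For illustration, here is a minimal (hypothetical) program that this non-null check is meant to reject under Miri's default validity checking; it is not part of the change itself:

    fn main() {
        // UB: function pointers must be non-null. Miri's validity check reports
        // something like "encountered a null function pointer" here.
        let _f: fn() = unsafe { std::mem::transmute(0usize) };
    }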
fn visit_scalar(
&mut self,
- op: &OpTy<'tcx, M::PointerTag>,
+ scalar: ScalarMaybeUninit<M::PointerTag>,
scalar_layout: ScalarAbi,
) -> InterpResult<'tcx> {
- if scalar_layout.valid_range.is_full_for(op.layout.size) {
- // Nothing to check
+ // We check `is_full_range` in a slightly complicated way because *if* we are checking
+ // number validity, then we want to ensure that `Scalar::Initialized` is indeed initialized,
+ // i.e. that we actually reach the `check_init` below.
+ let size = scalar_layout.size(self.ecx);
+ let is_full_range = match scalar_layout {
+ ScalarAbi::Initialized { .. } => {
+ if M::enforce_number_validity(self.ecx) {
+ false // not "full" since uninit is not accepted
+ } else {
+ scalar_layout.is_always_valid(self.ecx)
+ }
+ }
+ ScalarAbi::Union { .. } => true,
+ };
+ if is_full_range {
+ // Nothing to check. Crucially we do not even run `check_init` in this case, since that
+ // would fail for `Union` scalars, which may legitimately be uninitialized!
return Ok(());
}
- // At least one value is excluded.
- let valid_range = scalar_layout.valid_range;
+ // We have something to check: it must at least be initialized.
+ let valid_range = scalar_layout.valid_range(self.ecx);
let WrappingRange { start, end } = valid_range;
- let max_value = op.layout.size.unsigned_int_max();
+ let max_value = size.unsigned_int_max();
assert!(end <= max_value);
- // Determine the allowed range
- let value = self.read_scalar(op)?;
let value = try_validation!(
- value.check_init(),
+ scalar.check_init(),
self.path,
- err_ub!(InvalidUninitBytes(None)) => { "{:x}", value }
+ err_ub!(InvalidUninitBytes(None)) => { "{:x}", scalar }
expected { "something {}", wrapping_range_format(valid_range, max_value) },
);
let bits = match value.try_to_int() {
+ Ok(int) => int.assert_bits(size),
Err(_) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
+ // We support 2 kinds of ranges here: full range, and excluding zero.
if start == 1 && end == max_value {
// Only null is the niche. So make sure the ptr is NOT null.
- if self.ecx.scalar_may_be_null(value) {
+ if self.ecx.scalar_may_be_null(value)? {
throw_validation_failure!(self.path,
{ "a potentially null pointer" }
expected {
wrapping_range_format(valid_range, max_value)
}
)
+ } else {
+ return Ok(());
}
+ } else if scalar_layout.is_always_valid(self.ecx) {
+ // Easy. (This is reachable if `enforce_number_validity` is set.)
return Ok(());
} else {
// Conservatively, we reject, because the pointer *could* have a bad
)
}
}
- Ok(int) => int.assert_bits(op.layout.size),
};
- // Now compare. This is slightly subtle because this is a special "wrap-around" range.
+ // Now compare.
if valid_range.contains(bits) {
Ok(())
} else {
);
}
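The `valid_range.contains(bits)` call above covers both plain and wrap-around ranges. As a rough sketch of the semantics (an illustration of how `WrappingRange` membership behaves, not the rustc implementation itself):

    // start <= end: plain interval, e.g. 1..=255 for `NonZeroU8`.
    // start > end: wrap-around interval, e.g. 254..=1 meaning {254, 255, 0, 1} for an enum niche.
    fn wrapping_contains(start: u128, end: u128, bits: u128) -> bool {
        if start <= end { start <= bits && bits <= end } else { bits >= start || bits <= end }
    }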
Abi::Scalar(scalar_layout) => {
- self.visit_scalar(op, scalar_layout)?;
+ let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
+ self.visit_scalar(scalar, scalar_layout)?;
+ }
+ Abi::ScalarPair(a_layout, b_layout) => {
+ // We would validate these things as we descend into the fields,
+ // but that can miss bugs in layout computation. Layout computation
+ // is subtle due to enums having ScalarPair layout, where one field
+ // is the discriminant.
+ if cfg!(debug_assertions) {
+ let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
+ self.visit_scalar(a, a_layout)?;
+ self.visit_scalar(b, b_layout)?;
+ }
}
- Abi::ScalarPair { .. } | Abi::Vector { .. } => {
- // These have fields that we already visited above, so we already checked
- // all their scalar-level restrictions.
- // There is also no equivalent to `rustc_layout_scalar_valid_range_start`
- // that would make skipping them here an issue.
+ Abi::Vector { .. } => {
+ // No checks here, we assume layout computation gets this right.
+ // (This is harder to check since Miri does not represent these as `Immediate`.)
}
Abi::Aggregate { .. } => {
// Nothing to do.
let mplace = op.assert_mem_place(); // strings are never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
- self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),
+ self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)),
self.path,
err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
err_unsup!(ReadPointerAsBytes) => { "a pointer in `str`" },
// to reject those pointers, we just do not have the machinery to
// talk about parts of a pointer.
// We also accept uninit, for consistency with the slow path.
- let Some(alloc) = self.ecx.memory.get(mplace.ptr, size, mplace.align)? else {
+ let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)? else {
// Size 0, nothing more to check.
return Ok(());
};
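As a hedged illustration of why this ranged fast path exists (the constant below is hypothetical): for a large array of plain integer data, visiting every element individually would be slow, so the validator inspects the backing allocation in one go.

    // Validated with a single ranged check over the allocation,
    // not 1024 individual field visits.
    const BIG: [u8; 1024] = [0; 1024];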