//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.

use std::convert::TryFrom;
use std::fmt::Write;

use rustc_hir::def::Namespace;
use rustc_macros::HashStable;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

use super::{
    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer,
    PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
};

/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Immediate<Tag: Provenance = AllocId> {
    Scalar(ScalarMaybeUninit<Tag>),
    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}
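// Illustrative sketch (added commentary, not part of the original source): a wide
// pointer such as a `&[u8]` slice value is a single
// `Immediate::ScalarPair(data_ptr, len)`, so no backing allocation is needed to
// pass it around. Using the constructor defined below:
//
//     let imm = Immediate::new_slice(data_ptr, len, cx);
//
// `data_ptr`, `len`, and `cx` here are hypothetical values; `new_slice` builds the
// pair from the pointer scalar and a machine-usize length.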

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);

impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: ScalarMaybeUninit<Tag>) -> Self {
        Immediate::Scalar(val)
    }
}

impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: Scalar<Tag>) -> Self {
        Immediate::Scalar(val.into())
    }
}

impl<'tcx, Tag: Provenance> Immediate<Tag> {
    pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
    }

    pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
    }

    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
    }

    pub fn new_dyn_trait(
        val: Scalar<Tag>,
        vtable: Pointer<Option<Tag>>,
        cx: &impl HasDataLayout,
    ) -> Self {
        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
    }

    #[inline]
    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
        match self {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
        }
    }

    #[inline]
    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
        self.to_scalar_or_uninit().check_init()
    }

    #[inline]
    pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>) {
        match self {
            Immediate::ScalarPair(val1, val2) => (val1, val2),
            Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
        }
    }

    #[inline]
    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
        let (val1, val2) = self.to_scalar_or_uninit_pair();
        Ok((val1.check_init()?, val2.check_init()?))
    }
}

// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
    imm: Immediate<Tag>,
    pub layout: TyAndLayout<'tcx>,
}
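// Illustrative sketch (added commentary): an `ImmTy` is just the raw bits of an
// `Immediate` plus the layout needed to interpret them. For instance, building a
// typed constant via the constructors defined further down:
//
//     let layout = cx.layout_of(cx.tcx.types.u32)?; // hypothetical context `cx`
//     let forty_two = ImmTy::from_uint(42u32, layout);
//
// Without the layout, the interpreter could not tell a `u32` bit pattern apart
// from an `i32` or `char` one.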

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);

impl<Tag: Provenance> std::fmt::Display for ImmTy<'_, Tag> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, Tag: Provenance>(
            cx: FmtPrinter<'a, 'tcx>,
            s: ScalarMaybeUninit<Tag>,
            ty: Ty<'tcx>,
        ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
            match s {
                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
                    cx.pretty_print_const_scalar_int(int, ty, true)
                }
                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
                    // print what it points to, which would fail since it has no access to the
                    // local memory.
                    cx.pretty_print_const_pointer(ptr, ty, true)
                }
                ScalarMaybeUninit::Uninit => cx.typed_value(
                    |mut this| {
                        this.write_str("uninit ")?;
                        Ok(this)
                    },
                    |this| this.print_type(ty),
                    " ",
                ),
            }
        }
        ty::tls::with(|tcx| {
            match self.imm {
                Immediate::Scalar(s) => {
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
                        f.write_str(&p(cx, s, ty)?.into_buffer())?;
                        return Ok(());
                    }
                    write!(f, "{:x}: {}", s, self.layout.ty)
                }
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
                }
            }
        })
    }
}

impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
    type Target = Immediate<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Immediate<Tag> {
        &self.imm
    }
}

/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Operand<Tag: Provenance = AllocId> {
    Immediate(Immediate<Tag>),
    Indirect(MemPlace<Tag>),
}
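// Illustrative sketch (added commentary): the two `Operand` cases mirror how a
// value may be held during interpretation. A hypothetical walk-through:
//
//     match *op {
//         Operand::Immediate(imm) => { /* value already decoded, e.g. a Scalar */ }
//         Operand::Indirect(mplace) => { /* value still sits in an allocation */ }
//     }
//
// Reading through `read_immediate_raw` below converts the second case into the
// first whenever the layout permits it.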

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_>, 80);

impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
    type Target = Operand<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Operand<Tag> {
        &self.op
    }
}

impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
    }
}

impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
    }
}

impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(val: ImmTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
    }
}

impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
    #[inline]
    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    }

    #[inline]
    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    }

    #[inline]
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    }

    #[inline]
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
    }

    #[inline]
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
    }
}
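// Illustrative usage sketch (added commentary): the `ImmTy` constructors above are
// how the interpreter materializes integer constants with a known layout, e.g.
//
//     let idx = ImmTy::from_uint(3u64, usize_layout); // `usize_layout` is hypothetical
//     let val = idx.to_const_int(); // only valid because the type is integral
//
// `try_from_uint` returns `None` instead of truncating when the value does not
// fit in `layout.size`.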

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Try reading an immediate in memory; this is particularly interesting for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    ///
    /// This is an internal function; call `read_immediate` instead.
    fn read_immediate_from_mplace_raw(
        &self,
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
        force: bool,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
            return Ok(None);
        }

        let Some(alloc) = self.get_place_alloc(mplace)? else {
            return Ok(Some(ImmTy {
                // zero-sized type
                imm: Scalar::ZST.into(),
                layout: mplace.layout,
            }));
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need an extra
        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
        let scalar_layout = match mplace.layout.abi {
            // `if` does not work nested inside patterns, making this a bit awkward to express.
            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
            Abi::Scalar(s) if force => Some(s.primitive()),
            _ => None,
        };
        let read_provenance = |s: abi::Primitive, size| {
            // Should be just `s.is_ptr()`, but we support a Miri flag that accepts more
            // questionable ptr-int transmutes.
            let number_may_have_provenance = !M::enforce_number_no_provenance(self);
            s.is_ptr() || (number_may_have_provenance && size == self.pointer_size())
        };
        if let Some(s) = scalar_layout {
            //FIXME(#96185): let size = s.size(self);
            //FIXME(#96185): assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
            let size = mplace.layout.size; //FIXME(#96185): remove this line
            let scalar =
                alloc.read_scalar(alloc_range(Size::ZERO, size), read_provenance(s, size))?;
            return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
        }
        let scalar_pair_layout = match mplace.layout.abi {
            Abi::ScalarPair(
                abi::Scalar::Initialized { value: a, .. },
                abi::Scalar::Initialized { value: b, .. },
            ) => Some((a, b)),
            Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
            _ => None,
        };
        if let Some((a, b)) = scalar_pair_layout {
            // We checked `ptr_align` above, so all fields will have the alignment they need.
            // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
            // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
            let (a_size, b_size) = (a.size(self), b.size(self));
            let b_offset = a_size.align_to(b.align(self).abi);
            assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
            let a_val =
                alloc.read_scalar(alloc_range(Size::ZERO, a_size), read_provenance(a, a_size))?;
            let b_val =
                alloc.read_scalar(alloc_range(b_offset, b_size), read_provenance(b, b_size))?;
            return Ok(Some(ImmTy {
                imm: Immediate::ScalarPair(a_val, b_val),
                layout: mplace.layout,
            }));
        }
        // Neither a scalar nor scalar pair.
        return Ok(None);
    }
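    // Illustrative sketch (added commentary): the `MaybeUninit` caveat above in a
    // concrete form. A value like
    //
    //     let x: std::mem::MaybeUninit<u64> = /* bytes 0..4 written, 4..8 untouched */;
    //
    // has `Abi::Scalar` layout, yet no single interpreter `Scalar` can represent
    // "half initialized". That is why `scalar_layout` only matches
    // `abi::Scalar::Initialized` unless `force` is set, and why forced reads are
    // lossy and must never be written back.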

    /// Try returning an immediate for the operand. If the layout does not permit loading this as an
    /// immediate, return where in memory we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    ///
    /// If `force` is `true`, then even scalars with fields that can be uninit will be
    /// read. This means the load is lossy and should not be written back!
    /// This flag exists only for validity checking.
    ///
    /// This is an internal function that should not usually be used; call `read_immediate` instead.
    pub fn read_immediate_raw(
        &self,
        src: &OpTy<'tcx, M::PointerTag>,
        force: bool,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
        Ok(match src.try_as_mplace() {
            Ok(ref mplace) => {
                if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
                    Ok(val)
                } else {
                    Err(*mplace)
                }
            }
            Err(val) => Ok(val),
        })
    }

    /// Read an immediate from a place, asserting that this is possible with the given layout.
    #[inline(always)]
    pub fn read_immediate(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
        if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
            Ok(imm)
        } else {
            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
        }
    }

    /// Read a scalar from a place.
    pub fn read_scalar(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
    }

    /// Read a pointer from a place.
    pub fn read_pointer(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
    }

    // Turn the wide MPlace into a string (must already be dereferenced!)
    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
        Ok(str)
    }

    /// Projection functions
    pub fn operand_field(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let base = match op.try_as_mplace() {
            Ok(ref mplace) => {
                // We can reuse the mplace field computation logic for indirect operands.
                let field = self.mplace_field(mplace, field)?;
                return Ok(field.into());
            }
            Err(value) => value,
        };

        let field_layout = base.layout.field(self, field);
        let offset = base.layout.fields.offset(field);
        // This makes several assumptions about what layouts we will encounter; we match what
        // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
        let field_val: Immediate<_> = match (*base, base.layout.abi) {
            // the field contains no information
            _ if field_layout.is_zst() => Scalar::ZST.into(),
            // the field covers the entire type
            _ if field_layout.size == base.layout.size => {
                assert!(match (base.layout.abi, field_layout.abi) {
                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
                    _ => false,
                });
                assert!(offset.bytes() == 0);
                *base
            }
            // extract fields from types with `ScalarPair` ABI
            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
                assert!(matches!(field_layout.abi, Abi::Scalar(..)));
                Immediate::from(if offset.bytes() == 0 {
                    debug_assert_eq!(field_layout.size, a.size(self));
                    a_val
                } else {
                    debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
                    debug_assert_eq!(field_layout.size, b.size(self));
                    b_val
                })
            }
            _ => span_bug!(
                self.cur_span(),
                "invalid field access on immediate {}, layout {:#?}",
                base,
                base.layout
            ),
        };

        Ok(OpTy { op: Operand::Immediate(field_val), layout: field_layout })
    }
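    // Illustrative sketch (added commentary): for an immediate `&[u8]` operand held
    // as `ScalarPair(ptr, len)`, field 0 (the data pointer) sits at offset 0 and
    // field 1 (the length) at `a_size.align_to(b_align)`, so
    //
    //     let len_op = self.operand_field(&slice_op, 1)?; // `slice_op` is hypothetical
    //
    // selects `b_val` via the offset comparison above, without touching memory.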

    pub fn operand_index(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        if let Ok(index) = usize::try_from(index) {
            // We can just treat this as a field.
            self.operand_field(op, index)
        } else {
            // Indexing into a big array. This must be an mplace.
            let mplace = op.assert_mem_place();
            Ok(self.mplace_index(&mplace, index)?.into())
        }
    }

    pub fn operand_downcast(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        Ok(match op.try_as_mplace() {
            Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
            Err(..) => {
                // Downcasts only change the layout.
                // (In particular, there is no check whether this is even the active variant -- that's
                // by design, see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
                let layout = op.layout.for_variant(self, variant);
                OpTy { layout, ..*op }
            }
        })
    }

    #[instrument(skip(self), level = "debug")]
    pub fn operand_projection(
        &self,
        base: &OpTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.operand_field(base, field.index())?,
            Downcast(_, variant) => self.operand_downcast(base, variant)?,
            Deref => self.deref_operand(base)?.into(),
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                // The rest should only occur as mplace; we do not use Immediates for types
                // allowing such operations. This matches place_projection forcing an allocation.
                let mplace = base.assert_mem_place();
                self.mplace_projection(&mplace, proj_elem)?.into()
            }
        })
    }

    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn operand_to_simd(
        &self,
        base: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // This only works in memory, but repr(simd) types should never be immediates anyway.
        assert!(base.layout.ty.is_simd());
        self.mplace_to_simd(&base.assert_mem_place())
    }

    /// Read from a local. Will not actually access the local if reading from a ZST.
    /// Will not access memory; instead, an indirect `Operand` is returned.
    ///
    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
    /// OpTy from a local.
    pub fn access_local(
        &self,
        frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let layout = self.layout_of_local(frame, local, layout)?;
        let op = if layout.is_zst() {
            // Do not read from ZSTs; they might not be initialized.
            Operand::Immediate(Scalar::ZST.into())
        } else {
            M::access_local(&self, frame, local)?
        };
        Ok(OpTy { op, layout })
    }

    /// Every place can be read from, so we can turn places into operands.
    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
    /// will never actually read from memory.
    #[inline(always)]
    pub fn place_to_op(
        &self,
        place: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let op = match **place {
            Place::Ptr(mplace) => Operand::Indirect(mplace),
            Place::Local { frame, local } => {
                *self.access_local(&self.stack()[frame], local, None)?
            }
        };
        Ok(OpTy { op, layout: place.layout })
    }

    /// Evaluate a place with the goal of reading from it. This lets us sometimes
    /// avoid allocations.
    pub fn eval_place_to_op(
        &self,
        place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if place.projection.is_empty() { layout } else { None };

        let base_op = self.access_local(self.frame(), place.local, layout)?;

        let op = place
            .projection
            .iter()
            .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;

        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            )?)?,
            op.layout,
        ));
        Ok(op)
    }
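    // Illustrative sketch (added commentary): a MIR place like `(*_1).0` is the
    // local `_1` plus the projection list `[Deref, Field(0)]`. The `try_fold`
    // above replays exactly that, in pseudocode:
    //
    //     access_local(_1)                    // base operand
    //         then operand_projection(Deref)
    //         then operand_projection(Field(0, _))
    //
    // producing the final `OpTy` for the fully projected place.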

    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
    #[inline]
    pub fn eval_operand(
        &self,
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,

            Constant(ref constant) => {
                let val =
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   checked yet.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.

                self.mir_const_to_op(&val, layout)?
            }
        };
        trace!("{:?}: {:?}", mir_op, *op);
        Ok(op)
    }

    /// Evaluate a bunch of operands at once.
    pub(super) fn eval_operands(
        &self,
        ops: &[mir::Operand<'tcx>],
    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
        ops.iter().map(|op| self.eval_operand(op, None)).collect()
    }

    /// Used when the Miri engine runs into a constant, and for extracting information from
    /// constants in patterns via the `const_eval` module.
    ///
    /// The given constant and `layout` are assumed to already be in our interpreter
    /// "universe" (param_env).
    pub fn const_to_op(
        &self,
        c: ty::Const<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match c.kind() {
            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
            ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
                throw_inval!(AlreadyReported(reported))
            }
            ty::ConstKind::Unevaluated(uv) => {
                let instance = self.resolve(uv.def, uv.substs)?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
            }
            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c)
            }
            ty::ConstKind::Value(valtree) => {
                let ty = c.ty();
                let const_val = self.tcx.valtree_to_const_val((ty, valtree));
                self.const_val_to_op(const_val, ty, layout)
            }
        }
    }

    pub fn mir_const_to_op(
        &self,
        val: &mir::ConstantKind<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match val {
            mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout),
            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout),
        }
    }

    pub(crate) fn const_val_to_op(
        &self,
        val_val: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Other cases need layout.
        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
            Ok(match scalar {
                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                Scalar::Int(int) => Scalar::Int(int),
            })
        };
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
            }
            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                );
                Operand::Immediate(Immediate::new_slice(
                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                    self,
                ))
            }
        };
        Ok(OpTy { op, layout })
    }

    /// Read discriminant, return the runtime value as well as the variant index.
    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
    pub fn read_discriminant(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);

        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which just determines its position
        // in the declared list of variants -- they can differ with explicitly assigned discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straightforward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
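        // Illustrative example (added commentary, plain Rust):
        //
        //     enum E { A = 10, B = 20, C = 30 }
        //
        // has variant indices 0, 1, 2 but discriminants 10, 20, 30; with
        // `TagEncoding::Direct` the tag stores the discriminant itself, while a
        // niche encoding (e.g. `Option<&T>` using the null pointer) stores it
        // inside otherwise-invalid bit patterns of a field.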
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    Some(discr) => {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                    }
                    None => {
                        // On a type without actual discriminants, the variant index is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                    }
                };
                return Ok((discr, index));
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
        //   and used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.

        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;

        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        trace!("tag value: {}", tag_val);

        // Figure out which discriminant and variant this corresponds to.
        Ok(match *tag_encoding {
            TagEncoding::Direct => {
                let scalar = tag_val.to_scalar()?;
                // Generate a specific error if `tag_val` is not an integer.
                // (`tag_bits` itself is only used for error messages below.)
                let tag_bits = scalar
                    .try_to_int()
                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                    .assert_bits(tag_layout.size);
                // Cast bits from tag layout to discriminant layout.
                // After the checks we did above, this cannot fail, as
                // discriminants are int-like.
                let discr_val =
                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
                let discr_bits = discr_val.assert_bits(discr_layout.size);
                // Convert discriminant to variant index, and catch invalid discriminants.
                let index = match *op.layout.ty.kind() {
                    ty::Adt(adt, _) => {
                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                    }
                    ty::Generator(def_id, substs, _) => {
                        let substs = substs.as_generator();
                        substs
                            .discriminants(def_id, *self.tcx)
                            .find(|(_, var)| var.val == discr_bits)
                    }
                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
                }
                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                // Return the cast value, and the index.
                (discr_val, index.0)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                let tag_val = tag_val.to_scalar()?;
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.try_to_int() {
                    Err(dbg_val) => {
                        // So this is a pointer, and casting it to an int failed.
                        // That can only happen during CTFE.
                        // This is okay only if the niche is just 0, there is a single niched
                        // variant, and the pointer is not null -- then the pointer cannot be
                        // the niche value, so this must be the dataful variant.
                        // Everything else, we conservatively reject.
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.scalar_may_be_null(tag_val)?;
                        if !ptr_valid {
                            throw_ub!(InvalidTag(dbg_val))
                        }
                        dataful_variant
                    }
                    Ok(tag_bits) => {
                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative = variant_index_relative_val
                            .to_scalar()?
                            .assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
                            let variants_len = op
                                .layout
                                .ty
                                .ty_adt_def()
                                .expect("tagged layout for non adt")
                                .variants()
                                .len();
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                        } else {
                            dataful_variant
                        }
                    }
                };
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
            }
        })
    }
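
    // Illustrative worked example (added commentary): for `Option<&u8>`,
    // `TagEncoding::Niche` uses the null pointer as the niche. With a
    // hypothetical `niche_start == 0` and `niche_variants == 0..=0` (just `None`),
    // reading a tag of 0 gives `variant_index_relative = 0 - 0 = 0`, which is in
    // range, so the variant is `None`; any non-null pointer falls outside the
    // range (or fails the int cast during CTFE) and decodes to the dataful
    // variant `Some`.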
847 }