// compiler/rustc_const_eval/src/interpret/operand.rs (rustc 1.66.0)

//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.

use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

use super::{
    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
    Provenance, Scalar,
};

/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug)]
pub enum Immediate<Prov: Provenance = AllocId> {
    /// A single scalar value (must have *initialized* `Scalar` ABI).
    Scalar(Scalar<Prov>),
    /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
    /// `Scalar::Initialized`).
    ScalarPair(Scalar<Prov>, Scalar<Prov>),
    /// A value of fully uninitialized memory. Can have arbitrary size and layout.
    Uninit,
}

impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
    #[inline(always)]
    fn from(val: Scalar<Prov>) -> Self {
        Immediate::Scalar(val.into())
    }
}

impl<Prov: Provenance> Immediate<Prov> {
    pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(Scalar::from_pointer(p, cx))
    }

    pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(Scalar::from_maybe_pointer(p, cx))
    }

    pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
    }

    pub fn new_dyn_trait(
        val: Scalar<Prov>,
        vtable: Pointer<Option<Prov>>,
        cx: &impl HasDataLayout,
    ) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_maybe_pointer(vtable, cx))
    }

    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn to_scalar(self) -> Scalar<Prov> {
        match self {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
            Immediate::Uninit => bug!("Got uninit where a scalar was expected"),
        }
    }

    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) {
        match self {
            Immediate::ScalarPair(val1, val2) => (val1, val2),
            Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
            Immediate::Uninit => bug!("Got uninit where a scalar pair was expected"),
        }
    }
}
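
// A minimal sketch (illustrative only, not interpreter code) of the `ScalarPair`
// optimization described above: a wide pointer such as `&[u8]` is kept as a
// (data pointer, length) pair of scalars, with no extra allocation for the pair
// itself. The `ptr` argument and the length 16 are arbitrary assumed values.
#[allow(dead_code)]
fn _slice_immediate_example(ptr: Pointer<AllocId>, cx: &impl HasDataLayout) -> Immediate {
    // Roughly what unsizing a `&[u8; 16]` to `&[u8]` produces.
    Immediate::new_slice(Scalar::from_pointer(ptr, cx), 16, cx)
}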

// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Clone, Debug)]
pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
    imm: Immediate<Prov>,
    pub layout: TyAndLayout<'tcx>,
}

impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, Prov: Provenance>(
            cx: FmtPrinter<'a, 'tcx>,
            s: Scalar<Prov>,
            ty: Ty<'tcx>,
        ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
            match s {
                Scalar::Int(int) => cx.pretty_print_const_scalar_int(int, ty, true),
                Scalar::Ptr(ptr, _sz) => {
                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
                    // print what it points to, which would fail since it has no access to the local
                    // memory.
                    cx.pretty_print_const_pointer(ptr, ty, true)
                }
            }
        }
        ty::tls::with(|tcx| {
            match self.imm {
                Immediate::Scalar(s) => {
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
                        f.write_str(&p(cx, s, ty)?.into_buffer())?;
                        return Ok(());
                    }
                    write!(f, "{:x}: {}", s, self.layout.ty)
                }
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
                }
                Immediate::Uninit => {
                    write!(f, "uninit: {}", self.layout.ty)
                }
            }
        })
    }
}

impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
    type Target = Immediate<Prov>;
    #[inline(always)]
    fn deref(&self) -> &Immediate<Prov> {
        &self.imm
    }
}

/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug)]
pub enum Operand<Prov: Provenance = AllocId> {
    Immediate(Immediate<Prov>),
    Indirect(MemPlace<Prov>),
}

#[derive(Clone, Debug)]
pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
    op: Operand<Prov>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
    /// it needs to have a different alignment than the field type would usually have.
    /// So we represent this here with a separate field that "overwrites" `layout.align`.
    /// This means `layout.align` should never be used for an `OpTy`!
    /// `None` means "alignment does not matter since this is a by-value operand"
    /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
    /// Also CTFE ignores alignment anyway, so this is for Miri only.
    pub align: Option<Align>,
}
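
// A minimal sketch of the `repr(packed)` situation the `align` field above
// compensates for: the field `x` has type `u64`, but inside a packed struct it
// must be accessed with alignment 1, which the `u64` layout alone cannot express.
#[allow(dead_code)]
fn _packed_align_example() {
    #[repr(packed)]
    struct P {
        x: u64,
    }
    // The whole struct (and hence the field) is aligned to 1 byte.
    assert_eq!(std::mem::align_of::<P>(), 1);
}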

impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
    type Target = Operand<Prov>;
    #[inline(always)]
    fn deref(&self) -> &Operand<Prov> {
        &self.op
    }
}

impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
    }
}

impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
    }
}

impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
    }
}

impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(val: ImmTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
    }
}

impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
    #[inline]
    pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    }

    #[inline]
    pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    }

    #[inline]
    pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: Immediate::Uninit, layout }
    }

    #[inline]
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    }

    #[inline]
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
    }

    #[inline]
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        let int = self.to_scalar().assert_int();
        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
    }
}
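
// A minimal sketch of the integer constructors above: given the layout of some
// integral type (obtained elsewhere, e.g. via `LayoutOf::layout_of`; not shown
// here), an integer round-trips through `ImmTy` into a `ConstInt`. The `layout`
// parameter is assumed to describe `u32`.
#[allow(dead_code)]
fn _imm_ty_roundtrip<'tcx>(layout: TyAndLayout<'tcx>) -> ConstInt {
    // `from_uint` asserts that the value fits in `layout.size`.
    let imm: ImmTy<'tcx> = ImmTy::from_uint(42u32, layout);
    // `to_const_int` asserts that `layout.ty` is integral.
    imm.to_const_int()
}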

impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
    pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
        if self.layout.is_unsized() {
            // There are no unsized immediates.
            self.assert_mem_place().len(cx)
        } else {
            match self.layout.fields {
                abi::FieldsShape::Array { count, .. } => Ok(count),
                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
            }
        }
    }

    pub fn offset_with_meta(
        &self,
        offset: Size,
        meta: MemPlaceMeta<Prov>,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        match self.try_as_mplace() {
            Ok(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
            Err(imm) => {
                assert!(
                    matches!(*imm, Immediate::Uninit),
                    "Scalar/ScalarPair cannot be offset into"
                );
                assert!(!meta.has_meta()); // no place to store metadata here
                // Every part of an uninit is uninit.
                Ok(ImmTy::uninit(layout).into())
            }
        }
    }

    pub fn offset(
        &self,
        offset: Size,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        assert!(!layout.is_unsized());
        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
    }
}
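
// A minimal sketch of how `offset` above is used for field projection: given an
// operand plus a field's `offset` and `field_layout` (both assumed to have been
// computed elsewhere from `op.layout`; this helper itself is hypothetical),
// projecting to a sized field needs no metadata.
#[allow(dead_code)]
fn _project_to_field_sketch<'tcx, Prov: Provenance>(
    op: &OpTy<'tcx, Prov>,
    offset: Size,
    field_layout: TyAndLayout<'tcx>,
    cx: &impl HasDataLayout,
) -> InterpResult<'tcx, OpTy<'tcx, Prov>> {
    op.offset(offset, field_layout, cx)
}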

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Try reading an immediate in memory; this is particularly interesting for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    ///
    /// This is an internal function; call `read_immediate` instead.
    fn read_immediate_from_mplace_raw(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
            return Ok(None);
        }

        let Some(alloc) = self.get_place_alloc(mplace)? else {
            // zero-sized type can be left uninit
            return Ok(Some(ImmTy::uninit(mplace.layout)));
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need an extra
        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
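        //
        // For instance, from the interpreted program's point of view (an
        // illustrative sketch, not interpreter code):
        //
        //     let mut x = std::mem::MaybeUninit::<u64>::uninit();
        //     unsafe { x.as_mut_ptr().cast::<u32>().write(0) }; // init bytes 0..4 only
        //
        // `x` has `Scalar` ABI, but half of its bytes remain uninitialized, so no
        // single interpreter `Scalar` can represent it.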
        Ok(match mplace.layout.abi {
            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
                let size = s.size(self);
                assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
                let scalar = alloc.read_scalar(
                    alloc_range(Size::ZERO, size),
                    /*read_provenance*/ s.is_ptr(),
                )?;
                Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
            }
            Abi::ScalarPair(
                abi::Scalar::Initialized { value: a, .. },
                abi::Scalar::Initialized { value: b, .. },
            ) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a_size, b_size) = (a.size(self), b.size(self));
                let b_offset = a_size.align_to(b.align(self).abi);
                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
                let a_val = alloc.read_scalar(
                    alloc_range(Size::ZERO, a_size),
                    /*read_provenance*/ a.is_ptr(),
                )?;
                let b_val = alloc.read_scalar(
                    alloc_range(b_offset, b_size),
                    /*read_provenance*/ b.is_ptr(),
                )?;
                Some(ImmTy {
                    imm: Immediate::ScalarPair(a_val.into(), b_val.into()),
                    layout: mplace.layout,
                })
            }
            _ => {
                // Neither a scalar nor scalar pair.
                None
            }
        })
    }

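    // A worked example (sketch, hypothetical sizes) of the `b_offset` computation
    // above: for a pair whose first scalar is 2 bytes and whose second scalar
    // needs 4-byte alignment, the second scalar is read at offset 4, not 2.
    #[allow(dead_code)]
    fn _pair_offset_example() {
        let a_size = Size::from_bytes(2);
        let b_align = Align::from_bytes(4).unwrap();
        assert_eq!(a_size.align_to(b_align), Size::from_bytes(4));
    }
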
    /// Try returning an immediate for the operand. If the layout does not permit loading this as an
    /// immediate, return where in memory we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    ///
    /// This is an internal function that should not usually be used; call `read_immediate` instead.
    /// ConstProp needs it, though.
    pub fn read_immediate_raw(
        &self,
        src: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
        Ok(match src.try_as_mplace() {
            Ok(ref mplace) => {
                if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
                    Ok(val)
                } else {
                    Err(*mplace)
                }
            }
            Err(val) => Ok(val),
        })
    }

    /// Read an immediate from a place, asserting that that is possible with the given layout.
    ///
    /// If this succeeds, the `ImmTy` is never `Uninit`.
    #[inline(always)]
    pub fn read_immediate(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        if !matches!(
            op.layout.abi,
            Abi::Scalar(abi::Scalar::Initialized { .. })
                | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
        ) {
            span_bug!(self.cur_span(), "primitive read not possible for type: {:?}", op.layout.ty);
        }
        let imm = self.read_immediate_raw(op)?.unwrap();
        if matches!(*imm, Immediate::Uninit) {
            throw_ub!(InvalidUninitBytes(None));
        }
        Ok(imm)
    }

    /// Read a scalar from a place.
    pub fn read_scalar(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        Ok(self.read_immediate(op)?.to_scalar())
    }

    /// Read a pointer from a place.
    pub fn read_pointer(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        self.read_scalar(op)?.to_pointer(self)
    }

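    // A minimal sketch (hypothetical helper, not rustc API) of the usual call
    // chain for reading a primitive out of an operand that is assumed to be a
    // `u32`: `read_immediate` via `read_scalar`, then a typed conversion.
    #[allow(dead_code)]
    fn _read_u32_sketch(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx, u32> {
        self.read_scalar(op)?.to_u32()
    }
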
    /// Turn the wide MPlace into a string (must already be dereferenced!)
    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr, Size::from_bytes(len))?;
        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
        Ok(str)
    }

    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    ///
    /// Can (but does not always) trigger UB if `op` is uninitialized.
    pub fn operand_to_simd(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // This only works in memory, but repr(simd) types should never be immediates anyway.
        assert!(op.layout.ty.is_simd());
        match op.try_as_mplace() {
            Ok(mplace) => self.mplace_to_simd(&mplace),
            Err(imm) => match *imm {
                Immediate::Uninit => {
                    throw_ub!(InvalidUninitBytes(None))
                }
                Immediate::Scalar(..) | Immediate::ScalarPair(..) => {
                    bug!("arrays/slices can never have Scalar/ScalarPair layout")
                }
            },
        }
    }

    /// Read from a local.
    /// Will not access memory; instead, an indirect `Operand` is returned.
    ///
    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
    /// OpTy from a local.
    pub fn local_to_op(
        &self,
        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        let layout = self.layout_of_local(frame, local, layout)?;
        let op = *frame.locals[local].access()?;
        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
    }

    /// Every place can be read from, so we can turn it into an operand.
    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
    /// will never actually read from memory.
    #[inline(always)]
    pub fn place_to_op(
        &self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        let op = match **place {
            Place::Ptr(mplace) => Operand::Indirect(mplace),
            Place::Local { frame, local } => {
                *self.local_to_op(&self.stack()[frame], local, None)?
            }
        };
        Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
    }

    /// Evaluate a place with the goal of reading from it. This lets us sometimes
    /// avoid allocations.
    pub fn eval_place_to_op(
        &self,
        mir_place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if mir_place.projection.is_empty() { layout } else { None };

        let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
        // Using `try_fold` turned out to be bad for performance, hence the loop.
        for elem in mir_place.projection.iter() {
            op = self.operand_projection(&op, elem)?
        }

        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(
            mir_assign_valid_types(
                *self.tcx,
                self.param_env,
                self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
                )?)?,
                op.layout,
            ),
            "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
            mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
            op.layout.ty,
        );
        Ok(op)
    }

    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
    #[inline]
    pub fn eval_operand(
        &self,
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,

            Constant(ref constant) => {
                let val =
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;

                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   checked yet.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
                self.const_to_op(&val, layout)?
            }
        };
        trace!("{:?}: {:?}", mir_op, *op);
        Ok(op)
    }

    /// Evaluate a bunch of operands at once
    pub(super) fn eval_operands(
        &self,
        ops: &[mir::Operand<'tcx>],
    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
        ops.iter().map(|op| self.eval_operand(op, None)).collect()
    }

    pub fn const_to_op(
        &self,
        val: &mir::ConstantKind<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        // FIXME(const_prop): normalization is needed because the const-prop lint runs in
        // `mir_drops_elaborated_and_const_checked`, which happens before
        // optimized MIR. Only after optimizing the MIR can we guarantee
        // that the `RevealAll` pass has happened and that the body's consts
        // are normalized, so any call to resolve before that needs to be
        // manually normalized.
        let val = self.tcx.normalize_erasing_regions(self.param_env, *val);
        match val {
            mir::ConstantKind::Ty(ct) => {
                match ct.kind() {
                    ty::ConstKind::Param(_) | ty::ConstKind::Placeholder(..) => {
                        throw_inval!(TooGeneric)
                    }
                    ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
                        throw_inval!(AlreadyReported(reported))
                    }
                    ty::ConstKind::Unevaluated(uv) => {
                        // NOTE: We evaluate to a `ValTree` here as a check to ensure
                        // we're working with valid constants, even though we never need it.
                        let instance = self.resolve(uv.def, uv.substs)?;
                        let cid = GlobalId { instance, promoted: None };
                        let _valtree = self
                            .tcx
                            .eval_to_valtree(self.param_env.and(cid))?
                            .unwrap_or_else(|| bug!("unable to create ValTree for {uv:?}"));

                        Ok(self.eval_to_allocation(cid)?.into())
                    }
                    ty::ConstKind::Bound(..) | ty::ConstKind::Infer(..) => {
                        span_bug!(self.cur_span(), "unexpected ConstKind in ctfe: {ct:?}")
                    }
                    ty::ConstKind::Value(valtree) => {
                        let ty = ct.ty();
                        let const_val = self.tcx.valtree_to_const_val((ty, valtree));
                        self.const_val_to_op(const_val, ty, layout)
                    }
                }
            }
            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(val, ty, layout),
            mir::ConstantKind::Unevaluated(uv, _) => {
                let instance = self.resolve(uv.def, uv.substs)?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
            }
        }
    }

    pub(crate) fn const_val_to_op(
        &self,
        val_val: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        // Other cases need layout.
        let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
            Ok(match scalar {
                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                Scalar::Int(int) => Scalar::Int(int),
            })
        };
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr.into()))
            }
            ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
            ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                );
                Operand::Immediate(Immediate::new_slice(
                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                    self,
                ))
            }
        };
        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
    }

    /// Read discriminant, return the runtime value as well as the variant index.
    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
    pub fn read_discriminant(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);

        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which just determines its position
        // in the declared list of variants -- they can differ with explicitly assigned discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straightforward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    Some(discr) => {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                    }
                    None => {
                        // On a type without actual discriminants, variant is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                    }
                };
                return Ok((discr, index));
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
        //   and used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.

        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;

        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        trace!("tag value: {}", tag_val);

        // Figure out which discriminant and variant this corresponds to.
        Ok(match *tag_encoding {
            TagEncoding::Direct => {
                let scalar = tag_val.to_scalar();
                // Generate a specific error if `tag_val` is not an integer.
                // (`tag_bits` itself is only used for error messages below.)
                let tag_bits = scalar
                    .try_to_int()
                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                    .assert_bits(tag_layout.size);
                // Cast bits from tag layout to discriminant layout.
                // After the checks we did above, this cannot fail, as
                // discriminants are int-like.
                let discr_val =
                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
                let discr_bits = discr_val.assert_bits(discr_layout.size);
                // Convert discriminant to variant index, and catch invalid discriminants.
                let index = match *op.layout.ty.kind() {
                    ty::Adt(adt, _) => {
                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                    }
                    ty::Generator(def_id, substs, _) => {
                        let substs = substs.as_generator();
                        substs
                            .discriminants(def_id, *self.tcx)
                            .find(|(_, var)| var.val == discr_bits)
                    }
                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
                }
                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                // Return the cast value, and the index.
                (discr_val, index.0)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                let tag_val = tag_val.to_scalar();
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.try_to_int() {
                    Err(dbg_val) => {
                        // So this is a pointer then, and casting to an int failed.
                        // Can only happen during CTFE.
                        // If the niche is just 0 and the pointer is not null, we know this is
                        // okay. Everything else, we conservatively reject.
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.scalar_may_be_null(tag_val)?;
                        if !ptr_valid {
                            throw_ub!(InvalidTag(dbg_val))
                        }
                        untagged_variant
                    }
                    Ok(tag_bits) => {
                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative =
                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
                            let variants_len = op
                                .layout
                                .ty
                                .ty_adt_def()
                                .expect("tagged layout for non adt")
                                .variants()
                                .len();
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                        } else {
                            untagged_variant
                        }
                    }
                };
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
            }
        })
    }
}
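
// A worked example (sketch, with hypothetical numbers) of the niche decoding in
// `read_discriminant` above, for an `Option<&T>`-style layout: the `None`
// variant (index 0) is the only niched variant and is encoded as tag 0, while
// every other tag value decodes to the untagged `Some` variant (index 1).
#[allow(dead_code)]
fn _niche_decode_sketch(tag: u128) -> u32 {
    let (niche_start, variants_start, variants_end, untagged_variant) = (0u128, 0u32, 0u32, 1u32);
    // variant_index_relative = tag - niche_start (machine arithmetic, i.e. wrapping)
    let variant_index_relative = tag.wrapping_sub(niche_start);
    if variant_index_relative <= u128::from(variants_end - variants_start) {
        variants_start + u32::try_from(variant_index_relative).unwrap()
    } else {
        untagged_variant
    }
}
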
// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
    use super::*;
    use rustc_data_structures::static_assert_size;
    // tidy-alphabetical-start
    static_assert_size!(Immediate, 48);
    static_assert_size!(ImmTy<'_>, 64);
    static_assert_size!(Operand, 56);
    static_assert_size!(OpTy<'_>, 80);
    // tidy-alphabetical-end
}