]>
Commit | Line | Data |
---|---|---|
b7449926 XL |
1 | //! Functions concerning immediate values and operands, and reading from operands. |
2 | //! All high-level functions to read from memory work on operands as sources. | |
3 | ||
ba9703b0 XL |
4 | use std::convert::TryFrom; |
5 | use std::fmt::Write; | |
b7449926 | 6 | |
ba9703b0 | 7 | use rustc_hir::def::Namespace; |
60c5eb7d | 8 | use rustc_macros::HashStable; |
c295e0f8 | 9 | use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout}; |
ba9703b0 | 10 | use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer}; |
5e7ed085 | 11 | use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty}; |
ba9703b0 | 12 | use rustc_middle::{mir, ty}; |
04454e1e | 13 | use rustc_target::abi::{self, Abi, HasDataLayout, Size, TagEncoding}; |
ba9703b0 XL |
14 | use rustc_target::abi::{VariantIdx, Variants}; |
15 | ||
16 | use super::{ | |
136023e0 | 17 | alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId, |
923072b8 FG |
18 | InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, |
19 | PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit, | |
ba9703b0 | 20 | }; |
0bf4aa26 | 21 | |
/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Immediate<Tag: Provenance = AllocId> {
    /// A single (possibly uninitialized) scalar value.
    Scalar(ScalarMaybeUninit<Tag>),
    /// A pair of scalars, e.g. a wide pointer (data + length/vtable) or a checked-op result.
    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}
34 | ||
// Guard against accidental growth of this hot type: on x86-64 it must stay at 56 bytes.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);
37 | ||
136023e0 | 38 | impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> { |
416331ca | 39 | #[inline(always)] |
f9f354fc | 40 | fn from(val: ScalarMaybeUninit<Tag>) -> Self { |
416331ca XL |
41 | Immediate::Scalar(val) |
42 | } | |
43 | } | |
44 | ||
136023e0 | 45 | impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> { |
416331ca XL |
46 | #[inline(always)] |
47 | fn from(val: Scalar<Tag>) -> Self { | |
48 | Immediate::Scalar(val.into()) | |
9fa01778 | 49 | } |
416331ca | 50 | } |
9fa01778 | 51 | |
136023e0 XL |
52 | impl<'tcx, Tag: Provenance> Immediate<Tag> { |
53 | pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self { | |
54 | Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx)) | |
55 | } | |
56 | ||
57 | pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self { | |
58 | Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx)) | |
60c5eb7d | 59 | } |
60c5eb7d | 60 | |
dfeec247 | 61 | pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self { |
ba9703b0 | 62 | Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into()) |
b7449926 XL |
63 | } |
64 | ||
94222f64 XL |
65 | pub fn new_dyn_trait( |
66 | val: Scalar<Tag>, | |
67 | vtable: Pointer<Option<Tag>>, | |
68 | cx: &impl HasDataLayout, | |
69 | ) -> Self { | |
70 | Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx)) | |
b7449926 XL |
71 | } |
72 | ||
73 | #[inline] | |
3dfed10e | 74 | pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> { |
b7449926 | 75 | match self { |
a1dfa0c6 | 76 | Immediate::Scalar(val) => val, |
94222f64 | 77 | Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"), |
b7449926 XL |
78 | } |
79 | } | |
80 | ||
81 | #[inline] | |
dc9dc135 | 82 | pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> { |
3dfed10e | 83 | self.to_scalar_or_uninit().check_init() |
b7449926 | 84 | } |
94222f64 XL |
85 | |
86 | #[inline] | |
04454e1e | 87 | pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>) { |
94222f64 | 88 | match self { |
04454e1e FG |
89 | Immediate::ScalarPair(val1, val2) => (val1, val2), |
90 | Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"), | |
94222f64 XL |
91 | } |
92 | } | |
04454e1e FG |
93 | |
94 | #[inline] | |
95 | pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> { | |
96 | let (val1, val2) = self.to_scalar_or_uninit_pair(); | |
97 | Ok((val1.check_init()?, val2.check_init()?)) | |
98 | } | |
b7449926 XL |
99 | } |
100 | ||
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
    // Kept private so the value can only be built/read through the constructors below,
    // which keep `imm` and `layout` consistent.
    imm: Immediate<Tag>,
    pub layout: TyAndLayout<'tcx>,
}
108 | ||
// Guard against accidental growth: on x86-64 `ImmTy` must stay at 72 bytes.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
111 | ||
impl<Tag: Provenance> std::fmt::Display for ImmTy<'_, Tag> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, Tag: Provenance>(
            cx: FmtPrinter<'a, 'tcx>,
            s: ScalarMaybeUninit<Tag>,
            ty: Ty<'tcx>,
        ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
            match s {
                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
                    cx.pretty_print_const_scalar_int(int, ty, true)
                }
                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
                    // print what it points to, which would fail since it has no access to the local
                    // memory.
                    cx.pretty_print_const_pointer(ptr, ty, true)
                }
                // Uninitialized data: print "uninit <type>" instead of a value.
                ScalarMaybeUninit::Uninit => cx.typed_value(
                    |mut this| {
                        this.write_str("uninit ")?;
                        Ok(this)
                    },
                    |this| this.print_type(ty),
                    " ",
                ),
            }
        }
        ty::tls::with(|tcx| {
            match self.imm {
                Immediate::Scalar(s) => {
                    // Pretty-printing needs the type lifted into this tcx's lifetime;
                    // fall back to a raw hex dump if lifting fails.
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
                        f.write_str(&p(cx, s, ty)?.into_buffer())?;
                        return Ok(());
                    }
                    write!(f, "{:x}: {}", s, self.layout.ty)
                }
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty,)
                }
            }
        })
    }
}
158 | ||
136023e0 | 159 | impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> { |
a1dfa0c6 | 160 | type Target = Immediate<Tag>; |
b7449926 | 161 | #[inline(always)] |
a1dfa0c6 | 162 | fn deref(&self) -> &Immediate<Tag> { |
9fa01778 | 163 | &self.imm |
b7449926 XL |
164 | } |
165 | } | |
166 | ||
/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Operand<Tag: Provenance = AllocId> {
    /// The value is available directly, no memory access needed.
    Immediate(Immediate<Tag>),
    /// The value still lives in memory at the given place.
    Indirect(MemPlace<Tag>),
}
175 | ||
/// An `Operand` paired with its layout, the common currency of this module.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}
181 | ||
// Guard against accidental growth: on x86-64 `OpTy` must stay at 80 bytes.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_>, 80);
6a06907d | 184 | |
136023e0 | 185 | impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> { |
0bf4aa26 | 186 | type Target = Operand<Tag>; |
b7449926 | 187 | #[inline(always)] |
0bf4aa26 | 188 | fn deref(&self) -> &Operand<Tag> { |
b7449926 XL |
189 | &self.op |
190 | } | |
191 | } | |
192 | ||
136023e0 | 193 | impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> { |
b7449926 | 194 | #[inline(always)] |
0bf4aa26 | 195 | fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { |
dfeec247 | 196 | OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout } |
b7449926 XL |
197 | } |
198 | } | |
199 | ||
136023e0 | 200 | impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> { |
6a06907d XL |
201 | #[inline(always)] |
202 | fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self { | |
203 | OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout } | |
204 | } | |
205 | } | |
206 | ||
136023e0 | 207 | impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> { |
b7449926 | 208 | #[inline(always)] |
a1dfa0c6 | 209 | fn from(val: ImmTy<'tcx, Tag>) -> Self { |
dfeec247 | 210 | OpTy { op: Operand::Immediate(val.imm), layout: val.layout } |
b7449926 XL |
211 | } |
212 | } | |
213 | ||
136023e0 | 214 | impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> { |
9fa01778 | 215 | #[inline] |
ba9703b0 | 216 | pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self { |
416331ca | 217 | ImmTy { imm: val.into(), layout } |
9fa01778 XL |
218 | } |
219 | ||
dfeec247 | 220 | #[inline] |
ba9703b0 XL |
221 | pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self { |
222 | ImmTy { imm, layout } | |
223 | } | |
224 | ||
225 | #[inline] | |
226 | pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> { | |
dfeec247 XL |
227 | Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout)) |
228 | } | |
e1599b0c | 229 | #[inline] |
ba9703b0 | 230 | pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self { |
e1599b0c XL |
231 | Self::from_scalar(Scalar::from_uint(i, layout.size), layout) |
232 | } | |
233 | ||
dfeec247 | 234 | #[inline] |
ba9703b0 | 235 | pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> { |
dfeec247 XL |
236 | Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout)) |
237 | } | |
238 | ||
e1599b0c | 239 | #[inline] |
ba9703b0 | 240 | pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self { |
e1599b0c XL |
241 | Self::from_scalar(Scalar::from_int(i, layout.size), layout) |
242 | } | |
f035d41b XL |
243 | |
244 | #[inline] | |
245 | pub fn to_const_int(self) -> ConstInt { | |
246 | assert!(self.layout.ty.is_integral()); | |
29967ef6 XL |
247 | let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int(); |
248 | ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral()) | |
f035d41b | 249 | } |
9fa01778 XL |
250 | } |
251 | ||
ba9703b0 | 252 | impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { |
    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    ///
    /// This is an internal function; call `read_immediate` instead.
    fn read_immediate_from_mplace_raw(
        &self,
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
        force: bool,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
            return Ok(None);
        }

        let Some(alloc) = self.get_place_alloc(mplace)? else {
            return Ok(Some(ImmTy {
                // zero-sized type
                imm: Scalar::ZST.into(),
                layout: mplace.layout,
            }));
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need an extra
        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
        let scalar_layout = match mplace.layout.abi {
            // `if` does not work nested inside patterns, making this a bit awkward to express.
            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
            // `force` (validity checking only) also accepts scalars that may be uninit.
            Abi::Scalar(s) if force => Some(s.primitive()),
            _ => None,
        };
        let read_provenance = |s: abi::Primitive, size| {
            // Should be just `s.is_ptr()`, but we support a Miri flag that accepts more
            // questionable ptr-int transmutes.
            let number_may_have_provenance = !M::enforce_number_no_provenance(self);
            s.is_ptr() || (number_may_have_provenance && size == self.pointer_size())
        };
        if let Some(s) = scalar_layout {
            //FIXME(#96185): let size = s.size(self);
            //FIXME(#96185): assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
            let size = mplace.layout.size; //FIXME(#96185): remove this line
            let scalar =
                alloc.read_scalar(alloc_range(Size::ZERO, size), read_provenance(s, size))?;
            return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
        }
        let scalar_pair_layout = match mplace.layout.abi {
            Abi::ScalarPair(
                abi::Scalar::Initialized { value: a, .. },
                abi::Scalar::Initialized { value: b, .. },
            ) => Some((a, b)),
            Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
            _ => None,
        };
        if let Some((a, b)) = scalar_pair_layout {
            // We checked `ptr_align` above, so all fields will have the alignment they need.
            // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
            // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
            let (a_size, b_size) = (a.size(self), b.size(self));
            let b_offset = a_size.align_to(b.align(self).abi);
            assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
            let a_val =
                alloc.read_scalar(alloc_range(Size::ZERO, a_size), read_provenance(a, a_size))?;
            let b_val =
                alloc.read_scalar(alloc_range(b_offset, b_size), read_provenance(b, b_size))?;
            return Ok(Some(ImmTy {
                imm: Immediate::ScalarPair(a_val, b_val),
                layout: mplace.layout,
            }));
        }
        // Neither a scalar nor scalar pair.
        return Ok(None);
    }
328 | ||
    /// Try returning an immediate for the operand. If the layout does not permit loading this as an
    /// immediate, return where in memory we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    ///
    /// If `force` is `true`, then even scalars with fields that can be uninit will be
    /// read. This means the load is lossy and should not be written back!
    /// This flag exists only for validity checking.
    ///
    /// This is an internal function that should not usually be used; call `read_immediate` instead.
    pub fn read_immediate_raw(
        &self,
        src: &OpTy<'tcx, M::PointerTag>,
        force: bool,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
        Ok(match src.try_as_mplace() {
            Ok(ref mplace) => {
                // In-memory operand: attempt the load; fall back to returning the place.
                if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
                    Ok(val)
                } else {
                    Err(*mplace)
                }
            }
            // Already an immediate: nothing to do.
            Err(val) => Ok(val),
        })
    }
356 | ||
a1dfa0c6 | 357 | /// Read an immediate from a place, asserting that that is possible with the given layout. |
b7449926 | 358 | #[inline(always)] |
a1dfa0c6 | 359 | pub fn read_immediate( |
0bf4aa26 | 360 | &self, |
6a06907d | 361 | op: &OpTy<'tcx, M::PointerTag>, |
dc9dc135 | 362 | ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> { |
04454e1e | 363 | if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? { |
dc9dc135 | 364 | Ok(imm) |
b7449926 | 365 | } else { |
f035d41b | 366 | span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty); |
b7449926 XL |
367 | } |
368 | } | |
369 | ||
370 | /// Read a scalar from a place | |
0bf4aa26 XL |
371 | pub fn read_scalar( |
372 | &self, | |
6a06907d | 373 | op: &OpTy<'tcx, M::PointerTag>, |
f9f354fc | 374 | ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> { |
3dfed10e | 375 | Ok(self.read_immediate(op)?.to_scalar_or_uninit()) |
b7449926 XL |
376 | } |
377 | ||
136023e0 XL |
378 | /// Read a pointer from a place. |
379 | pub fn read_pointer( | |
380 | &self, | |
381 | op: &OpTy<'tcx, M::PointerTag>, | |
382 | ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> { | |
04454e1e | 383 | self.scalar_to_ptr(self.read_scalar(op)?.check_init()?) |
136023e0 XL |
384 | } |
385 | ||
60c5eb7d | 386 | // Turn the wide MPlace into a string (must already be dereferenced!) |
6a06907d | 387 | pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> { |
b7449926 | 388 | let len = mplace.len(self)?; |
04454e1e | 389 | let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?; |
29967ef6 | 390 | let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?; |
b7449926 XL |
391 | Ok(str) |
392 | } | |
393 | ||
    /// Projection functions: select field `field` of `op`.
    pub fn operand_field(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let base = match op.try_as_mplace() {
            Ok(ref mplace) => {
                // We can reuse the mplace field computation logic for indirect operands.
                let field = self.mplace_field(mplace, field)?;
                return Ok(field.into());
            }
            Err(value) => value,
        };

        let field_layout = base.layout.field(self, field);
        let offset = base.layout.fields.offset(field);
        // This makes several assumptions about what layouts we will encounter; we match what
        // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
        let field_val: Immediate<_> = match (*base, base.layout.abi) {
            // the field contains no information
            _ if field_layout.is_zst() => Scalar::ZST.into(),
            // the field covers the entire type
            _ if field_layout.size == base.layout.size => {
                // Sanity check: the field's ABI must mirror the base's ABI kind.
                assert!(match (base.layout.abi, field_layout.abi) {
                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
                    _ => false,
                });
                assert!(offset.bytes() == 0);
                *base
            }
            // extract fields from types with `ScalarPair` ABI
            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
                assert!(matches!(field_layout.abi, Abi::Scalar(..)));
                // Offset 0 selects the first half; any other offset must be the second.
                Immediate::from(if offset.bytes() == 0 {
                    debug_assert_eq!(field_layout.size, a.size(self));
                    a_val
                } else {
                    debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
                    debug_assert_eq!(field_layout.size, b.size(self));
                    b_val
                })
            }
            _ => span_bug!(
                self.cur_span(),
                "invalid field access on immediate {}, layout {:#?}",
                base,
                base.layout
            ),
        };

        Ok(OpTy { op: Operand::Immediate(field_val), layout: field_layout })
    }
448 | ||
ba9703b0 XL |
449 | pub fn operand_index( |
450 | &self, | |
6a06907d | 451 | op: &OpTy<'tcx, M::PointerTag>, |
ba9703b0 XL |
452 | index: u64, |
453 | ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { | |
454 | if let Ok(index) = usize::try_from(index) { | |
455 | // We can just treat this as a field. | |
456 | self.operand_field(op, index) | |
457 | } else { | |
458 | // Indexing into a big array. This must be an mplace. | |
136023e0 | 459 | let mplace = op.assert_mem_place(); |
6a06907d | 460 | Ok(self.mplace_index(&mplace, index)?.into()) |
ba9703b0 XL |
461 | } |
462 | } | |
463 | ||
b7449926 XL |
464 | pub fn operand_downcast( |
465 | &self, | |
6a06907d | 466 | op: &OpTy<'tcx, M::PointerTag>, |
a1dfa0c6 | 467 | variant: VariantIdx, |
dc9dc135 | 468 | ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { |
136023e0 | 469 | Ok(match op.try_as_mplace() { |
6a06907d | 470 | Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(), |
b7449926 | 471 | Err(..) => { |
5e7ed085 FG |
472 | // Downcasts only change the layout. |
473 | // (In particular, no check about whether this is even the active variant -- that's by design, | |
474 | // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.) | |
b7449926 | 475 | let layout = op.layout.for_variant(self, variant); |
6a06907d | 476 | OpTy { layout, ..*op } |
b7449926 XL |
477 | } |
478 | }) | |
479 | } | |
480 | ||
    /// Apply a single MIR projection element to `base`.
    #[instrument(skip(self), level = "debug")]
    pub fn operand_projection(
        &self,
        base: &OpTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.operand_field(base, field.index())?,
            Downcast(_, variant) => self.operand_downcast(base, variant)?,
            Deref => self.deref_operand(base)?.into(),
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                // The rest should only occur as mplace, we do not use Immediates for types
                // allowing such operations. This matches place_projection forcing an allocation.
                let mplace = base.assert_mem_place();
                self.mplace_projection(&mplace, proj_elem)?.into()
            }
        })
    }
500 | ||
3c0e092e XL |
501 | /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements. |
502 | /// Also returns the number of elements. | |
503 | pub fn operand_to_simd( | |
504 | &self, | |
505 | base: &OpTy<'tcx, M::PointerTag>, | |
506 | ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> { | |
507 | // Basically we just transmute this place into an array following simd_size_and_type. | |
508 | // This only works in memory, but repr(simd) types should never be immediates anyway. | |
509 | assert!(base.layout.ty.is_simd()); | |
510 | self.mplace_to_simd(&base.assert_mem_place()) | |
511 | } | |
512 | ||
f035d41b XL |
513 | /// Read from a local. Will not actually access the local if reading from a ZST. |
514 | /// Will not access memory, instead an indirect `Operand` is returned. | |
515 | /// | |
516 | /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an | |
517 | /// OpTy from a local | |
0bf4aa26 XL |
518 | pub fn access_local( |
519 | &self, | |
a1dfa0c6 | 520 | frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, |
0bf4aa26 | 521 | local: mir::Local, |
ba9703b0 | 522 | layout: Option<TyAndLayout<'tcx>>, |
dc9dc135 | 523 | ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { |
9fa01778 | 524 | let layout = self.layout_of_local(frame, local, layout)?; |
48663c56 XL |
525 | let op = if layout.is_zst() { |
526 | // Do not read from ZST, they might not be initialized | |
29967ef6 | 527 | Operand::Immediate(Scalar::ZST.into()) |
48663c56 | 528 | } else { |
e74abb32 | 529 | M::access_local(&self, frame, local)? |
48663c56 | 530 | }; |
0bf4aa26 XL |
531 | Ok(OpTy { op, layout }) |
532 | } | |
533 | ||
ba9703b0 XL |
534 | /// Every place can be read from, so we can turn them into an operand. |
535 | /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this | |
536 | /// will never actually read from memory. | |
9fa01778 XL |
537 | #[inline(always)] |
538 | pub fn place_to_op( | |
539 | &self, | |
6a06907d | 540 | place: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 541 | ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { |
6a06907d | 542 | let op = match **place { |
dfeec247 | 543 | Place::Ptr(mplace) => Operand::Indirect(mplace), |
ba9703b0 XL |
544 | Place::Local { frame, local } => { |
545 | *self.access_local(&self.stack()[frame], local, None)? | |
546 | } | |
9fa01778 XL |
547 | }; |
548 | Ok(OpTy { op, layout: place.layout }) | |
549 | } | |
550 | ||
    /// Evaluate a place with the goal of reading from it. This lets us sometimes
    /// avoid allocations.
    pub fn eval_place_to_op(
        &self,
        place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if place.projection.is_empty() { layout } else { None };

        let base_op = self.access_local(self.frame(), place.local, layout)?;

        // Apply each projection element in order, threading the operand through.
        let op = place
            .projection
            .iter()
            .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;

        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            )?)?,
            op.layout,
        ));
        Ok(op)
    }
581 | ||
    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
    #[inline]
    pub fn eval_operand(
        &self,
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,

            Constant(ref constant) => {
                let val =
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   checked yet.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.

                self.mir_const_to_op(&val, layout)?
            }
        };
        trace!("{:?}: {:?}", mir_op, *op);
        Ok(op)
    }
610 | ||
611 | /// Evaluate a bunch of operands at once | |
612 | pub(super) fn eval_operands( | |
613 | &self, | |
614 | ops: &[mir::Operand<'tcx>], | |
dc9dc135 | 615 | ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> { |
74b04a01 | 616 | ops.iter().map(|op| self.eval_operand(op, None)).collect() |
b7449926 XL |
617 | } |
618 | ||
    // Used when the miri-engine runs into a constant and for extracting information from constants
    // in patterns via the `const_eval` module
    /// The `val` and `layout` are assumed to already be in our interpreter
    /// "universe" (param_env).
    pub fn const_to_op(
        &self,
        c: ty::Const<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match c.kind() {
            // Still-generic constants cannot be evaluated here.
            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
            // A const that already failed elsewhere; propagate without a duplicate diagnostic.
            ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
                throw_inval!(AlreadyReported(reported))
            }
            // Resolve and evaluate the definition this const refers to.
            ty::ConstKind::Unevaluated(uv) => {
                let instance = self.resolve(uv.def, uv.substs)?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
            }
            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c)
            }
            // A fully evaluated valtree; lower it to a `ConstValue` first.
            ty::ConstKind::Value(valtree) => {
                let ty = c.ty();
                let const_val = self.tcx.valtree_to_const_val((ty, valtree));
                self.const_val_to_op(const_val, ty, layout)
            }
        }
    }
647 | ||
c295e0f8 | 648 | pub fn mir_const_to_op( |
6a06907d XL |
649 | &self, |
650 | val: &mir::ConstantKind<'tcx>, | |
651 | layout: Option<TyAndLayout<'tcx>>, | |
652 | ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { | |
653 | match val { | |
5099ac24 FG |
654 | mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout), |
655 | mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout), | |
6a06907d XL |
656 | } |
657 | } | |
658 | ||
    /// Turn a `ConstValue` into an operand, adjusting pointers into this
    /// interpreter's "universe" via `global_base_pointer`.
    pub(crate) fn const_val_to_op(
        &self,
        val_val: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Other cases need layout.
        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
            Ok(match scalar {
                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                Scalar::Int(int) => Scalar::Int(int),
            })
        };
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
            }
            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                );
                Operand::Immediate(Immediate::new_slice(
                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                    self,
                ))
            }
        };
        Ok(OpTy { op, layout })
    }
b7449926 XL |
698 | |
    /// Read discriminant, return the runtime value as well as the variant index.
    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
    pub fn read_discriminant(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);

        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which is just determining its position in the
        // declared list of variants -- they can differ with explicitly assigned discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                // Single-variant layout: no tag is stored in memory at all, so no load is
                // needed -- the answer is statically known.
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    Some(discr) => {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                    }
                    None => {
                        // On a type without actual discriminants, variant is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                    }
                };
                return Ok((discr, index));
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
        //   and used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.

        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;

        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        trace!("tag value: {}", tag_val);

        // Figure out which discriminant and variant this corresponds to.
        Ok(match *tag_encoding {
            TagEncoding::Direct => {
                // The tag *is* the discriminant, just possibly stored at a different
                // integer width than the discriminant type.
                let scalar = tag_val.to_scalar()?;
                // Generate a specific error if `tag_val` is not an integer.
                // (`tag_bits` itself is only used for error messages below.)
                let tag_bits = scalar
                    .try_to_int()
                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                    .assert_bits(tag_layout.size);
                // Cast bits from tag layout to discriminant layout.
                // After the checks we did above, this cannot fail, as
                // discriminants are int-like.
                let discr_val =
                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
                let discr_bits = discr_val.assert_bits(discr_layout.size);
                // Convert discriminant to variant index, and catch invalid discriminants.
                let index = match *op.layout.ty.kind() {
                    ty::Adt(adt, _) => {
                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                    }
                    ty::Generator(def_id, substs, _) => {
                        let substs = substs.as_generator();
                        substs
                            .discriminants(def_id, *self.tcx)
                            .find(|(_, var)| var.val == discr_bits)
                    }
                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
                }
                // No variant carries this discriminant: the tag is invalid (UB).
                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                // Return the cast value, and the index.
                (discr_val, index.0)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                let tag_val = tag_val.to_scalar()?;
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.try_to_int() {
                    Err(dbg_val) => {
                        // So this is a pointer then, and casting to an int failed.
                        // Can only happen during CTFE.
                        // The niche must be just 0, and the ptr not null, then we know this is
                        // okay. Everything else, we conservatively reject.
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.scalar_may_be_null(tag_val)?;
                        if !ptr_valid {
                            throw_ub!(InvalidTag(dbg_val))
                        }
                        dataful_variant
                    }
                    Ok(tag_bits) => {
                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
                        // (machine arithmetic so that wrapping behaves per the tag's width)
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative = variant_index_relative_val
                            .to_scalar()?
                            .assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
                            // Sanity check: the computed index must name a declared variant.
                            let variants_len = op
                                .layout
                                .ty
                                .ty_adt_def()
                                .expect("tagged layout for non adt")
                                .variants()
                                .len();
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                        } else {
                            // Value outside the niche range: it's the untagged ("dataful") variant.
                            dataful_variant
                        }
                    }
                };
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
            }
        })
    }
b7449926 | 847 | } |