//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf as _;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, Primitive, Size};

use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
    Pointer,
};

mod caller_location;
mod type_name;
27 | ||
064997fb | 28 | fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> { |
b7449926 XL |
29 | let size = match kind { |
30 | Primitive::Int(integer, _) => integer.size(), | |
31 | _ => bug!("invalid `{}` argument: {:?}", name, bits), | |
32 | }; | |
ba9703b0 | 33 | let extra = 128 - u128::from(size.bits()); |
b7449926 | 34 | let bits_out = match name { |
ba9703b0 XL |
35 | sym::ctpop => u128::from(bits.count_ones()), |
36 | sym::ctlz => u128::from(bits.leading_zeros()) - extra, | |
37 | sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra, | |
60c5eb7d XL |
38 | sym::bswap => (bits << extra).swap_bytes(), |
39 | sym::bitreverse => (bits << extra).reverse_bits(), | |
b7449926 XL |
40 | _ => bug!("not a numeric intrinsic: {}", name), |
41 | }; | |
6a06907d | 42 | Scalar::from_uint(bits_out, size) |
b7449926 XL |
43 | } |
44 | ||
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
///
/// `substs` carries the single type argument `T` of the intrinsic; the result is returned as a
/// `ConstValue`. Errors with `TooGeneric` when the answer depends on an uninstantiated parameter.
pub(crate) fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            // The rendered name depends on the concrete type, so `T` must be monomorphic.
            ensure_monomorphic_enough(tcx, tp_ty)?;
            // The name is interned as an allocation; return it as a byte slice over it.
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
        }
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            ty::Adt(ref adt, _) => {
                ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx)
            }
            // Still-generic types: the variant count cannot be known yet.
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            // Every non-ADT concrete type reports zero variants.
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
109 | ||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    ///
    /// `ret` is the basic block to continue in on success; intrinsics that diverge
    /// (no return block) are handled up front. Returning `Ok(false)` means the
    /// intrinsic is not supported here and the caller/machine must deal with it.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::Provenance>],
        dest: &PlaceTy<'tcx, M::Provenance>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let ret = match ret {
            None => match intrinsic_name {
                // A diverging `transmute` means the target type is uninhabited.
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_immediate(location.to_ref(self), dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            // These are purely type-level queries; delegate to the regular const-eval
            // machinery (which ends up in `eval_nullary_intrinsic`) and copy the result.
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                // The result type of each nullary intrinsic, used to interpret the ConstValue.
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!(),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest, /*allow_transmute*/ false)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let bits = val.to_bits(layout_of.size)?;
                // These intrinsics only make sense on scalar (integer) layouts.
                let kind = match layout_of.abi {
                    Abi::Scalar(scalar) => scalar.primitive(),
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                // Map the `_nonzero` variants onto the plain ones, remembering the
                // extra precondition that the operand must not be zero.
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(&args[0])?;
                let rhs = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!(),
                };
                self.binop_with_overflow(
                    bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest,
                )?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let val = self.saturating_arith(
                    if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(&args[0])?;
                let discr_val = self.read_discriminant(&place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!(),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                // The `unchecked_*` intrinsics promise no overflow; report UB if it happened.
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                // The `% width_bits` turns a zero shift into a zero inverse shift
                // (avoiding an out-of-range shift by the full width below).
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                // Drop the bits that overflowed past the type's width in the u128 math.
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
            }
            sym::offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                // `offset` requires the result to stay in bounds; checked in the helper.
                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                // `arith_offset` is the wrapping variant: no inbounds requirement.
                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let a = self.read_pointer(&args[0])?;
                let b = self.read_pointer(&args[1])?;

                let usize_layout = self.layout_of(self.tcx.types.usize)?;
                let isize_layout = self.layout_of(self.tcx.types.isize)?;

                // Get offsets for both that are at least relative to the same base.
                let (a_offset, b_offset) =
                    match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
                        (Err(a), Err(b)) => {
                            // Neither pointer points to an allocation.
                            // If these are unequal or null, this *will* fail the deref check below.
                            (a, b)
                        }
                        (Err(_), _) | (_, Err(_)) => {
                            // We managed to find a valid allocation for one pointer, but not the other.
                            // That means they are definitely not pointing to the same allocation.
                            throw_ub_format!(
                                "`{}` called on pointers into different allocations",
                                intrinsic_name
                            );
                        }
                        (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
                            // Found allocation for both. They must be into the same allocation.
                            if a_alloc_id != b_alloc_id {
                                throw_ub_format!(
                                    "`{}` called on pointers into different allocations",
                                    intrinsic_name
                                );
                            }
                            // Use these offsets for distance calculation.
                            (a_offset.bytes(), b_offset.bytes())
                        }
                    };

                // Compute distance.
                let dist = {
                    // Addresses are unsigned, so this is a `usize` computation. We have to do the
                    // overflow check separately anyway.
                    let (val, overflowed, _ty) = {
                        let a_offset = ImmTy::from_uint(a_offset, usize_layout);
                        let b_offset = ImmTy::from_uint(b_offset, usize_layout);
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
                    };
                    if overflowed {
                        // a < b
                        if intrinsic_name == sym::ptr_offset_from_unsigned {
                            throw_ub_format!(
                                "`{}` called when first pointer has smaller offset than second: {} < {}",
                                intrinsic_name,
                                a_offset,
                                b_offset,
                            );
                        }
                        // The signed form of the intrinsic allows this. If we interpret the
                        // difference as isize, we'll get the proper signed difference. If that
                        // seems *positive*, they were more than isize::MAX apart.
                        let dist = val.to_machine_isize(self)?;
                        if dist >= 0 {
                            throw_ub_format!(
                                "`{}` called when first pointer is too far before second",
                                intrinsic_name
                            );
                        }
                        dist
                    } else {
                        // b >= a
                        let dist = val.to_machine_isize(self)?;
                        // If converting to isize produced a *negative* result, we had an overflow
                        // because they were more than isize::MAX apart.
                        if dist < 0 {
                            throw_ub_format!(
                                "`{}` called when first pointer is too far ahead of second",
                                intrinsic_name
                            );
                        }
                        dist
                    }
                };

                // Check that the range between them is dereferenceable ("in-bounds or one past the
                // end of the same allocation"). This is like the check in ptr_offset_inbounds.
                let min_ptr = if dist >= 0 { b } else { a };
                self.check_ptr_access_align(
                    min_ptr,
                    Size::from_bytes(dist.unsigned_abs()),
                    Align::ONE,
                    CheckInAllocMsg::OffsetFromTest,
                )?;

                // Perform division by size to compute return value.
                let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
                    assert!(0 <= dist && dist <= self.machine_isize_max());
                    usize_layout
                } else {
                    assert!(self.machine_isize_min() <= dist && dist <= self.machine_isize_max());
                    isize_layout
                };
                let pointee_layout = self.layout_of(substs.type_at(0))?;
                // If ret_layout is unsigned, we checked that so is the distance, so we are good.
                let val = ImmTy::from_int(dist, ret_layout);
                let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
                self.exact_div(&val, &size, dest)?;
            }

            sym::transmute => {
                self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
            }
            sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                // error message.
                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }

                if intrinsic_name == sym::assert_zero_valid {
                    let should_panic = !self.tcx.permits_zero_init(layout);

                    if should_panic {
                        M::abort(
                            self,
                            format!(
                                "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
                                ty
                            ),
                        )?;
                    }
                }

                if intrinsic_name == sym::assert_uninit_valid {
                    let should_panic = !self.tcx.permits_uninit_init(layout);

                    if should_panic {
                        M::abort(
                            self,
                            format!(
                                "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
                                ty
                            ),
                        )?;
                    }
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                let (dest, dest_len) = self.place_to_simd(dest)?;
                assert_eq!(input_len, dest_len, "Return vector length must match input length");
                assert!(
                    index < dest_len,
                    "Index `{}` must be in bounds of vector with length {}`",
                    index,
                    dest_len
                );

                // Copy the input vector element-wise, substituting `elem` at `index`.
                for i in 0..dest_len {
                    let place = self.mplace_index(&dest, i)?;
                    let value = if i == index {
                        elem.clone()
                    } else {
                        self.mplace_index(&input, i)?.into()
                    };
                    self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                assert!(
                    index < input_len,
                    "index `{}` must be in bounds of vector with length `{}`",
                    index,
                    input_len
                );
                self.copy_op(
                    &self.mplace_index(&input, index)?.into(),
                    dest,
                    /*allow_transmute*/ false,
                )?;
            }
            sym::likely | sym::unlikely | sym::black_box => {
                // These just return their argument
                self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
            }
            sym::assume => {
                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }

            sym::vtable_size => {
                let ptr = self.read_pointer(&args[0])?;
                let (size, _align) = self.get_vtable_size_and_align(ptr)?;
                self.write_scalar(Scalar::from_machine_usize(size.bytes(), self), dest)?;
            }
            sym::vtable_align => {
                let ptr = self.read_pointer(&args[0])?;
                let (_size, align) = self.get_vtable_size_and_align(ptr)?;
                self.write_scalar(Scalar::from_machine_usize(align.bytes(), self), dest)?;
            }

            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(**dest));
        // Intrinsic emulation succeeded; continue in the designated return block.
        self.go_to_block(ret);
        Ok(true)
    }

    /// Performs division of `a` by `b`, writing the quotient to `dest`.
    /// Reports UB (via `throw_ub_format!`) when the division leaves a remainder,
    /// matching the contract of the `exact_div` intrinsic.
    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::Provenance>,
        b: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
        assert!(!overflow); // All overflow is UB, so this should never return on overflow.
        if res.assert_bits(a.layout.size) != 0 {
            throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
    }

    /// Computes `l <mir_op> r` with saturating semantics: on overflow the result is
    /// clamped to the type's numeric bounds instead. Only `Add` and `Sub` are
    /// supported (asserted below).
    pub fn saturating_arith(
        &self,
        mir_op: BinOp,
        l: &ImmTy<'tcx, M::Provenance>,
        r: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
        let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
        Ok(if overflowed {
            let size = l.layout.size;
            let num_bits = size.bits();
            if l.layout.abi.is_signed() {
                // For signed ints the saturated value depends on the sign of the first
                // term since the sign of the second term can be inferred from this and
                // the fact that the operation has overflowed (if either is 0 no
                // overflow can occur)
                let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                if first_term_positive {
                    // Negative overflow not possible since the positive first term
                    // can only increase an (in range) negative term for addition
                    // or corresponding negated positive term for subtraction
                    Scalar::from_int(size.signed_int_max(), size)
                } else {
                    // Positive overflow not possible for similar reason
                    // max negative
                    Scalar::from_int(size.signed_int_min(), size)
                }
            } else {
                // unsigned
                if matches!(mir_op, BinOp::Add) {
                    // max unsigned
                    Scalar::from_uint(size.unsigned_int_max(), size)
                } else {
                    // underflow to 0
                    Scalar::from_uint(0u128, size)
                }
            }
        } else {
            val
        })
    }

    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, must not overflow an isize.
        // `checked_mul` enforces a too small bound, but no actual allocation can be big enough for
        // the difference to be noticeable.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        // This call handles checking for integer/null pointers.
        self.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(offset_bytes.unsigned_abs()),
            Align::ONE,
            CheckInAllocMsg::PointerArithmeticTest,
        )?;
        Ok(offset_ptr)
    }

    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
        // `src`/`dst` are raw pointers; look through them to find the element layout.
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
        let (size, align) = (layout.size, layout.align.abi);
        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
        // but no actual allocation can be big enough for the difference to be noticeable.
        let size = size.checked_mul(count, self).ok_or_else(|| {
            err_ub_format!(
                "overflow computing total size of `{}`",
                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(&src)?;
        let dst = self.read_pointer(&dst)?;

        self.mem_copy(src, align, dst, align, size, nonoverlapping)
    }

    /// Write `count*size_of::<T>()` copies of `byte` starting at `*dst`
    /// (the `write_bytes` intrinsic).
    pub(crate) fn write_bytes_intrinsic(
        &mut self,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
    ) -> InterpResult<'tcx> {
        let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;

        let dst = self.read_pointer(&dst)?;
        let byte = self.read_scalar(&byte)?.to_u8()?;
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;

        // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
        // but no actual allocation can be big enough for the difference to be noticeable.
        let len = layout
            .size
            .checked_mul(count, self)
            .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;

        let bytes = std::iter::repeat(byte).take(len.bytes_usize());
        self.write_bytes_ptr(dst, bytes)
    }

    /// Compare `size_of::<T>()` bytes behind `lhs` and `rhs` for equality
    /// (the `raw_eq` intrinsic). `T` must be sized (asserted below).
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
    ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
        assert!(!layout.is_unsized());

        let lhs = self.read_pointer(lhs)?;
        let rhs = self.read_pointer(rhs)?;
        let lhs_bytes = self.read_bytes_ptr(lhs, layout.size)?;
        let rhs_bytes = self.read_bytes_ptr(rhs, layout.size)?;
        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }
}