]>
Commit | Line | Data |
---|---|---|
b7449926 | 1 | //! Intrinsics and other functions that the miri engine executes without |
9fa01778 | 2 | //! looking at their MIR. Intrinsics/functions supported here are shared by CTFE |
b7449926 XL |
3 | //! and miri. |
4 | ||
ba9703b0 XL |
5 | use std::convert::TryFrom; |
6 | ||
7 | use rustc_hir::def_id::DefId; | |
8 | use rustc_middle::mir::{ | |
dfeec247 | 9 | self, |
5869c6ff | 10 | interpret::{ConstValue, GlobalId, InterpResult, Scalar}, |
dfeec247 XL |
11 | BinOp, |
12 | }; | |
ba9703b0 | 13 | use rustc_middle::ty; |
c295e0f8 | 14 | use rustc_middle::ty::layout::LayoutOf as _; |
ba9703b0 | 15 | use rustc_middle::ty::subst::SubstsRef; |
f9f354fc | 16 | use rustc_middle::ty::{Ty, TyCtxt}; |
dfeec247 | 17 | use rustc_span::symbol::{sym, Symbol}; |
c295e0f8 | 18 | use rustc_target::abi::{Abi, Align, Primitive, Size}; |
b7449926 | 19 | |
3dfed10e XL |
20 | use super::{ |
21 | util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy, | |
136023e0 | 22 | Pointer, |
3dfed10e | 23 | }; |
b7449926 | 24 | |
e74abb32 | 25 | mod caller_location; |
dc9dc135 XL |
26 | mod type_name; |
27 | ||
6a06907d | 28 | fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> { |
b7449926 XL |
29 | let size = match kind { |
30 | Primitive::Int(integer, _) => integer.size(), | |
31 | _ => bug!("invalid `{}` argument: {:?}", name, bits), | |
32 | }; | |
ba9703b0 | 33 | let extra = 128 - u128::from(size.bits()); |
b7449926 | 34 | let bits_out = match name { |
ba9703b0 XL |
35 | sym::ctpop => u128::from(bits.count_ones()), |
36 | sym::ctlz => u128::from(bits.leading_zeros()) - extra, | |
37 | sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra, | |
60c5eb7d XL |
38 | sym::bswap => (bits << extra).swap_bytes(), |
39 | sym::bitreverse => (bits << extra).reverse_bits(), | |
b7449926 XL |
40 | _ => bug!("not a numeric intrinsic: {}", name), |
41 | }; | |
6a06907d | 42 | Scalar::from_uint(bits_out, size) |
b7449926 XL |
43 | } |
44 | ||
e1599b0c XL |
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    // Every nullary intrinsic handled here takes exactly one type parameter.
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        // `type_name::<T>()` -- render the type's name into a static allocation and
        // return a slice covering all of it.
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        // `needs_drop::<T>()` -- whether dropping a `T` runs any drop glue.
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
        }
        // `pref_align_of::<T>()` -- the *preferred* (not ABI-required) alignment, in bytes.
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
        }
        // `type_id::<T>()` -- a `u64` hash identifying the type.
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        // `variant_count::<T>()` -- ADTs report their variant count; types that are
        // still generic cannot be answered yet; everything else has 0 variants.
        sym::variant_count => match tp_ty.kind() {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
107 | ||
ba9703b0 | 108 | impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { |
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    ///
    /// `args` are the already-evaluated operands of the call; `ret` is the return
    /// place plus the basic block to continue in (or `None` for diverging calls).
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`
        match intrinsic_name {
            // Allocate a caller-location value for the nearest untracked caller span
            // and return a reference to it.
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_immediate(location.to_ref(self), dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            // These are the nullary intrinsics: evaluate them through the regular
            // const-eval query (see `eval_nullary_intrinsic`) and copy the result.
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            // Bit-manipulation intrinsics on primitive integers; the actual
            // computation is in `numeric_intrinsic`.
            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let bits = val.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                // The `_nonzero` variants are the same operation with an extra
                // precondition: the input must not be 0 (UB otherwise).
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                self.write_scalar(out_val, dest)?;
            }
            // Checked arithmetic: writes a `(result, overflowed)` pair to `dest`.
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(&args[0])?;
                let rhs = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!("Already checked for int ops"),
                };
                self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
            }
            // Saturating arithmetic: on overflow, clamp to the type's min/max
            // instead of wrapping.
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) = self.overflowing_binary_op(
                    if is_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                let val = if overflowed {
                    let size = l.layout.size;
                    let num_bits = size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either is 0 no
                        // overflow can occur)
                        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(size.unsigned_int_max(), Size::from_bits(num_bits))
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            // Read the discriminant of the enum value behind the reference argument.
            sym::discriminant_value => {
                let place = self.deref_operand(&args[0])?;
                let discr_val = self.read_discriminant(&place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            // `unchecked_*` arithmetic: overflow (or division overflow) is UB,
            // so report it as such instead of wrapping.
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            // Memory copy / fill, delegated to the shared helpers below.
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::write_bytes => {
                self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
            }
            // In-bounds pointer offset: leaving the allocation is an error
            // (checked by `ptr_offset_inbounds`).
            sym::offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_pointer(offset_ptr, dest)?;
            }
            // Wrapping pointer offset: no in-bounds requirement, arithmetic wraps.
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(&args[0])?.to_scalar()?;
                let b = self.read_immediate(&args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not null, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at null, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
                //
                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
                    let a = a.try_to_machine_usize(*self.tcx).unwrap();
                    let b = b.try_to_machine_usize(*self.tcx).unwrap();
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
                    let a = self.scalar_to_ptr(a);
                    let b = self.scalar_to_ptr(b);
                    let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
                    let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
                    // Both pointers must point into the same allocation.
                    if a_alloc_id != b_alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    // Compute the byte difference, then divide (exactly) by the
                    // pointee size to get an element count.
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(&val, &size, dest)?;
                }
            }

            // `transmute` is a bit-copy with a layout-compatibility check.
            sym::transmute => {
                self.copy_op_transmute(&args[0], dest)?;
            }
            sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                // For *all* intrinsics we first check `is_uninhabited` to give a more specific
                // error message.
                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }
                if intrinsic_name == sym::assert_zero_valid
                    && !layout.might_permit_raw_init(self, /*zero:*/ true)
                {
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
                            ty
                        ),
                    )?;
                }
                if intrinsic_name == sym::assert_uninit_valid
                    && !layout.might_permit_raw_init(self, /*zero:*/ false)
                {
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
                            ty
                        ),
                    )?;
                }
            }
            // Build the result vector element-by-element, substituting `elem`
            // at the requested index.
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                let (dest, dest_len) = self.place_to_simd(dest)?;
                assert_eq!(input_len, dest_len, "Return vector length must match input length");
                assert!(
                    index < dest_len,
                    "Index `{}` must be in bounds of vector with length {}`",
                    index,
                    dest_len
                );

                for i in 0..dest_len {
                    let place = self.mplace_index(&dest, i)?;
                    let value =
                        if i == index { *elem } else { self.mplace_index(&input, i)?.into() };
                    self.copy_op(&value, &place.into())?;
                }
            }
            // Copy out the single requested vector element.
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (input, input_len) = self.operand_to_simd(&args[0])?;
                assert!(
                    index < input_len,
                    "index `{}` must be in bounds of vector with length `{}`",
                    index,
                    input_len
                );
                self.copy_op(&self.mplace_index(&input, index)?.into(), dest)?;
            }
            sym::likely | sym::unlikely | sym::black_box => {
                // These just return their argument
                self.copy_op(&args[0], dest)?;
            }
            // `assume(false)` is UB; `assume(true)` is a no-op.
            sym::assume => {
                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            // Byte-wise comparison of the two pointed-to values.
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }

            // Not an intrinsic we emulate here; the machine may still handle it.
            _ => return Ok(false),
        }

        // The intrinsic was handled: log the result and continue in the return block.
        trace!("{:?}", self.dump_place(**dest));
        self.go_to_block(ret);
        Ok(true)
    }
492 | ||
e74abb32 XL |
493 | pub fn exact_div( |
494 | &mut self, | |
6a06907d XL |
495 | a: &ImmTy<'tcx, M::PointerTag>, |
496 | b: &ImmTy<'tcx, M::PointerTag>, | |
497 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
e74abb32 XL |
498 | ) -> InterpResult<'tcx> { |
499 | // Performs an exact division, resulting in undefined behavior where | |
74b04a01 XL |
500 | // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`. |
501 | // First, check x % y != 0 (or if that computation overflows). | |
6a06907d | 502 | let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?; |
74b04a01 XL |
503 | if overflow || res.assert_bits(a.layout.size) != 0 { |
504 | // Then, check if `b` is -1, which is the "MIN / -1" case. | |
e74abb32 | 505 | let minus1 = Scalar::from_int(-1, dest.layout.size); |
60c5eb7d XL |
506 | let b_scalar = b.to_scalar().unwrap(); |
507 | if b_scalar == minus1 { | |
e74abb32 XL |
508 | throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented") |
509 | } else { | |
dfeec247 | 510 | throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b,) |
e74abb32 XL |
511 | } |
512 | } | |
74b04a01 | 513 | // `Rem` says this is all right, so we can let `Div` do its job. |
6a06907d | 514 | self.binop_ignore_overflow(BinOp::Div, &a, &b, dest) |
e74abb32 | 515 | } |
f9f354fc XL |
516 | |
    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
    ///
    /// `offset_count` is measured in *elements* of `pointee_ty`, not bytes, and may be negative.
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        // Check the whole span between the lower pointer and the farther end of the offset.
        let size = offset_bytes.unsigned_abs();
        // This call handles checking for integer/null pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            Align::ONE,
            CheckInAllocMsg::PointerArithmeticTest,
        )?;
        Ok(offset_ptr)
    }
17df50a5 XL |
548 | |
549 | /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`. | |
550 | pub(crate) fn copy_intrinsic( | |
551 | &mut self, | |
552 | src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
553 | dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
554 | count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
555 | nonoverlapping: bool, | |
556 | ) -> InterpResult<'tcx> { | |
557 | let count = self.read_scalar(&count)?.to_machine_usize(self)?; | |
558 | let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?; | |
559 | let (size, align) = (layout.size, layout.align.abi); | |
560 | let size = size.checked_mul(count, self).ok_or_else(|| { | |
561 | err_ub_format!( | |
562 | "overflow computing total size of `{}`", | |
563 | if nonoverlapping { "copy_nonoverlapping" } else { "copy" } | |
564 | ) | |
565 | })?; | |
566 | ||
136023e0 XL |
567 | let src = self.read_pointer(&src)?; |
568 | let dst = self.read_pointer(&dst)?; | |
17df50a5 XL |
569 | |
570 | self.memory.copy(src, align, dst, align, size, nonoverlapping) | |
571 | } | |
136023e0 | 572 | |
a2a8927a XL |
573 | pub(crate) fn write_bytes_intrinsic( |
574 | &mut self, | |
575 | dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
576 | byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
577 | count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
578 | ) -> InterpResult<'tcx> { | |
579 | let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?; | |
580 | ||
581 | let dst = self.read_pointer(&dst)?; | |
582 | let byte = self.read_scalar(&byte)?.to_u8()?; | |
583 | let count = self.read_scalar(&count)?.to_machine_usize(self)?; | |
584 | ||
585 | let len = layout | |
586 | .size | |
587 | .checked_mul(count, self) | |
588 | .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?; | |
589 | ||
590 | let bytes = std::iter::repeat(byte).take(len.bytes_usize()); | |
591 | self.memory.write_bytes(dst, bytes) | |
592 | } | |
593 | ||
136023e0 XL |
594 | pub(crate) fn raw_eq_intrinsic( |
595 | &mut self, | |
596 | lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
597 | rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>, | |
598 | ) -> InterpResult<'tcx, Scalar<M::PointerTag>> { | |
599 | let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?; | |
600 | assert!(!layout.is_unsized()); | |
601 | ||
602 | let lhs = self.read_pointer(lhs)?; | |
603 | let rhs = self.read_pointer(rhs)?; | |
604 | let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?; | |
605 | let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?; | |
606 | Ok(Scalar::from_bool(lhs_bytes == rhs_bytes)) | |
607 | } | |
b7449926 | 608 | } |