1 //! Intrinsics and other functions that the miri engine executes without
2 //! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
5 use std
::convert
::TryFrom
;
7 use rustc_hir
::def_id
::DefId
;
8 use rustc_middle
::mir
::{
10 interpret
::{ConstValue, GlobalId, InterpResult, Scalar}
,
14 use rustc_middle
::ty
::subst
::SubstsRef
;
15 use rustc_middle
::ty
::{Ty, TyCtxt}
;
16 use rustc_span
::symbol
::{sym, Symbol}
;
17 use rustc_target
::abi
::{Abi, LayoutOf as _, Primitive, Size}
;
20 util
::ensure_monomorphic_enough
, CheckInAllocMsg
, ImmTy
, InterpCx
, Machine
, OpTy
, PlaceTy
,
26 fn numeric_intrinsic
<'tcx
, Tag
>(
30 ) -> InterpResult
<'tcx
, Scalar
<Tag
>> {
31 let size
= match kind
{
32 Primitive
::Int(integer
, _
) => integer
.size(),
33 _
=> bug
!("invalid `{}` argument: {:?}", name
, bits
),
35 let extra
= 128 - u128
::from(size
.bits());
36 let bits_out
= match name
{
37 sym
::ctpop
=> u128
::from(bits
.count_ones()),
38 sym
::ctlz
=> u128
::from(bits
.leading_zeros()) - extra
,
39 sym
::cttz
=> u128
::from((bits
<< extra
).trailing_zeros()) - extra
,
40 sym
::bswap
=> (bits
<< extra
).swap_bytes(),
41 sym
::bitreverse
=> (bits
<< extra
).reverse_bits(),
42 _
=> bug
!("not a numeric intrinsic: {}", name
),
44 Ok(Scalar
::from_uint(bits_out
, size
))
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
///
/// `substs` carries the type argument the intrinsic is instantiated with
/// (retrieved via `substs.type_at(0)`); `def_id` identifies the intrinsic so
/// its name can be looked up.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            // A type name is only meaningful once the type is fully monomorphic.
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
        sym::min_align_of | sym::pref_align_of => {
            // Layout errors are surfaced as `InvalidProgram` interp errors.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            let n = match name {
                sym::pref_align_of => layout.align.pref.bytes(),
                sym::min_align_of => layout.align.abi.bytes(),
                _ => bug!(),
            };
            ConstValue::from_machine_usize(n, &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            // Not-yet-concrete types: defer evaluation until monomorphization.
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            // Every non-ADT type reports a variant count of zero.
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
110 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    ///
    /// `args` are the already-evaluated operands of the intrinsic call; `ret` is
    /// the return place plus the basic block to continue in afterwards, or
    /// `None` for a diverging intrinsic call.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_scalar(location.ptr, dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
                // dereferencable!
                let place = self.ref_to_mplace(self.read_immediate(args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            // Nullary intrinsics: evaluated via the global const-eval query
            // and the result copied into the destination.
            sym::min_align_of
            | sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::min_align_of | sym::pref_align_of | sym::variant_count => {
                        self.tcx.types.usize
                    }
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
                let val = self.const_to_op(&const_, None)?;
                self.copy_op(val, dest)?;
            }

            // Numeric bit-manipulation intrinsics, delegated to `numeric_intrinsic`
            // after the `_nonzero` variants have been checked for a zero operand.
            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(args[0])?.check_init()?;
                let bits = self.force_bits(val, layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(ref scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(args[0])?;
                let rhs = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!("Already checked for int ops"),
                };
                self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) =
                    self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
                let val = if overflowed {
                    let num_bits = l.layout.size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either is 0 no
                        // overflow can occur)
                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(
                                u128::MAX >> (128 - num_bits),
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(args[0])?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            // `unchecked_*`: overflow or division-by-zero is UB, reported here.
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(args[0])?.check_init()?;
                let val_bits = self.force_bits(val, layout.size)?;
                let raw_shift = self.read_scalar(args[1])?.check_init()?;
                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy | sym::copy_nonoverlapping => {
                let elem_ty = instance.substs.type_at(0);
                let elem_layout = self.layout_of(elem_ty)?;
                let count = self.read_scalar(args[2])?.to_machine_usize(self)?;
                let elem_align = elem_layout.align.abi;

                let size = elem_layout.size.checked_mul(count, self).ok_or_else(|| {
                    err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
                })?;
                let src = self.read_scalar(args[0])?.check_init()?;
                let src = self.memory.check_ptr_access(src, size, elem_align)?;
                let dest = self.read_scalar(args[1])?.check_init()?;
                let dest = self.memory.check_ptr_access(dest, size, elem_align)?;

                // `check_ptr_access` returns `None` for zero-sized accesses,
                // in which case there is nothing to copy.
                if let (Some(src), Some(dest)) = (src, dest) {
                    self.memory.copy(
                        src,
                        dest,
                        size,
                        intrinsic_name == sym::copy_nonoverlapping,
                    )?;
                }
            }
            sym::offset => {
                let ptr = self.read_scalar(args[0])?.check_init()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_scalar(args[0])?.check_init()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                // Unlike `offset`, this is wrapping arithmetic with no
                // inbounds requirement.
                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.ptr_wrapping_signed_offset(offset_bytes, self);
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(args[0])?.to_scalar()?;
                let b = self.read_immediate(args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not NULL, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at NULL, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).

                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if a.is_bits() && b.is_bits() {
                    let a = a.to_machine_usize(self)?;
                    let b = b.to_machine_usize(self)?;
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
                    let a = self.force_ptr(a)?;
                    let b = self.force_ptr(b)?;
                    if a.alloc_id != b.alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(val, size, dest)?;
                }
            }

            sym::transmute => {
                self.copy_op_transmute(args[0], dest)?;
            }
            sym::assert_inhabited => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let elem = args[2];
                let input = args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "Return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "Scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );

                // Copy the input vector element-wise, substituting `elem` at `index`.
                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                    self.copy_op(value, place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "Return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(self.operand_index(args[0], index)?, dest)?;
            }
            sym::likely | sym::unlikely => {
                // These just return their argument
                self.copy_op(args[0], dest)?;
            }
            sym::assume => {
                let cond = self.read_scalar(args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(*dest));
        self.go_to_block(ret);
        Ok(true)
    }
502 a
: ImmTy
<'tcx
, M
::PointerTag
>,
503 b
: ImmTy
<'tcx
, M
::PointerTag
>,
504 dest
: PlaceTy
<'tcx
, M
::PointerTag
>,
505 ) -> InterpResult
<'tcx
> {
506 // Performs an exact division, resulting in undefined behavior where
507 // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
508 // First, check x % y != 0 (or if that computation overflows).
509 let (res
, overflow
, _ty
) = self.overflowing_binary_op(BinOp
::Rem
, a
, b
)?
;
510 if overflow
|| res
.assert_bits(a
.layout
.size
) != 0 {
511 // Then, check if `b` is -1, which is the "MIN / -1" case.
512 let minus1
= Scalar
::from_int(-1, dest
.layout
.size
);
513 let b_scalar
= b
.to_scalar().unwrap();
514 if b_scalar
== minus1
{
515 throw_ub_format
!("exact_div: result of dividing MIN by -1 cannot be represented")
517 throw_ub_format
!("exact_div: {} cannot be divided by {} without remainder", a
, b
,)
520 // `Rem` says this is all right, so we can let `Div` do its job.
521 self.binop_ignore_overflow(BinOp
::Div
, a
, b
, dest
)
524 /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
525 /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
526 /// 0, so offset-by-0 (and only 0) is okay -- except that NULL cannot be offset by _any_ value.
527 pub fn ptr_offset_inbounds(
529 ptr
: Scalar
<M
::PointerTag
>,
530 pointee_ty
: Ty
<'tcx
>,
532 ) -> InterpResult
<'tcx
, Scalar
<M
::PointerTag
>> {
533 // We cannot overflow i64 as a type's size must be <= isize::MAX.
534 let pointee_size
= i64::try_from(self.layout_of(pointee_ty
)?
.size
.bytes()).unwrap();
535 // The computed offset, in bytes, cannot overflow an isize.
537 offset_count
.checked_mul(pointee_size
).ok_or(err_ub
!(PointerArithOverflow
))?
;
538 // The offset being in bounds cannot rely on "wrapping around" the address space.
539 // So, first rule out overflows in the pointer arithmetic.
540 let offset_ptr
= ptr
.ptr_signed_offset(offset_bytes
, self)?
;
541 // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
542 // memory between these pointers must be accessible. Note that we do not require the
543 // pointers to be properly aligned (unlike a read/write operation).
544 let min_ptr
= if offset_bytes
>= 0 { ptr }
else { offset_ptr }
;
545 let size
= offset_bytes
.unsigned_abs();
546 // This call handles checking for integer/NULL pointers.
547 self.memory
.check_ptr_access_align(
549 Size
::from_bytes(size
),
551 CheckInAllocMsg
::InboundsTest
,