1 //! Intrinsics and other functions that the miri engine executes without
2 //! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
5 use std
::convert
::TryFrom
;
7 use rustc_hir
::def_id
::DefId
;
8 use rustc_middle
::mir
::{
10 interpret
::{uabs, ConstValue, GlobalId, InterpResult, Scalar}
,
14 use rustc_middle
::ty
::subst
::SubstsRef
;
15 use rustc_middle
::ty
::{Ty, TyCtxt}
;
16 use rustc_span
::symbol
::{sym, Symbol}
;
17 use rustc_target
::abi
::{Abi, LayoutOf as _, Primitive, Size}
;
20 util
::ensure_monomorphic_enough
, CheckInAllocMsg
, ImmTy
, InterpCx
, Machine
, OpTy
, PlaceTy
,
26 fn numeric_intrinsic
<'tcx
, Tag
>(
30 ) -> InterpResult
<'tcx
, Scalar
<Tag
>> {
31 let size
= match kind
{
32 Primitive
::Int(integer
, _
) => integer
.size(),
33 _
=> bug
!("invalid `{}` argument: {:?}", name
, bits
),
35 let extra
= 128 - u128
::from(size
.bits());
36 let bits_out
= match name
{
37 sym
::ctpop
=> u128
::from(bits
.count_ones()),
38 sym
::ctlz
=> u128
::from(bits
.leading_zeros()) - extra
,
39 sym
::cttz
=> u128
::from((bits
<< extra
).trailing_zeros()) - extra
,
40 sym
::bswap
=> (bits
<< extra
).swap_bytes(),
41 sym
::bitreverse
=> (bits
<< extra
).reverse_bits(),
42 _
=> bug
!("not a numeric intrinsic: {}", name
),
44 Ok(Scalar
::from_uint(bits_out
, size
))
47 /// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
48 /// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
// NOTE(review): this extraction is lossy — the `tcx: TyCtxt<'tcx>` and `def_id: DefId`
// parameter lines, the `Ok(match name {` header, several match-arm headers, and most of the
// `ty::*` variant list are missing from this view; the surviving lines are kept byte-identical.
49 crate fn eval_nullary_intrinsic
<'tcx
>(
51 param_env
: ty
::ParamEnv
<'tcx
>,
53 substs
: SubstsRef
<'tcx
>,
54 ) -> InterpResult
<'tcx
, ConstValue
<'tcx
>> {
// The single type argument `T` that every nullary intrinsic is instantiated with.
55 let tp_ty
= substs
.type_at(0);
// Look up which intrinsic this is by the name of its `DefId`.
56 let name
= tcx
.item_name(def_id
);
// `type_name` arm (its `sym::type_name =>` header line is missing in this view):
// producing the name string requires the type to be fully monomorphic.
59 ensure_monomorphic_enough(tcx
, tp_ty
)?
;
60 let alloc
= type_name
::alloc_type_name(tcx
, tp_ty
);
// The name is returned as a slice of the freshly allocated string data.
61 ConstValue
::Slice { data: alloc, start: 0, end: alloc.len() }
// `needs_drop`: whether dropping a value of type `T` has any effect.
63 sym
::needs_drop
=> ConstValue
::from_bool(tp_ty
.needs_drop(tcx
, param_env
)),
// `min_align_of`/`pref_align_of`: both read the type's layout; a layout
// failure is surfaced as an interpreter error rather than an ICE.
64 sym
::min_align_of
| sym
::pref_align_of
=> {
65 let layout
= tcx
.layout_of(param_env
.and(tp_ty
)).map_err(|e
| err_inval
!(Layout(e
)))?
;
67 sym
::pref_align_of
=> layout
.align
.pref
.bytes(),
68 sym
::min_align_of
=> layout
.align
.abi
.bytes(),
71 ConstValue
::from_machine_usize(n
, &tcx
)
// `type_id` arm (header missing in this view): a stable hash identifying the
// monomorphic type; again requires monomorphization.
74 ensure_monomorphic_enough(tcx
, tp_ty
)?
;
75 ConstValue
::from_u64(tcx
.type_id_hash(tp_ty
))
// `variant_count`: ADTs report their actual variant count; still-generic kinds
// (the `ty::Infer(_)` line below is the tail of that or-pattern) bail out as
// TooGeneric; the remaining concrete non-ADT kinds (mostly elided from this
// view) all report 0.
77 sym
::variant_count
=> match tp_ty
.kind() {
78 ty
::Adt(ref adt
, _
) => ConstValue
::from_machine_usize(adt
.variants
.len() as u64, &tcx
),
84 | ty
::Infer(_
) => throw_inval
!(TooGeneric
),
100 | ty
::Generator(_
, _
, _
)
101 | ty
::GeneratorWitness(_
)
104 | ty
::Error(_
) => ConstValue
::from_machine_usize(0u64, &tcx
),
// Reaching this arm with any other name is a compiler bug: callers only route
// genuine nullary intrinsics here.
106 other
=> bug
!("`{}` is not a zero arg intrinsic", other
),
110 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
111 /// Returns `true` if emulation happened.
112 /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
113 /// intrinsic handling.
// NOTE(review): this extraction is lossy — many original lines (the `&mut self`
// receiver, several match-arm headers, error-reporting bodies, and closing
// braces) are missing from this view. Surviving lines are kept byte-identical.
114 pub fn emulate_intrinsic(
116 instance
: ty
::Instance
<'tcx
>,
117 args
: &[OpTy
<'tcx
, M
::PointerTag
>],
118 ret
: Option
<(PlaceTy
<'tcx
, M
::PointerTag
>, mir
::BasicBlock
)>,
119 ) -> InterpResult
<'tcx
, bool
> {
// Identify the intrinsic being called from the instance's DefId.
120 let substs
= instance
.substs
;
121 let intrinsic_name
= self.tcx
.item_name(instance
.def_id());
123 // First handle intrinsics without return place.
// A `None` return place means the intrinsic diverges; only `transmute`
// (to an uninhabited type, which is UB) and `abort` are emulated here.
124 let (dest
, ret
) = match ret
{
125 None
=> match intrinsic_name
{
126 sym
::transmute
=> throw_ub_format
!("transmuting to uninhabited type"),
127 sym
::abort
=> M
::abort(self, "the program aborted execution".to_owned())?
,
128 // Unsupported diverging intrinsic.
129 _
=> return Ok(false),
134 // Keep the patterns in this match ordered the same as the list in
135 // `src/librustc_middle/ty/constness.rs`
136 match intrinsic_name
{
// `caller_location`: materialize a `Location` value for the nearest caller
// that is not `#[track_caller]`-transparent, and return a pointer to it.
137 sym
::caller_location
=> {
138 let span
= self.find_closest_untracked_caller_location();
139 let location
= self.alloc_caller_location_for_span(span
);
140 self.write_scalar(location
.ptr
, dest
)?
;
// `min_align_of_val`/`size_of_val`: dynamic size/alignment of the pointee,
// computed from the place's metadata (e.g. slice length, vtable).
143 sym
::min_align_of_val
| sym
::size_of_val
=> {
144 let place
= self.deref_operand(args
[0])?
;
145 let (size
, align
) = self
146 .size_and_align_of(place
.meta
, place
.layout
)?
147 .ok_or_else(|| err_unsup_format
!("`extern type` does not have known layout"))?
;
149 let result
= match intrinsic_name
{
150 sym
::min_align_of_val
=> align
.bytes(),
151 sym
::size_of_val
=> size
.bytes(),
155 self.write_scalar(Scalar
::from_machine_usize(result
, self), dest
)?
;
// Nullary intrinsics (the or-pattern's earlier alternatives are missing in
// this view): evaluated via a const-eval query so `eval_nullary_intrinsic`
// above does the real work, then the result is copied into `dest`.
163 | sym
::variant_count
=> {
164 let gid
= GlobalId { instance, promoted: None }
;
// Pick the rustc type of the constant produced by each nullary intrinsic.
165 let ty
= match intrinsic_name
{
166 sym
::min_align_of
| sym
::pref_align_of
| sym
::variant_count
=> {
169 sym
::needs_drop
=> self.tcx
.types
.bool
,
170 sym
::type_id
=> self.tcx
.types
.u64,
171 sym
::type_name
=> self.tcx
.mk_static_str(),
172 _
=> bug
!("already checked for nullary intrinsics"),
175 self.tcx
.const_eval_global_id(self.param_env
, gid
, Some(self.tcx
.span
))?
;
176 let const_
= ty
::Const { val: ty::ConstKind::Value(val), ty }
;
177 let val
= self.const_to_op(&const_
, None
)?
;
178 self.copy_op(val
, dest
)?
;
// Bit-manipulation intrinsics (earlier alternatives of this or-pattern are
// missing in this view): read the operand's raw bits, check the `*_nonzero`
// UB case, then delegate to the free function `numeric_intrinsic`.
187 | sym
::bitreverse
=> {
188 let ty
= substs
.type_at(0);
189 let layout_of
= self.layout_of(ty
)?
;
190 let val
= self.read_scalar(args
[0])?
.check_init()?
;
191 let bits
= self.force_bits(val
, layout_of
.size
)?
;
192 let kind
= match layout_of
.abi
{
193 Abi
::Scalar(ref scalar
) => scalar
.value
,
196 "{} called on invalid type {:?}",
// `cttz_nonzero`/`ctlz_nonzero` map onto their plain variants but are UB on 0.
201 let (nonzero
, intrinsic_name
) = match intrinsic_name
{
202 sym
::cttz_nonzero
=> (true, sym
::cttz
),
203 sym
::ctlz_nonzero
=> (true, sym
::ctlz
),
204 other
=> (false, other
),
206 if nonzero
&& bits
== 0 {
207 throw_ub_format
!("`{}_nonzero` called on 0", intrinsic_name
);
209 let out_val
= numeric_intrinsic(intrinsic_name
, bits
, kind
)?
;
210 self.write_scalar(out_val
, dest
)?
;
// Overflow-reporting arithmetic: result is a `(value, overflowed)` pair.
212 sym
::add_with_overflow
| sym
::sub_with_overflow
| sym
::mul_with_overflow
=> {
213 let lhs
= self.read_immediate(args
[0])?
;
214 let rhs
= self.read_immediate(args
[1])?
;
215 let bin_op
= match intrinsic_name
{
216 sym
::add_with_overflow
=> BinOp
::Add
,
217 sym
::sub_with_overflow
=> BinOp
::Sub
,
218 sym
::mul_with_overflow
=> BinOp
::Mul
,
219 _
=> bug
!("Already checked for int ops"),
221 self.binop_with_overflow(bin_op
, lhs
, rhs
, dest
)?
;
// Saturating arithmetic: on overflow, clamp to the type's extreme value
// whose sign is deduced from the first operand.
223 sym
::saturating_add
| sym
::saturating_sub
=> {
224 let l
= self.read_immediate(args
[0])?
;
225 let r
= self.read_immediate(args
[1])?
;
226 let is_add
= intrinsic_name
== sym
::saturating_add
;
227 let (val
, overflowed
, _ty
) =
228 self.overflowing_binary_op(if is_add { BinOp::Add }
else { BinOp::Sub }
, l
, r
)?
;
229 let val
= if overflowed
{
230 let num_bits
= l
.layout
.size
.bits();
231 if l
.layout
.abi
.is_signed() {
232 // For signed ints the saturated value depends on the sign of the first
233 // term since the sign of the second term can be inferred from this and
234 // the fact that the operation has overflowed (if either is 0 no
235 // overflow can occur)
236 let first_term
: u128
= self.force_bits(l
.to_scalar()?
, l
.layout
.size
)?
;
237 let first_term_positive
= first_term
& (1 << (num_bits
- 1)) == 0;
238 if first_term_positive
{
239 // Negative overflow not possible since the positive first term
240 // can only increase an (in range) negative term for addition
241 // or corresponding negated positive term for subtraction
243 (1u128 << (num_bits
- 1)) - 1, // max positive
244 Size
::from_bits(num_bits
),
247 // Positive overflow not possible for similar reason
249 Scalar
::from_uint(1u128 << (num_bits
- 1), Size
::from_bits(num_bits
))
// Unsigned saturation (the branch header is missing in this view):
// overflow clamps to the type's MAX for add, 0 for sub.
256 u128
::MAX
>> (128 - num_bits
),
257 Size
::from_bits(num_bits
),
261 Scalar
::from_uint(0u128, Size
::from_bits(num_bits
))
267 self.write_scalar(val
, dest
)?
;
// `discriminant_value`: read and return the pointee enum's discriminant.
269 sym
::discriminant_value
=> {
270 let place
= self.deref_operand(args
[0])?
;
271 let discr_val
= self.read_discriminant(place
.into())?
.0;
272 self.write_scalar(discr_val
, dest
)?
;
// Unchecked arithmetic (or-pattern's earlier alternatives missing in this
// view): overflow or out-of-range shift here is immediate UB.
280 | sym
::unchecked_rem
=> {
281 let l
= self.read_immediate(args
[0])?
;
282 let r
= self.read_immediate(args
[1])?
;
283 let bin_op
= match intrinsic_name
{
284 sym
::unchecked_shl
=> BinOp
::Shl
,
285 sym
::unchecked_shr
=> BinOp
::Shr
,
286 sym
::unchecked_add
=> BinOp
::Add
,
287 sym
::unchecked_sub
=> BinOp
::Sub
,
288 sym
::unchecked_mul
=> BinOp
::Mul
,
289 sym
::unchecked_div
=> BinOp
::Div
,
290 sym
::unchecked_rem
=> BinOp
::Rem
,
291 _
=> bug
!("Already checked for int ops"),
293 let (val
, overflowed
, _ty
) = self.overflowing_binary_op(bin_op
, l
, r
)?
;
// On overflow, shifts get a dedicated message quoting the shift amount.
295 let layout
= self.layout_of(substs
.type_at(0))?
;
296 let r_val
= self.force_bits(r
.to_scalar()?
, layout
.size
)?
;
297 if let sym
::unchecked_shl
| sym
::unchecked_shr
= intrinsic_name
{
298 throw_ub_format
!("overflowing shift by {} in `{}`", r_val
, intrinsic_name
);
300 throw_ub_format
!("overflow executing `{}`", intrinsic_name
);
303 self.write_scalar(val
, dest
)?
;
// Bit rotation, implemented as two shifts with wrap-safe shift amounts.
305 sym
::rotate_left
| sym
::rotate_right
=> {
306 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
307 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
308 let layout
= self.layout_of(substs
.type_at(0))?
;
309 let val
= self.read_scalar(args
[0])?
.check_init()?
;
310 let val_bits
= self.force_bits(val
, layout
.size
)?
;
311 let raw_shift
= self.read_scalar(args
[1])?
.check_init()?
;
312 let raw_shift_bits
= self.force_bits(raw_shift
, layout
.size
)?
;
313 let width_bits
= u128
::from(layout
.size
.bits());
314 let shift_bits
= raw_shift_bits
% width_bits
;
315 let inv_shift_bits
= (width_bits
- shift_bits
) % width_bits
;
316 let result_bits
= if intrinsic_name
== sym
::rotate_left
{
317 (val_bits
<< shift_bits
) | (val_bits
>> inv_shift_bits
)
319 (val_bits
>> shift_bits
) | (val_bits
<< inv_shift_bits
)
321 let truncated_bits
= self.truncate(result_bits
, layout
);
322 let result
= Scalar
::from_uint(truncated_bits
, layout
.size
);
323 self.write_scalar(result
, dest
)?
;
// Inbounds pointer offset (the `sym::offset =>` arm header is missing in this
// view — presumably this is that arm; verify against upstream): the bounds
// check is done by `ptr_offset_inbounds` below.
326 let ptr
= self.read_scalar(args
[0])?
.check_init()?
;
327 let offset_count
= self.read_scalar(args
[1])?
.to_machine_isize(self)?
;
328 let pointee_ty
= substs
.type_at(0);
330 let offset_ptr
= self.ptr_offset_inbounds(ptr
, pointee_ty
, offset_count
)?
;
331 self.write_scalar(offset_ptr
, dest
)?
;
// `arith_offset`: like `offset` but wrapping, with no inbounds requirement.
333 sym
::arith_offset
=> {
334 let ptr
= self.read_scalar(args
[0])?
.check_init()?
;
335 let offset_count
= self.read_scalar(args
[1])?
.to_machine_isize(self)?
;
336 let pointee_ty
= substs
.type_at(0);
338 let pointee_size
= i64::try_from(self.layout_of(pointee_ty
)?
.size
.bytes()).unwrap();
339 let offset_bytes
= offset_count
.wrapping_mul(pointee_size
);
340 let offset_ptr
= ptr
.ptr_wrapping_signed_offset(offset_bytes
, self);
341 self.write_scalar(offset_ptr
, dest
)?
;
// `ptr_offset_from`: element-count distance between two pointers into the
// same allocation; byte distance must divide evenly (checked by `exact_div`).
343 sym
::ptr_offset_from
=> {
344 let a
= self.read_immediate(args
[0])?
.to_scalar()?
;
345 let b
= self.read_immediate(args
[1])?
.to_scalar()?
;
347 // Special case: if both scalars are *equal integers*
348 // and not NULL, we pretend there is an allocation of size 0 right there,
349 // and their offset is 0. (There's never a valid object at NULL, making it an
350 // exception from the exception.)
351 // This is the dual to the special exception for offset-by-0
352 // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
354 // Control flow is weird because we cannot early-return (to reach the
355 // `go_to_block` at the end).
356 let done
= if a
.is_bits() && b
.is_bits() {
357 let a
= a
.to_machine_usize(self)?
;
358 let b
= b
.to_machine_usize(self)?
;
359 if a
== b
&& a
!= 0 {
360 self.write_scalar(Scalar
::from_machine_isize(0, self), dest
)?
;
370 // General case: we need two pointers.
371 let a
= self.force_ptr(a
)?
;
372 let b
= self.force_ptr(b
)?
;
// Pointers into different allocations cannot be subtracted.
373 if a
.alloc_id
!= b
.alloc_id
{
375 "ptr_offset_from cannot compute offset of pointers into different \
// Subtract the byte offsets, then divide by the element size; `exact_div`
// reports UB if the distance is not a whole number of elements.
379 let usize_layout
= self.layout_of(self.tcx
.types
.usize)?
;
380 let isize_layout
= self.layout_of(self.tcx
.types
.isize)?
;
381 let a_offset
= ImmTy
::from_uint(a
.offset
.bytes(), usize_layout
);
382 let b_offset
= ImmTy
::from_uint(b
.offset
.bytes(), usize_layout
);
383 let (val
, _overflowed
, _ty
) =
384 self.overflowing_binary_op(BinOp
::Sub
, a_offset
, b_offset
)?
;
385 let pointee_layout
= self.layout_of(substs
.type_at(0))?
;
386 let val
= ImmTy
::from_scalar(val
, isize_layout
);
387 let size
= ImmTy
::from_int(pointee_layout
.size
.bytes(), isize_layout
);
388 self.exact_div(val
, size
, dest
)?
;
// Presumably the `sym::transmute` arm (header missing in this view):
// a transmute with a return place is just a layout-reinterpreting copy.
393 self.copy_op_transmute(args
[0], dest
)?
;
// `assert_inhabited`: abort const-eval if `T` has no valid values.
395 sym
::assert_inhabited
=> {
396 let ty
= instance
.substs
.type_at(0);
397 let layout
= self.layout_of(ty
)?
;
399 if layout
.abi
.is_uninhabited() {
400 // The run-time intrinsic panics just to get a good backtrace; here we abort
401 // since there is no problem showing a backtrace even for aborts.
405 "aborted execution: attempted to instantiate uninhabited type `{}`",
// `simd_insert`: copy the vector, replacing lane `index` with `elem`
// (lane/type validations are partially elided in this view).
411 sym
::simd_insert
=> {
412 let index
= u64::from(self.read_scalar(args
[1])?
.to_u32()?
);
415 let (len
, e_ty
) = input
.layout
.ty
.simd_size_and_type(*self.tcx
);
418 "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
424 input
.layout
, dest
.layout
,
425 "Return type `{}` must match vector type `{}`",
426 dest
.layout
.ty
, input
.layout
.ty
429 elem
.layout
.ty
, e_ty
,
430 "Scalar element type `{}` must match vector element type `{}`",
// Per-lane copy: the selected lane takes `elem`, all others come from input.
435 let place
= self.place_index(dest
, i
)?
;
436 let value
= if i
== index { elem }
else { self.operand_index(input, i)? }
;
437 self.copy_op(value
, place
)?
;
// `simd_extract`: read lane `index` out of the vector argument.
440 sym
::simd_extract
=> {
441 let index
= u64::from(self.read_scalar(args
[1])?
.to_u32()?
);
442 let (len
, e_ty
) = args
[0].layout
.ty
.simd_size_and_type(*self.tcx
);
445 "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
451 e_ty
, dest
.layout
.ty
,
452 "Return type `{}` must match vector element type `{}`",
455 self.copy_op(self.operand_index(args
[0], index
)?
, dest
)?
;
// Branch-prediction hints are no-ops for the interpreter.
457 sym
::likely
| sym
::unlikely
=> {
458 // These just return their argument
459 self.copy_op(args
[0], dest
)?
;
// Presumably the `sym::assume` arm (header missing in this view):
// calling `assume(false)` is immediate UB.
462 let cond
= self.read_scalar(args
[0])?
.check_init()?
.to_bool()?
;
464 throw_ub_format
!("`assume` intrinsic called with `false`");
// Anything else is left for the machine-specific intrinsic handler.
467 _
=> return Ok(false),
// Emulation succeeded: log the written place and jump to the return block.
470 trace
!("{:?}", self.dump_place(*dest
));
471 self.go_to_block(ret
);
477 a
: ImmTy
<'tcx
, M
::PointerTag
>,
478 b
: ImmTy
<'tcx
, M
::PointerTag
>,
479 dest
: PlaceTy
<'tcx
, M
::PointerTag
>,
480 ) -> InterpResult
<'tcx
> {
481 // Performs an exact division, resulting in undefined behavior where
482 // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
483 // First, check x % y != 0 (or if that computation overflows).
484 let (res
, overflow
, _ty
) = self.overflowing_binary_op(BinOp
::Rem
, a
, b
)?
;
485 if overflow
|| res
.assert_bits(a
.layout
.size
) != 0 {
486 // Then, check if `b` is -1, which is the "MIN / -1" case.
487 let minus1
= Scalar
::from_int(-1, dest
.layout
.size
);
488 let b_scalar
= b
.to_scalar().unwrap();
489 if b_scalar
== minus1
{
490 throw_ub_format
!("exact_div: result of dividing MIN by -1 cannot be represented")
492 throw_ub_format
!("exact_div: {} cannot be divided by {} without remainder", a
, b
,)
495 // `Rem` says this is all right, so we can let `Div` do its job.
496 self.binop_ignore_overflow(BinOp
::Div
, a
, b
, dest
)
499 /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
500 /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
501 /// 0, so offset-by-0 (and only 0) is okay -- except that NULL cannot be offset by _any_ value.
// NOTE(review): lossy extraction — the `&self` receiver and `offset_count: i64`
// parameter lines are missing from this view, and the function body continues
// past the end of this chunk (the `check_ptr_access_align` call is truncated).
502 pub fn ptr_offset_inbounds(
504 ptr
: Scalar
<M
::PointerTag
>,
505 pointee_ty
: Ty
<'tcx
>,
507 ) -> InterpResult
<'tcx
, Scalar
<M
::PointerTag
>> {
508 // We cannot overflow i64 as a type's size must be <= isize::MAX.
509 let pointee_size
= i64::try_from(self.layout_of(pointee_ty
)?
.size
.bytes()).unwrap();
510 // The computed offset, in bytes, cannot overflow an isize.
// (The `let offset_bytes =` binding line is missing from this view; the
// checked multiplication below turns overflow into a pointer-arithmetic UB error.)
512 offset_count
.checked_mul(pointee_size
).ok_or(err_ub
!(PointerArithOverflow
))?
;
513 // The offset being in bounds cannot rely on "wrapping around" the address space.
514 // So, first rule out overflows in the pointer arithmetic.
515 let offset_ptr
= ptr
.ptr_signed_offset(offset_bytes
, self)?
;
516 // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
517 // memory between these pointers must be accessible. Note that we do not require the
518 // pointers to be properly aligned (unlike a read/write operation).
// The range check starts from whichever of the two pointers is lower in the allocation.
519 let min_ptr
= if offset_bytes
>= 0 { ptr }
else { offset_ptr }
;
520 let size
: u64 = uabs(offset_bytes
);
521 // This call handles checking for integer/NULL pointers.
522 self.memory
.check_ptr_access_align(
524 Size
::from_bytes(size
),
526 CheckInAllocMsg
::InboundsTest
,