use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_target::abi::Abi;

use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
10 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
11 /// Applies the binary operation `op` to the two operands and writes a tuple of the result
12 /// and a boolean signifying the potential overflow to the destination.
14 /// `force_overflow_checks` indicates whether overflow checks should be done even when
15 /// `tcx.sess.overflow_checks()` is `false`.
16 pub fn binop_with_overflow(
19 force_overflow_checks
: bool
,
20 left
: &ImmTy
<'tcx
, M
::Provenance
>,
21 right
: &ImmTy
<'tcx
, M
::Provenance
>,
22 dest
: &PlaceTy
<'tcx
, M
::Provenance
>,
23 ) -> InterpResult
<'tcx
> {
24 let (val
, overflowed
, ty
) = self.overflowing_binary_op(op
, &left
, &right
)?
;
26 self.tcx
.intern_tup(&[ty
, self.tcx
.types
.bool
]),
28 "type mismatch for result of {:?}",
31 // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
32 // component when overflow checking is disabled.
34 overflowed
&& (force_overflow_checks
|| M
::checked_binop_checks_overflow(self));
35 // Write the result to `dest`.
36 if let Abi
::ScalarPair(..) = dest
.layout
.abi
{
37 // We can use the optimized path and avoid `place_field` (which might do
38 // `force_allocation`).
39 let pair
= Immediate
::ScalarPair(val
, Scalar
::from_bool(overflowed
));
40 self.write_immediate(pair
, dest
)?
;
42 assert
!(self.tcx
.sess
.opts
.unstable_opts
.randomize_layout
);
43 // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
44 // do a component-wise write here. This code path is slower than the above because
45 // `place_field` will have to `force_allocate` locals here.
46 let val_field
= self.place_field(&dest
, 0)?
;
47 self.write_scalar(val
, &val_field
)?
;
48 let overflowed_field
= self.place_field(&dest
, 1)?
;
49 self.write_scalar(Scalar
::from_bool(overflowed
), &overflowed_field
)?
;
54 /// Applies the binary operation `op` to the arguments and writes the result to the
56 pub fn binop_ignore_overflow(
59 left
: &ImmTy
<'tcx
, M
::Provenance
>,
60 right
: &ImmTy
<'tcx
, M
::Provenance
>,
61 dest
: &PlaceTy
<'tcx
, M
::Provenance
>,
62 ) -> InterpResult
<'tcx
> {
63 let (val
, _overflowed
, ty
) = self.overflowing_binary_op(op
, left
, right
)?
;
64 assert_eq
!(ty
, dest
.layout
.ty
, "type mismatch for result of {:?}", op
);
65 self.write_scalar(val
, dest
)
69 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
75 ) -> (Scalar
<M
::Provenance
>, bool
, Ty
<'tcx
>) {
76 use rustc_middle
::mir
::BinOp
::*;
78 let res
= match bin_op
{
85 _
=> span_bug
!(self.cur_span(), "Invalid operation on char: {:?}", bin_op
),
87 (Scalar
::from_bool(res
), false, self.tcx
.types
.bool
)
95 ) -> (Scalar
<M
::Provenance
>, bool
, Ty
<'tcx
>) {
96 use rustc_middle
::mir
::BinOp
::*;
98 let res
= match bin_op
{
108 _
=> span_bug
!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op
),
110 (Scalar
::from_bool(res
), false, self.tcx
.types
.bool
)
113 fn binary_float_op
<F
: Float
+ Into
<Scalar
<M
::Provenance
>>>(
119 ) -> (Scalar
<M
::Provenance
>, bool
, Ty
<'tcx
>) {
120 use rustc_middle
::mir
::BinOp
::*;
122 let (val
, ty
) = match bin_op
{
123 Eq
=> (Scalar
::from_bool(l
== r
), self.tcx
.types
.bool
),
124 Ne
=> (Scalar
::from_bool(l
!= r
), self.tcx
.types
.bool
),
125 Lt
=> (Scalar
::from_bool(l
< r
), self.tcx
.types
.bool
),
126 Le
=> (Scalar
::from_bool(l
<= r
), self.tcx
.types
.bool
),
127 Gt
=> (Scalar
::from_bool(l
> r
), self.tcx
.types
.bool
),
128 Ge
=> (Scalar
::from_bool(l
>= r
), self.tcx
.types
.bool
),
129 Add
=> ((l
+ r
).value
.into(), ty
),
130 Sub
=> ((l
- r
).value
.into(), ty
),
131 Mul
=> ((l
* r
).value
.into(), ty
),
132 Div
=> ((l
/ r
).value
.into(), ty
),
133 Rem
=> ((l
% r
).value
.into(), ty
),
134 _
=> span_bug
!(self.cur_span(), "invalid float op: `{:?}`", bin_op
),
142 // passing in raw bits
144 left_layout
: TyAndLayout
<'tcx
>,
146 right_layout
: TyAndLayout
<'tcx
>,
147 ) -> InterpResult
<'tcx
, (Scalar
<M
::Provenance
>, bool
, Ty
<'tcx
>)> {
148 use rustc_middle
::mir
::BinOp
::*;
150 // Shift ops can have an RHS with a different numeric type.
151 if bin_op
== Shl
|| bin_op
== Shr
{
152 let size
= u128
::from(left_layout
.size
.bits());
153 // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
154 // zero-extended form). This matches the codegen backend:
155 // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
156 // The overflow check is also ignorant to the sign:
157 // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
158 // This would behave rather strangely if we had integer types of size 256: a shift by
159 // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
160 // shift by -1i16 though would be considered overflowing. If we had integers of size
161 // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
162 // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
163 // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
164 // consistent results for the same value represented at different bit widths.
165 assert
!(size
<= 128);
166 let overflow
= r
>= size
;
167 // The shift offset is implicitly masked to the type size, to make sure this operation
168 // is always defined. This is the one MIR operator that does *not* directly map to a
169 // single LLVM operation. See
170 // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
171 // for the corresponding truncation in our codegen backends.
173 let r
= u32::try_from(r
).unwrap(); // we masked so this will always fit
174 let result
= if left_layout
.abi
.is_signed() {
175 let l
= self.sign_extend(l
, left_layout
) as i128
;
176 let result
= match bin_op
{
177 Shl
=> l
.checked_shl(r
).unwrap(),
178 Shr
=> l
.checked_shr(r
).unwrap(),
184 Shl
=> l
.checked_shl(r
).unwrap(),
185 Shr
=> l
.checked_shr(r
).unwrap(),
189 let truncated
= self.truncate(result
, left_layout
);
190 return Ok((Scalar
::from_uint(truncated
, left_layout
.size
), overflow
, left_layout
.ty
));
193 // For the remaining ops, the types must be the same on both sides
194 if left_layout
.ty
!= right_layout
.ty
{
197 "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
206 let size
= left_layout
.size
;
208 // Operations that need special treatment for signed integers
209 if left_layout
.abi
.is_signed() {
210 let op
: Option
<fn(&i128
, &i128
) -> bool
> = match bin_op
{
211 Lt
=> Some(i128
::lt
),
212 Le
=> Some(i128
::le
),
213 Gt
=> Some(i128
::gt
),
214 Ge
=> Some(i128
::ge
),
217 if let Some(op
) = op
{
218 let l
= self.sign_extend(l
, left_layout
) as i128
;
219 let r
= self.sign_extend(r
, right_layout
) as i128
;
220 return Ok((Scalar
::from_bool(op(&l
, &r
)), false, self.tcx
.types
.bool
));
222 let op
: Option
<fn(i128
, i128
) -> (i128
, bool
)> = match bin_op
{
223 Div
if r
== 0 => throw_ub
!(DivisionByZero
),
224 Rem
if r
== 0 => throw_ub
!(RemainderByZero
),
225 Div
=> Some(i128
::overflowing_div
),
226 Rem
=> Some(i128
::overflowing_rem
),
227 Add
=> Some(i128
::overflowing_add
),
228 Sub
=> Some(i128
::overflowing_sub
),
229 Mul
=> Some(i128
::overflowing_mul
),
232 if let Some(op
) = op
{
233 let l
= self.sign_extend(l
, left_layout
) as i128
;
234 let r
= self.sign_extend(r
, right_layout
) as i128
;
236 // We need a special check for overflowing Rem and Div since they are *UB*
237 // on overflow, which can happen with "int_min $OP -1".
238 if matches
!(bin_op
, Rem
| Div
) {
239 if l
== size
.signed_int_min() && r
== -1 {
241 throw_ub
!(RemainderOverflow
)
243 throw_ub
!(DivisionOverflow
)
248 let (result
, oflo
) = op(l
, r
);
249 // This may be out-of-bounds for the result type, so we have to truncate ourselves.
250 // If that truncation loses any information, we have an overflow.
251 let result
= result
as u128
;
252 let truncated
= self.truncate(result
, left_layout
);
254 Scalar
::from_uint(truncated
, size
),
255 oflo
|| self.sign_extend(truncated
, left_layout
) != result
,
261 let (val
, ty
) = match bin_op
{
262 Eq
=> (Scalar
::from_bool(l
== r
), self.tcx
.types
.bool
),
263 Ne
=> (Scalar
::from_bool(l
!= r
), self.tcx
.types
.bool
),
265 Lt
=> (Scalar
::from_bool(l
< r
), self.tcx
.types
.bool
),
266 Le
=> (Scalar
::from_bool(l
<= r
), self.tcx
.types
.bool
),
267 Gt
=> (Scalar
::from_bool(l
> r
), self.tcx
.types
.bool
),
268 Ge
=> (Scalar
::from_bool(l
>= r
), self.tcx
.types
.bool
),
270 BitOr
=> (Scalar
::from_uint(l
| r
, size
), left_layout
.ty
),
271 BitAnd
=> (Scalar
::from_uint(l
& r
, size
), left_layout
.ty
),
272 BitXor
=> (Scalar
::from_uint(l ^ r
, size
), left_layout
.ty
),
274 Add
| Sub
| Mul
| Rem
| Div
=> {
275 assert
!(!left_layout
.abi
.is_signed());
276 let op
: fn(u128
, u128
) -> (u128
, bool
) = match bin_op
{
277 Add
=> u128
::overflowing_add
,
278 Sub
=> u128
::overflowing_sub
,
279 Mul
=> u128
::overflowing_mul
,
280 Div
if r
== 0 => throw_ub
!(DivisionByZero
),
281 Rem
if r
== 0 => throw_ub
!(RemainderByZero
),
282 Div
=> u128
::overflowing_div
,
283 Rem
=> u128
::overflowing_rem
,
286 let (result
, oflo
) = op(l
, r
);
287 // Truncate to target type.
288 // If that truncation loses any information, we have an overflow.
289 let truncated
= self.truncate(result
, left_layout
);
291 Scalar
::from_uint(truncated
, size
),
292 oflo
|| truncated
!= result
,
299 "invalid binary op {:?}: {:?}, {:?} (both {:?})",
310 /// Returns the result of the specified operation, whether it overflowed, and
312 pub fn overflowing_binary_op(
315 left
: &ImmTy
<'tcx
, M
::Provenance
>,
316 right
: &ImmTy
<'tcx
, M
::Provenance
>,
317 ) -> InterpResult
<'tcx
, (Scalar
<M
::Provenance
>, bool
, Ty
<'tcx
>)> {
319 "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
327 match left
.layout
.ty
.kind() {
329 assert_eq
!(left
.layout
.ty
, right
.layout
.ty
);
330 let left
= left
.to_scalar();
331 let right
= right
.to_scalar();
332 Ok(self.binary_char_op(bin_op
, left
.to_char()?
, right
.to_char()?
))
335 assert_eq
!(left
.layout
.ty
, right
.layout
.ty
);
336 let left
= left
.to_scalar();
337 let right
= right
.to_scalar();
338 Ok(self.binary_bool_op(bin_op
, left
.to_bool()?
, right
.to_bool()?
))
341 assert_eq
!(left
.layout
.ty
, right
.layout
.ty
);
342 let ty
= left
.layout
.ty
;
343 let left
= left
.to_scalar();
344 let right
= right
.to_scalar();
347 self.binary_float_op(bin_op
, ty
, left
.to_f32()?
, right
.to_f32()?
)
350 self.binary_float_op(bin_op
, ty
, left
.to_f64()?
, right
.to_f64()?
)
354 _
if left
.layout
.ty
.is_integral() => {
355 // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
357 right
.layout
.ty
.is_integral(),
358 "Unexpected types for BinOp: {:?} {:?} {:?}",
364 let l
= left
.to_scalar().to_bits(left
.layout
.size
)?
;
365 let r
= right
.to_scalar().to_bits(right
.layout
.size
)?
;
366 self.binary_int_op(bin_op
, l
, left
.layout
, r
, right
.layout
)
368 _
if left
.layout
.ty
.is_any_ptr() => {
369 // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
370 // (Even when both sides are pointers, their type might differ, see issue #91636)
372 right
.layout
.ty
.is_any_ptr() || right
.layout
.ty
.is_integral(),
373 "Unexpected types for BinOp: {:?} {:?} {:?}",
379 M
::binary_ptr_op(self, bin_op
, left
, right
)
383 "Invalid MIR: bad LHS type for binop: {:?}",
389 /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
394 left
: &ImmTy
<'tcx
, M
::Provenance
>,
395 right
: &ImmTy
<'tcx
, M
::Provenance
>,
396 ) -> InterpResult
<'tcx
, ImmTy
<'tcx
, M
::Provenance
>> {
397 let (val
, _overflow
, ty
) = self.overflowing_binary_op(bin_op
, left
, right
)?
;
398 Ok(ImmTy
::from_scalar(val
, self.layout_of(ty
)?
))
401 /// Returns the result of the specified operation, whether it overflowed, and
403 pub fn overflowing_unary_op(
406 val
: &ImmTy
<'tcx
, M
::Provenance
>,
407 ) -> InterpResult
<'tcx
, (Scalar
<M
::Provenance
>, bool
, Ty
<'tcx
>)> {
408 use rustc_middle
::mir
::UnOp
::*;
410 let layout
= val
.layout
;
411 let val
= val
.to_scalar();
412 trace
!("Running unary op {:?}: {:?} ({:?})", un_op
, val
, layout
.ty
);
414 match layout
.ty
.kind() {
416 let val
= val
.to_bool()?
;
417 let res
= match un_op
{
419 _
=> span_bug
!(self.cur_span(), "Invalid bool op {:?}", un_op
),
421 Ok((Scalar
::from_bool(res
), false, self.tcx
.types
.bool
))
424 let res
= match (un_op
, fty
) {
425 (Neg
, FloatTy
::F32
) => Scalar
::from_f32(-val
.to_f32()?
),
426 (Neg
, FloatTy
::F64
) => Scalar
::from_f64(-val
.to_f64()?
),
427 _
=> span_bug
!(self.cur_span(), "Invalid float op {:?}", un_op
),
429 Ok((res
, false, layout
.ty
))
432 assert
!(layout
.ty
.is_integral());
433 let val
= val
.to_bits(layout
.size
)?
;
434 let (res
, overflow
) = match un_op
{
435 Not
=> (self.truncate(!val
, layout
), false), // bitwise negation, then truncate
437 // arithmetic negation
438 assert
!(layout
.abi
.is_signed());
439 let val
= self.sign_extend(val
, layout
) as i128
;
440 let (res
, overflow
) = val
.overflowing_neg();
441 let res
= res
as u128
;
442 // Truncate to target type.
443 // If that truncation loses any information, we have an overflow.
444 let truncated
= self.truncate(res
, layout
);
445 (truncated
, overflow
|| self.sign_extend(truncated
, layout
) != res
)
448 Ok((Scalar
::from_uint(res
, layout
.size
), overflow
, layout
.ty
))
456 val
: &ImmTy
<'tcx
, M
::Provenance
>,
457 ) -> InterpResult
<'tcx
, ImmTy
<'tcx
, M
::Provenance
>> {
458 let (val
, _overflow
, ty
) = self.overflowing_unary_op(un_op
, val
)?
;
459 Ok(ImmTy
::from_scalar(val
, self.layout_of(ty
)?
))