// compiler/rustc_const_eval/src/interpret/operator.rs
// (rustc 1.63.0 upstream source, obtained via the git.proxmox.com rustc.git mirror)
1 use std::convert::TryFrom;
2
3 use rustc_apfloat::Float;
4 use rustc_middle::mir;
5 use rustc_middle::mir::interpret::{InterpResult, Scalar};
6 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
7 use rustc_middle::ty::{self, FloatTy, Ty};
8
9 use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
10
11 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
12 /// Applies the binary operation `op` to the two operands and writes a tuple of the result
13 /// and a boolean signifying the potential overflow to the destination.
14 pub fn binop_with_overflow(
15 &mut self,
16 op: mir::BinOp,
17 left: &ImmTy<'tcx, M::PointerTag>,
18 right: &ImmTy<'tcx, M::PointerTag>,
19 dest: &PlaceTy<'tcx, M::PointerTag>,
20 ) -> InterpResult<'tcx> {
21 let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
22 debug_assert_eq!(
23 self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
24 dest.layout.ty,
25 "type mismatch for result of {:?}",
26 op,
27 );
28 let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
29 self.write_immediate(val, dest)
30 }
31
32 /// Applies the binary operation `op` to the arguments and writes the result to the
33 /// destination.
34 pub fn binop_ignore_overflow(
35 &mut self,
36 op: mir::BinOp,
37 left: &ImmTy<'tcx, M::PointerTag>,
38 right: &ImmTy<'tcx, M::PointerTag>,
39 dest: &PlaceTy<'tcx, M::PointerTag>,
40 ) -> InterpResult<'tcx> {
41 let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
42 assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
43 self.write_scalar(val, dest)
44 }
45 }
46
47 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
48 fn binary_char_op(
49 &self,
50 bin_op: mir::BinOp,
51 l: char,
52 r: char,
53 ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
54 use rustc_middle::mir::BinOp::*;
55
56 let res = match bin_op {
57 Eq => l == r,
58 Ne => l != r,
59 Lt => l < r,
60 Le => l <= r,
61 Gt => l > r,
62 Ge => l >= r,
63 _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
64 };
65 (Scalar::from_bool(res), false, self.tcx.types.bool)
66 }
67
68 fn binary_bool_op(
69 &self,
70 bin_op: mir::BinOp,
71 l: bool,
72 r: bool,
73 ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
74 use rustc_middle::mir::BinOp::*;
75
76 let res = match bin_op {
77 Eq => l == r,
78 Ne => l != r,
79 Lt => l < r,
80 Le => l <= r,
81 Gt => l > r,
82 Ge => l >= r,
83 BitAnd => l & r,
84 BitOr => l | r,
85 BitXor => l ^ r,
86 _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
87 };
88 (Scalar::from_bool(res), false, self.tcx.types.bool)
89 }
90
91 fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
92 &self,
93 bin_op: mir::BinOp,
94 ty: Ty<'tcx>,
95 l: F,
96 r: F,
97 ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
98 use rustc_middle::mir::BinOp::*;
99
100 let (val, ty) = match bin_op {
101 Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
102 Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
103 Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
104 Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
105 Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
106 Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
107 Add => ((l + r).value.into(), ty),
108 Sub => ((l - r).value.into(), ty),
109 Mul => ((l * r).value.into(), ty),
110 Div => ((l / r).value.into(), ty),
111 Rem => ((l % r).value.into(), ty),
112 _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
113 };
114 (val, false, ty)
115 }
116
    /// Evaluates a binary operation on two integer operands, given as raw (zero-extended)
    /// bits together with their layouts. Returns the raw result, whether the operation
    /// overflowed, and the result type.
    ///
    /// Signedness is not encoded in `l`/`r` themselves; it is recovered from the layouts
    /// via `sign_extend` where needed.
    fn binary_int_op(
        &self,
        bin_op: mir::BinOp,
        // passing in raw bits
        l: u128,
        left_layout: TyAndLayout<'tcx>,
        r: u128,
        right_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::BinOp::*;

        // Shift ops can have an RHS with a different numeric type.
        if bin_op == Shl || bin_op == Shr {
            let size = u128::from(left_layout.size.bits());
            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
            // zero-extended form). This matches the codegen backend:
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
            // The overflow check is also ignorant to the sign:
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
            // This would behave rather strangely if we had integer types of size 256: a shift by
            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
            // shift by -1i16 though would be considered overflowing. If we had integers of size
            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
            // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
            // consistent results for the same value represented at different bit widths.
            assert!(size <= 128);
            let overflow = r >= size;
            // The shift offset is implicitly masked to the type size, to make sure this operation
            // is always defined. This is the one MIR operator that does *not* directly map to a
            // single LLVM operation. See
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
            // for the corresponding truncation in our codegen backends.
            let r = r % size;
            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
            let result = if left_layout.abi.is_signed() {
                // Signed LHS: sign-extend first so `>>` performs an *arithmetic* shift.
                // `checked_shl`/`checked_shr` cannot fail here since `r < size <= 128`.
                let l = self.sign_extend(l, left_layout) as i128;
                let result = match bin_op {
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!(),
                };
                result as u128
            } else {
                // Unsigned LHS: plain logical shift on the raw bits.
                match bin_op {
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!(),
                }
            };
            let truncated = self.truncate(result, left_layout);
            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
        }

        // For the remaining ops, the types must be the same on both sides
        if left_layout.ty != right_layout.ty {
            span_bug!(
                self.cur_span(),
                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
                bin_op,
                l,
                left_layout.ty,
                r,
                right_layout.ty,
            )
        }

        let size = left_layout.size;

        // Operations that need special treatment for signed integers
        if left_layout.abi.is_signed() {
            // Signed comparisons: sign-extend both sides to i128, then compare.
            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                Lt => Some(i128::lt),
                Le => Some(i128::le),
                Gt => Some(i128::gt),
                Ge => Some(i128::ge),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;
                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
            }
            // Signed arithmetic: the zero-divisor checks must come *before* selecting the
            // overflowing_* function, hence the guarded arms.
            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                Div if r == 0 => throw_ub!(DivisionByZero),
                Rem if r == 0 => throw_ub!(RemainderByZero),
                Div => Some(i128::overflowing_div),
                Rem => Some(i128::overflowing_rem),
                Add => Some(i128::overflowing_add),
                Sub => Some(i128::overflowing_sub),
                Mul => Some(i128::overflowing_mul),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;

                // We need a special check for overflowing Rem and Div since they are *UB*
                // on overflow, which can happen with "int_min $OP -1".
                if matches!(bin_op, Rem | Div) {
                    if l == size.signed_int_min() && r == -1 {
                        if bin_op == Rem {
                            throw_ub!(RemainderOverflow)
                        } else {
                            throw_ub!(DivisionOverflow)
                        }
                    }
                }

                let (result, oflo) = op(l, r);
                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
                // If that truncation loses any information, we have an overflow.
                let result = result as u128;
                let truncated = self.truncate(result, left_layout);
                return Ok((
                    Scalar::from_uint(truncated, size),
                    // Overflow if i128 arithmetic overflowed, or if the value does not
                    // round-trip through truncation to the target width.
                    oflo || self.sign_extend(truncated, left_layout) != result,
                    left_layout.ty,
                ));
            }
        }

        // From here on, operands are either unsigned integers, or ops for which the raw
        // zero-extended bits give the right answer regardless of signedness (Eq/Ne, bit ops).
        let (val, ty) = match bin_op {
            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),

            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),

            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),

            Add | Sub | Mul | Rem | Div => {
                // Signed arithmetic was fully handled above.
                assert!(!left_layout.abi.is_signed());
                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                    Add => u128::overflowing_add,
                    Sub => u128::overflowing_sub,
                    Mul => u128::overflowing_mul,
                    Div if r == 0 => throw_ub!(DivisionByZero),
                    Rem if r == 0 => throw_ub!(RemainderByZero),
                    Div => u128::overflowing_div,
                    Rem => u128::overflowing_rem,
                    _ => bug!(),
                };
                let (result, oflo) = op(l, r);
                // Truncate to target type.
                // If that truncation loses any information, we have an overflow.
                let truncated = self.truncate(result, left_layout);
                return Ok((
                    Scalar::from_uint(truncated, size),
                    oflo || truncated != result,
                    left_layout.ty,
                ));
            }

            _ => span_bug!(
                self.cur_span(),
                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
                bin_op,
                l,
                r,
                right_layout.ty,
            ),
        };

        Ok((val, false, ty))
    }
287
    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    ///
    /// Dispatches on the type of the *left* operand; each arm asserts what the right
    /// operand is allowed to be (equal type for char/bool/float, any integral type for
    /// integer ops, pointer-or-integer for pointer ops).
    pub fn overflowing_binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::PointerTag>,
        right: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
        trace!(
            "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
            bin_op,
            *left,
            left.layout.ty,
            *right,
            right.layout.ty
        );

        match left.layout.ty.kind() {
            ty::Char => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
            }
            ty::Bool => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
            }
            ty::Float(fty) => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let ty = left.layout.ty;
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(match fty {
                    FloatTy::F32 => {
                        self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
                    }
                    FloatTy::F64 => {
                        self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
                    }
                })
            }
            // NOTE: this guard arm must stay before `is_any_ptr` — the ordering encodes
            // the dispatch priority for types matching neither primitive arm above.
            _ if left.layout.ty.is_integral() => {
                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
                assert!(
                    right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                // Hand the raw bits to `binary_int_op`; it recovers signedness from the layouts.
                let l = left.to_scalar()?.to_bits(left.layout.size)?;
                let r = right.to_scalar()?.to_bits(right.layout.size)?;
                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
            }
            _ if left.layout.ty.is_any_ptr() => {
                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
                // (Even when both sides are pointers, their type might differ, see issue #91636)
                assert!(
                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                // Pointer operations are machine-dependent, so defer to the `Machine` impl.
                M::binary_ptr_op(self, bin_op, left, right)
            }
            _ => span_bug!(
                self.cur_span(),
                "Invalid MIR: bad LHS type for binop: {:?}",
                left.layout.ty
            ),
        }
    }
366
367 /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
368 #[inline]
369 pub fn binary_op(
370 &self,
371 bin_op: mir::BinOp,
372 left: &ImmTy<'tcx, M::PointerTag>,
373 right: &ImmTy<'tcx, M::PointerTag>,
374 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
375 let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
376 Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
377 }
378
    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    ///
    /// `Not` on `bool` is logical negation; on integers it is bitwise negation.
    /// `Neg` exists for floats and signed integers only; for integers it can overflow
    /// (negating the minimum value of the type).
    pub fn overflowing_unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::UnOp::*;

        let layout = val.layout;
        let val = val.to_scalar()?;
        trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);

        match layout.ty.kind() {
            ty::Bool => {
                let val = val.to_bool()?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                };
                Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
            }
            ty::Float(fty) => {
                // Float negation never overflows.
                let res = match (un_op, fty) {
                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
                };
                Ok((res, false, layout.ty))
            }
            _ => {
                assert!(layout.ty.is_integral());
                let val = val.to_bits(layout.size)?;
                let (res, overflow) = match un_op {
                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
                    Neg => {
                        // arithmetic negation
                        assert!(layout.abi.is_signed());
                        // Negate at i128 width, then check whether the value survives
                        // truncation back to the target width.
                        let val = self.sign_extend(val, layout) as i128;
                        let (res, overflow) = val.overflowing_neg();
                        let res = res as u128;
                        // Truncate to target type.
                        // If that truncation loses any information, we have an overflow.
                        let truncated = self.truncate(res, layout);
                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
                    }
                };
                Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
            }
        }
    }
430
431 pub fn unary_op(
432 &self,
433 un_op: mir::UnOp,
434 val: &ImmTy<'tcx, M::PointerTag>,
435 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
436 let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
437 Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
438 }
439 }