]> git.proxmox.com Git - rustc.git/blob - compiler/rustc_middle/src/ty/util.rs
New upstream version 1.55.0+dfsg1
[rustc.git] / compiler / rustc_middle / src / ty / util.rs
1 //! Miscellaneous type-system utilities that are too small to deserve their own modules.
2
3 use crate::ich::NodeIdHashingMode;
4 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
5 use crate::ty::fold::TypeFolder;
6 use crate::ty::layout::IntegerExt;
7 use crate::ty::query::TyCtxtAt;
8 use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
9 use crate::ty::TyKind::*;
10 use crate::ty::{self, DebruijnIndex, DefIdTree, List, Ty, TyCtxt, TypeFoldable};
11 use rustc_apfloat::Float as _;
12 use rustc_ast as ast;
13 use rustc_attr::{self as attr, SignedInt, UnsignedInt};
14 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
15 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
16 use rustc_errors::ErrorReported;
17 use rustc_hir as hir;
18 use rustc_hir::def::DefKind;
19 use rustc_hir::def_id::DefId;
20 use rustc_macros::HashStable;
21 use rustc_span::DUMMY_SP;
22 use rustc_target::abi::{Integer, Size, TargetDataLayout};
23 use smallvec::SmallVec;
24 use std::{fmt, iter};
25
/// An enum discriminant value, together with the integer type it is read at.
#[derive(Copy, Clone, Debug)]
pub struct Discr<'tcx> {
    /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
    pub val: u128,
    /// The integer type of the discriminant.
    pub ty: Ty<'tcx>,
}
32
impl<'tcx> fmt::Display for Discr<'tcx> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self.ty.kind() {
            ty::Int(ity) => {
                // For signed discriminants, reinterpret the raw bits as a
                // signed value of the right width so e.g. `-1i8` prints as
                // `-1` rather than `255`.
                let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
                let x = self.val;
                // sign extend the raw representation to be an i128
                let x = size.sign_extend(x) as i128;
                write!(fmt, "{}", x)
            }
            // Unsigned (and any non-integer) discriminants print the raw value.
            _ => write!(fmt, "{}", self.val),
        }
    }
}
47
/// Minimum value of a signed integer of the given `size`
/// (i.e., `-2^(bits - 1)`), as an `i128`.
fn signed_min(size: Size) -> i128 {
    // `1 << (bits - 1)` is the sign-bit pattern; sign-extending it yields the minimum.
    size.sign_extend(1_u128 << (size.bits() - 1)) as i128
}
51
/// Maximum value of a signed integer of the given `size`
/// (i.e., `2^(bits - 1) - 1`), as an `i128`.
fn signed_max(size: Size) -> i128 {
    i128::MAX >> (128 - size.bits())
}
55
/// Maximum value of an unsigned integer of the given `size`
/// (i.e., `2^bits - 1`), as a `u128`.
fn unsigned_max(size: Size) -> u128 {
    u128::MAX >> (128 - size.bits())
}
59
/// Returns the size and signedness of the integer type `ty`.
///
/// Panics (via `bug!`) if `ty` is not an integer type; callers use this only
/// on discriminant types, which are always `Int` or `Uint`.
fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
    let (int, signed) = match *ty.kind() {
        Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
        Uint(uty) => (Integer::from_uint_ty(&tcx, uty), false),
        _ => bug!("non integer discriminant"),
    };
    (int.size(), signed)
}
68
impl<'tcx> Discr<'tcx> {
    /// Adds `1` to the value and wraps around if the maximum for the type is reached.
    pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
        self.checked_add(tcx, 1).0
    }

    /// Adds `n` to the value, wrapping around at the type's bounds.
    /// Returns the new discriminant and a flag that is `true` iff
    /// wrap-around (overflow) occurred.
    pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
        let (size, signed) = int_size_and_signed(tcx, self.ty);
        let (val, oflo) = if signed {
            let min = signed_min(size);
            let max = signed_max(size);
            let val = size.sign_extend(self.val) as i128;
            // `n` must fit in an `i128` for the signed arithmetic below.
            assert!(n < (i128::MAX as u128));
            let n = n as i128;
            let oflo = val > max - n;
            // On overflow, wrap past `min` by however far we overshot `max`.
            let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
            // zero the upper bits
            let val = val as u128;
            let val = size.truncate(val);
            (val, oflo)
        } else {
            let max = unsigned_max(size);
            let val = self.val;
            let oflo = val > max - n;
            // Same wrap-around computation, in unsigned arithmetic (min is 0).
            let val = if oflo { n - (max - val) - 1 } else { val + n };
            (val, oflo)
        };
        (Self { val, ty: self.ty }, oflo)
    }
}
98
/// Conversions from an attribute-level integer type (`attr::IntType`) to the
/// corresponding `Ty`, plus discriminant-stepping helpers for enums.
pub trait IntTypeExt {
    /// Returns the `Ty` corresponding to this integer type.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    /// Returns the successor of `val`, `None` on overflow, or the initial
    /// discriminant when `val` is `None`.
    fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
    /// Returns the discriminant of the first variant (zero).
    fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
}
104
impl IntTypeExt for attr::IntType {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        // Map each ast-level integer type to the interned `Ty` singleton.
        match *self {
            SignedInt(ast::IntTy::I8) => tcx.types.i8,
            SignedInt(ast::IntTy::I16) => tcx.types.i16,
            SignedInt(ast::IntTy::I32) => tcx.types.i32,
            SignedInt(ast::IntTy::I64) => tcx.types.i64,
            SignedInt(ast::IntTy::I128) => tcx.types.i128,
            SignedInt(ast::IntTy::Isize) => tcx.types.isize,
            UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
            UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
            UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
            UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
            UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
            UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
        }
    }

    fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
        Discr { val: 0, ty: self.to_ty(tcx) }
    }

    fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
        if let Some(val) = val {
            // The previous discriminant must be of this integer type.
            assert_eq!(self.to_ty(tcx), val.ty);
            let (new, oflo) = val.checked_add(tcx, 1);
            // Overflowing the discriminant type means there is no successor.
            if oflo { None } else { Some(new) }
        } else {
            Some(self.initial_discriminant(tcx))
        }
    }
}
137
138 impl<'tcx> TyCtxt<'tcx> {
    /// Creates a hash of the type `Ty` which will be the same no matter what crate
    /// context it's calculated within. This is used by the `type_id` intrinsic.
    pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
        let mut hasher = StableHasher::new();
        let mut hcx = self.create_stable_hashing_context();

        // We want the type_id be independent of the types free regions, so we
        // erase them. The erase_regions() call will also anonymize bound
        // regions, which is desirable too.
        let ty = self.erase_regions(ty);

        // Spans and NodeIds are not stable across crates, so hash with spans
        // disabled and node ids mapped to their stable def-paths.
        hcx.while_hashing_spans(false, |hcx| {
            hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
                ty.hash_stable(hcx, &mut hasher);
            });
        });
        hasher.finish()
    }
157
158 pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
159 if let ty::Adt(def, substs) = *ty.kind() {
160 for field in def.all_fields() {
161 let field_ty = field.ty(self, substs);
162 if let Error(_) = field_ty.kind() {
163 return true;
164 }
165 }
166 }
167 false
168 }
169
    /// Attempts to returns the deeply last field of nested structures, but
    /// does not apply any normalization in its search. Returns the same type
    /// if input `ty` is not a structure at all.
    pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        let tcx = self;
        // Identity closure == no normalization.
        tcx.struct_tail_with_normalize(ty, |ty| ty)
    }
177
    /// Returns the deeply last field of nested structures, or the same type if
    /// not a structure at all. Corresponds to the only possible unsized field,
    /// and its type can be used to determine unsizing strategy.
    ///
    /// Should only be called if `ty` has no inference variables and does not
    /// need its lifetimes preserved (e.g. as part of codegen); otherwise
    /// normalization attempt may cause compiler bugs.
    pub fn struct_tail_erasing_lifetimes(
        self,
        ty: Ty<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Ty<'tcx> {
        let tcx = self;
        tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
    }
193
    /// Returns the deeply last field of nested structures, or the same type if
    /// not a structure at all. Corresponds to the only possible unsized field,
    /// and its type can be used to determine unsizing strategy.
    ///
    /// This is parameterized over the normalization strategy (i.e. how to
    /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
    /// function to indicate no normalization should take place.
    ///
    /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
    /// during codegen.
    pub fn struct_tail_with_normalize(
        self,
        mut ty: Ty<'tcx>,
        normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
    ) -> Ty<'tcx> {
        // Guard against infinitely recursive types (e.g. ones produced by
        // erroneous recursive type definitions) by bounding the iterations.
        let recursion_limit = self.recursion_limit();
        for iteration in 0.. {
            if !recursion_limit.value_within_limit(iteration) {
                return self.ty_error_with_message(
                    DUMMY_SP,
                    &format!("reached the recursion limit finding the struct tail for {}", ty),
                );
            }
            match *ty.kind() {
                ty::Adt(def, substs) => {
                    // Only structs can be unsized; enums/unions terminate the search.
                    if !def.is_struct() {
                        break;
                    }
                    match def.non_enum_variant().fields.last() {
                        Some(f) => ty = f.ty(self, substs),
                        None => break,
                    }
                }

                ty::Tuple(tys) => {
                    // Only the last tuple element may be unsized; recurse into it.
                    if let Some((&last_ty, _)) = tys.split_last() {
                        ty = last_ty.expect_ty();
                    } else {
                        break;
                    }
                }

                ty::Projection(_) | ty::Opaque(..) => {
                    // Normalize and keep going; bail if normalization makes no
                    // progress (identity strategy, or already normalized).
                    let normalized = normalize(ty);
                    if ty == normalized {
                        return ty;
                    } else {
                        ty = normalized;
                    }
                }

                _ => {
                    break;
                }
            }
        }
        ty
    }
252
    /// Same as applying `struct_tail` on `source` and `target`, but only
    /// keeps going as long as the two types are instances of the same
    /// structure definitions.
    /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
    /// whereas struct_tail produces `T`, and `Trait`, respectively.
    ///
    /// Should only be called if the types have no inference variables and do
    /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
    /// normalization attempt may cause compiler bugs.
    pub fn struct_lockstep_tails_erasing_lifetimes(
        self,
        source: Ty<'tcx>,
        target: Ty<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> (Ty<'tcx>, Ty<'tcx>) {
        let tcx = self;
        tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
            tcx.normalize_erasing_regions(param_env, ty)
        })
    }
273
    /// Same as applying `struct_tail` on `source` and `target`, but only
    /// keeps going as long as the two types are instances of the same
    /// structure definitions.
    /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
    /// whereas struct_tail produces `T`, and `Trait`, respectively.
    ///
    /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
    /// during codegen.
    pub fn struct_lockstep_tails_with_normalize(
        self,
        source: Ty<'tcx>,
        target: Ty<'tcx>,
        normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Ty<'tcx>, Ty<'tcx>) {
        let (mut a, mut b) = (source, target);
        loop {
            match (&a.kind(), &b.kind()) {
                // Same struct on both sides: step into the (possibly unsized)
                // last field of each, in lockstep.
                (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
                    if a_def == b_def && a_def.is_struct() =>
                {
                    if let Some(f) = a_def.non_enum_variant().fields.last() {
                        a = f.ty(self, a_substs);
                        b = f.ty(self, b_substs);
                    } else {
                        break;
                    }
                }
                // Same-arity tuples: step into the last element of each.
                (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
                    if let Some(a_last) = a_tys.last() {
                        a = a_last.expect_ty();
                        b = b_tys.last().unwrap().expect_ty();
                    } else {
                        break;
                    }
                }
                (ty::Projection(_) | ty::Opaque(..), _)
                | (_, ty::Projection(_) | ty::Opaque(..)) => {
                    // If either side is a projection, attempt to
                    // progress via normalization. (Should be safe to
                    // apply to both sides as normalization is
                    // idempotent.)
                    let a_norm = normalize(a);
                    let b_norm = normalize(b);
                    if a == a_norm && b == b_norm {
                        break;
                    } else {
                        a = a_norm;
                        b = b_norm;
                    }
                }

                _ => break,
            }
        }
        (a, b)
    }
330
    /// Calculate the destructor of a given type.
    ///
    /// Looks for a `Drop` impl on `adt_did`'s type; `validate` is invoked on
    /// the impl and may veto it (e.g., reporting an error for an invalid
    /// `Drop` impl), in which case no destructor is returned.
    pub fn calculate_dtor(
        self,
        adt_did: DefId,
        validate: impl Fn(Self, DefId) -> Result<(), ErrorReported>,
    ) -> Option<ty::Destructor> {
        let drop_trait = self.lang_items().drop_trait()?;
        // Force coherence checking for `Drop` before inspecting its impls.
        self.ensure().coherent_trait(drop_trait);

        let ty = self.type_of(adt_did);
        let dtor_did = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
            // The `drop` method is the first (and only) associated item of the impl.
            if let Some(item) = self.associated_items(impl_did).in_definition_order().next() {
                if validate(self, impl_did).is_ok() {
                    return Some(item.def_id);
                }
            }
            None
        });

        Some(ty::Destructor { did: dtor_did? })
    }
352
    /// Returns the set of types that are required to be alive in
    /// order to run the destructor of `def` (see RFCs 769 and
    /// 1238).
    ///
    /// Note that this returns only the constraints for the
    /// destructor of `def` itself. For the destructors of the
    /// contents, you need `adt_dtorck_constraint`.
    pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
        let dtor = match def.destructor(self) {
            None => {
                debug!("destructor_constraints({:?}) - no dtor", def.did);
                return vec![];
            }
            Some(dtor) => dtor.did,
        };

        let impl_def_id = self.associated_item(dtor).container.id();
        let impl_generics = self.generics_of(impl_def_id);

        // We have a destructor - all the parameters that are not
        // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
        // must be live.

        // We need to return the list of parameters from the ADTs
        // generics/substs that correspond to impure parameters on the
        // impl's generics. This is a bit ugly, but conceptually simple:
        //
        // Suppose our ADT looks like the following
        //
        //     struct S<X, Y, Z>(X, Y, Z);
        //
        // and the impl is
        //
        //     impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
        //
        // We want to return the parameters (X, Y). For that, we match
        // up the item-substs <X, Y, Z> with the substs on the impl ADT,
        // <P1, P2, P0>, and then look up which of the impl substs refer to
        // parameters marked as pure.

        let impl_substs = match *self.type_of(impl_def_id).kind() {
            ty::Adt(def_, substs) if def_ == def => substs,
            _ => bug!(),
        };

        let item_substs = match *self.type_of(def.did).kind() {
            ty::Adt(def_, substs) if def_ == def => substs,
            _ => bug!(),
        };

        let result = iter::zip(item_substs, impl_substs)
            .filter(|&(_, k)| {
                // Keep the item param iff the corresponding impl param is
                // NOT `pure_wrt_drop` (i.e., lacks `#[may_dangle]`).
                match k.unpack() {
                    GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
                        !impl_generics.region_param(ebr, self).pure_wrt_drop
                    }
                    GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
                        !impl_generics.type_param(pt, self).pure_wrt_drop
                    }
                    GenericArgKind::Const(&ty::Const {
                        val: ty::ConstKind::Param(ref pc), ..
                    }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
                    GenericArgKind::Lifetime(_)
                    | GenericArgKind::Type(_)
                    | GenericArgKind::Const(_) => {
                        // Not a type, const or region param: this should be reported
                        // as an error.
                        false
                    }
                }
            })
            .map(|(item_param, _)| item_param)
            .collect();
        debug!("destructor_constraint({:?}) = {:?}", def.did, result);
        result
    }
429
    /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
    /// that closures have a `DefId`, but the closure *expression* also
    /// has a `HirId` that is located within the context where the
    /// closure appears (and, sadly, a corresponding `NodeId`, since
    /// those are not yet phased out). The parent of the closure's
    /// `DefId` will also be the context where it appears.
    ///
    /// Generators count as closures here, since they are lowered the same way.
    pub fn is_closure(self, def_id: DefId) -> bool {
        matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
    }
439
440 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
441 pub fn is_trait(self, def_id: DefId) -> bool {
442 self.def_kind(def_id) == DefKind::Trait
443 }
444
445 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
446 /// and `false` otherwise.
447 pub fn is_trait_alias(self, def_id: DefId) -> bool {
448 self.def_kind(def_id) == DefKind::TraitAlias
449 }
450
    /// Returns `true` if this `DefId` refers to the implicit constructor for
    /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
    pub fn is_constructor(self, def_id: DefId) -> bool {
        matches!(self.def_kind(def_id), DefKind::Ctor(..))
    }
456
    /// Given the def-ID of a fn or closure, returns the def-ID of
    /// the innermost fn item that the closure is contained within.
    /// This is a significant `DefId` because, when we do
    /// type-checking, we type-check this fn item and all of its
    /// (transitive) closures together. Therefore, when we fetch the
    /// `typeck` the closure, for example, we really wind up
    /// fetching the `typeck` the enclosing fn item.
    pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
        let mut def_id = def_id;
        // Walk up the def tree until we leave the (possibly nested) closures.
        while self.is_closure(def_id) {
            def_id = self.parent(def_id).unwrap_or_else(|| {
                // Every closure is defined inside some item, so a missing
                // parent indicates a broken def tree.
                bug!("closure {:?} has no parent", def_id);
            });
        }
        def_id
    }
473
    /// Given the `DefId` and substs a closure, creates the type of
    /// `self` argument that the closure expects. For example, for a
    /// `Fn` closure, this would return a reference type `&T` where
    /// `T = closure_ty`.
    ///
    /// Returns `None` if this closure's kind has not yet been inferred.
    /// This should only be possible during type checking.
    ///
    /// Note that the return value is a late-bound region and hence
    /// wrapped in a binder.
    pub fn closure_env_ty(
        self,
        closure_def_id: DefId,
        closure_substs: SubstsRef<'tcx>,
        env_region: ty::RegionKind,
    ) -> Option<Ty<'tcx>> {
        let closure_ty = self.mk_closure(closure_def_id, closure_substs);
        // The closure kind is stored as a type in the substs; it is an
        // inference variable until the kind has been inferred.
        let closure_kind_ty = closure_substs.as_closure().kind_ty();
        let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
        let env_ty = match closure_kind {
            ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
            ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
            ty::ClosureKind::FnOnce => closure_ty,
        };
        Some(env_ty)
    }
500
    /// Returns `true` if the node pointed to by `def_id` is a `static` item.
    /// (Only statics have a mutability, so `static_mutability` is `Some` iff
    /// `def_id` is a static.)
    pub fn is_static(self, def_id: DefId) -> bool {
        self.static_mutability(def_id).is_some()
    }
505
    /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
    pub fn is_thread_local_static(self, def_id: DefId) -> bool {
        // The attribute is recorded as a codegen flag during attr lowering.
        self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
    }
510
    /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
    pub fn is_mutable_static(self, def_id: DefId) -> bool {
        self.static_mutability(def_id) == Some(hir::Mutability::Mut)
    }
515
    /// Get the type of the pointer to the static that we use in MIR.
    pub fn static_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
        // Make sure that any constants in the static's type are evaluated.
        let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));

        // Make sure that accesses to unsafe statics end up using raw pointers.
        // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
        if self.is_mutable_static(def_id) {
            self.mk_mut_ptr(static_ty)
        } else if self.is_foreign_item(def_id) {
            self.mk_imm_ptr(static_ty)
        } else {
            // Safe, immutable, local statics are accessed through a shared
            // reference with an erased lifetime.
            self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
        }
    }
531
    /// Expands the given impl trait type, stopping if the type is recursive.
    ///
    /// Returns `Ok(expanded)` on success, or `Err(expanded)` when the opaque
    /// type (transitively) mentions itself; the expansion is returned either way.
    pub fn try_expand_impl_trait_type(
        self,
        def_id: DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<Ty<'tcx>, Ty<'tcx>> {
        let mut visitor = OpaqueTypeExpander {
            seen_opaque_tys: FxHashSet::default(),
            expanded_cache: FxHashMap::default(),
            primary_def_id: Some(def_id),
            found_recursion: false,
            // Recursion detection is only needed for this explicit check.
            check_recursion: true,
            tcx: self,
        };

        // `unwrap()` is fine: with `found_recursion == false` on entry, the
        // expander always produces a type.
        let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
        if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
    }
550 }
551
/// A type folder that replaces `ty::Opaque` types with their underlying
/// concrete types, optionally detecting recursive opaque types.
struct OpaqueTypeExpander<'tcx> {
    // Contains the DefIds of the opaque types that are currently being
    // expanded. When we expand an opaque type we insert the DefId of
    // that type, and when we finish expanding that type we remove
    // its DefId.
    seen_opaque_tys: FxHashSet<DefId>,
    // Cache of all expansions we've seen so far. This is a critical
    // optimization for some large types produced by async fn trees.
    expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
    // The opaque type whose expansion was originally requested; recursion is
    // only reported when it cycles back to this one.
    primary_def_id: Option<DefId>,
    // Set to `true` once the primary opaque type was found to be recursive.
    found_recursion: bool,
    /// Whether or not to check for recursive opaque types.
    /// This is `true` when we're explicitly checking for opaque type
    /// recursion, and 'false' otherwise to avoid unnecessary work.
    check_recursion: bool,
    tcx: TyCtxt<'tcx>,
}
569
impl<'tcx> OpaqueTypeExpander<'tcx> {
    /// Expands one opaque type to its concrete type, folding any nested
    /// opaque types along the way. Returns `None` once recursion was found.
    fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
        if self.found_recursion {
            return None;
        }
        // Fold the substs first so nested opaque types in them are expanded too.
        let substs = substs.fold_with(self);
        if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
            let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
                Some(expanded_ty) => expanded_ty,
                None => {
                    let generic_ty = self.tcx.type_of(def_id);
                    let concrete_ty = generic_ty.subst(self.tcx, substs);
                    let expanded_ty = self.fold_ty(concrete_ty);
                    self.expanded_cache.insert((def_id, substs), expanded_ty);
                    expanded_ty
                }
            };
            // Pop `def_id` from the in-progress set now that it is fully expanded.
            if self.check_recursion {
                self.seen_opaque_tys.remove(&def_id);
            }
            Some(expanded_ty)
        } else {
            // If another opaque type that we contain is recursive, then it
            // will report the error, so we don't have to.
            self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
            None
        }
    }
}
599
impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
        if let ty::Opaque(def_id, substs) = t.kind {
            // On recursion (`None`), keep the opaque type unexpanded.
            self.expand_opaque_ty(def_id, substs).unwrap_or(t)
        } else if t.has_opaque_types() {
            // Only recurse when there is an opaque type somewhere inside.
            t.super_fold_with(self)
        } else {
            t
        }
    }
}
615
616 impl<'tcx> ty::TyS<'tcx> {
    /// Returns the maximum value for the given numeric type (including `char`s)
    /// or returns `None` if the type is not numeric.
    pub fn numeric_max_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
        let val = match self.kind() {
            ty::Int(_) | ty::Uint(_) => {
                let (size, signed) = int_size_and_signed(tcx, self);
                let val = if signed { signed_max(size) as u128 } else { unsigned_max(size) };
                Some(val)
            }
            ty::Char => Some(std::char::MAX as u128),
            // For floats, "maximum" is positive infinity (as raw bits).
            ty::Float(fty) => Some(match fty {
                ty::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
                ty::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
            }),
            _ => None,
        };
        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
    }
635
    /// Returns the minimum value for the given numeric type (including `char`s)
    /// or returns `None` if the type is not numeric.
    pub fn numeric_min_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
        let val = match self.kind() {
            ty::Int(_) | ty::Uint(_) => {
                let (size, signed) = int_size_and_signed(tcx, self);
                // Truncate the sign-extended minimum back to the type's width.
                let val = if signed { size.truncate(signed_min(size) as u128) } else { 0 };
                Some(val)
            }
            ty::Char => Some(0),
            // For floats, "minimum" is negative infinity (as raw bits).
            ty::Float(fty) => Some(match fty {
                ty::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
                ty::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
            }),
            _ => None,
        };
        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
    }
654
    /// Checks whether values of this type `T` are *moved* or *copied*
    /// when referenced -- this amounts to a check for whether `T:
    /// Copy`, but note that we **don't** consider lifetimes when
    /// doing this check. This means that we may generate MIR which
    /// does copies even when the type actually doesn't satisfy the
    /// full requirements for the `Copy` trait (cc #29149) -- this
    /// winds up being reported as an error during NLL borrow check.
    pub fn is_copy_modulo_regions(
        &'tcx self,
        tcx_at: TyCtxtAt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> bool {
        tcx_at.is_copy_raw(param_env.and(self))
    }
669
    /// Checks whether values of this type `T` have a size known at
    /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
    /// for the purposes of this check, so it can be an
    /// over-approximation in generic contexts, where one can have
    /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
    /// actually carry lifetime requirements.
    pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        // Fast path first; fall back to the query for the general case.
        self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
    }
679
    /// Checks whether values of this type `T` implement the `Freeze`
    /// trait -- frozen types are those that do not contain a
    /// `UnsafeCell` anywhere. This is a language concept used to
    /// distinguish "true immutability", which is relevant to
    /// optimization as well as the rules around static values. Note
    /// that the `Freeze` trait is not exposed to end users and is
    /// effectively an implementation detail.
    pub fn is_freeze(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        // Fast path first; fall back to the query for the general case.
        self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
    }
690
    /// Fast path helper for testing if a type is `Freeze`.
    ///
    /// Returning true means the type is known to be `Freeze`. Returning
    /// `false` means nothing -- could be `Freeze`, might not be.
    fn is_trivially_freeze(&self) -> bool {
        match self.kind() {
            // Primitives and pointer-like types contain no `UnsafeCell`.
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(_)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(_) => true,
            // Aggregates are trivially `Freeze` iff all components are.
            ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_freeze),
            ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
            // Everything else needs the full query to decide.
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Generator(..)
            | ty::GeneratorWitness(_)
            | ty::Infer(_)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Placeholder(_)
            | ty::Projection(_) => false,
        }
    }
725
    /// Checks whether values of this type `T` implement the `Unpin` trait.
    pub fn is_unpin(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        // Fast path first; fall back to the query for the general case.
        self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self))
    }
730
    /// Fast path helper for testing if a type is `Unpin`.
    ///
    /// Returning true means the type is known to be `Unpin`. Returning
    /// `false` means nothing -- could be `Unpin`, might not be.
    fn is_trivially_unpin(&self) -> bool {
        match self.kind() {
            // Primitives and pointer-like types are unconditionally `Unpin`.
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(_)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(_) => true,
            // Aggregates are trivially `Unpin` iff all components are.
            ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_unpin),
            ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_unpin(),
            // Everything else needs the full query to decide.
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Generator(..)
            | ty::GeneratorWitness(_)
            | ty::Infer(_)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Placeholder(_)
            | ty::Projection(_) => false,
        }
    }
765
    /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
    /// non-copy and *might* have a destructor attached; if it returns
    /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
    ///
    /// (Note that this implies that if `ty` has a destructor attached,
    /// then `needs_drop` will definitely return `true` for `ty`.)
    ///
    /// Note that this method is used to check eligible types in unions.
    #[inline]
    pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(self, &tcx.data_layout) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };
                // This doesn't depend on regions, so try to minimize distinct
                // query keys used.
                let erased = tcx.normalize_erasing_regions(param_env, query_ty);
                tcx.needs_drop_raw(param_env.and(erased))
            }
        }
    }
794
    /// Checks if `ty` has a significant drop.
    ///
    /// Note that this method can return false even if `ty` has a destructor
    /// attached; even if that is the case then the adt has been marked with
    /// the attribute `rustc_insignificant_dtor`.
    ///
    /// Note that this method is used to check for change in drop order for
    /// 2229 drop reorder migration analysis.
    #[inline]
    pub fn has_significant_drop(
        &'tcx self,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(self, &tcx.data_layout) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };

                // FIXME(#86868): We should be canonicalizing, or else moving this to a method of inference
                // context, or *something* like that, but for now just avoid passing inference
                // variables to queries that can't cope with them. Instead, conservatively
                // return "true" (may change drop order).
                if query_ty.needs_infer() {
                    return true;
                }

                // This doesn't depend on regions, so try to minimize distinct
                // query keys used.
                let erased = tcx.normalize_erasing_regions(param_env, query_ty);
                tcx.has_significant_drop_raw(param_env.and(erased))
            }
        }
    }
836
    /// Returns `true` if equality for this type is both reflexive and structural.
    ///
    /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
    ///
    /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
    /// types, equality for the type as a whole is structural when it is the same as equality
    /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
    /// equality is indicated by an implementation of `StructuralPartialEq` and `StructuralEq` for
    /// that type.
    ///
    /// This function is "shallow" because it may return `true` for a composite type whose fields
    /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
    /// because equality for arrays is determined by the equality of each array element. If you
    /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
    /// down, you will need to use a type visitor.
    #[inline]
    pub fn is_structural_eq_shallow(&'tcx self, tcx: TyCtxt<'tcx>) -> bool {
        match self.kind() {
            // Look for an impl of both `StructuralPartialEq` and `StructuralEq`.
            Adt(..) => tcx.has_structural_eq_impls(self),

            // Primitive types that satisfy `Eq`.
            Bool | Char | Int(_) | Uint(_) | Str | Never => true,

            // Composite types that satisfy `Eq` when all of their fields do.
            //
            // Because this function is "shallow", we return `true` for these composites regardless
            // of the type(s) contained within.
            Ref(..) | Array(..) | Slice(_) | Tuple(..) => true,

            // Raw pointers use bitwise comparison.
            RawPtr(_) | FnPtr(_) => true,

            // Floating point numbers are not `Eq`.
            Float(_) => false,

            // Conservatively return `false` for all others...

            // Anonymous function types
            FnDef(..) | Closure(..) | Dynamic(..) | Generator(..) => false,

            // Generic or inferred types
            //
            // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
            // called for known, fully-monomorphized types.
            Projection(_) | Opaque(..) | Param(_) | Bound(..) | Placeholder(_) | Infer(_) => false,

            Foreign(_) | GeneratorWitness(..) | Error(_) => false,
        }
    }
887
888 pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
889 match (&a.kind(), &b.kind()) {
890 (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
891 if did_a != did_b {
892 return false;
893 }
894
895 substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
896 }
897 _ => a == b,
898 }
899 }
900
901 /// Peel off all reference types in this type until there are none left.
902 ///
903 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
904 ///
905 /// # Examples
906 ///
907 /// - `u8` -> `u8`
908 /// - `&'a mut u8` -> `u8`
909 /// - `&'a &'b u8` -> `u8`
910 /// - `&'a *const &'b u8 -> *const &'b u8`
911 pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
912 let mut ty = self;
913 while let Ref(_, inner_ty, _) = ty.kind() {
914 ty = inner_ty;
915 }
916 ty
917 }
918
    /// Returns the precomputed `outer_exclusive_binder` stored on this type: a
    /// `DebruijnIndex` serving as an exclusive upper bound on the binders this
    /// type references. (The exact invariant is documented where the field is
    /// declared; this is just a cheap field read.)
    pub fn outer_exclusive_binder(&'tcx self) -> DebruijnIndex {
        self.outer_exclusive_binder
    }
922 }
923
/// Categorized form of an explicit `self` receiver type, as produced by
/// `ExplicitSelf::determine`.
pub enum ExplicitSelf<'tcx> {
    /// The receiver type *is* the `Self` type (`self: Self`).
    ByValue,
    /// `self: &Self` / `self: &mut Self`, carrying the reference's region and mutability.
    ByReference(ty::Region<'tcx>, hir::Mutability),
    /// `self: *const Self` / `self: *mut Self`.
    ByRawPointer(hir::Mutability),
    /// `self: Box<Self>`.
    ByBox,
    /// Any other receiver type (per `determine`, used to require the
    /// `arbitrary_self_types` feature and made non-object-safe).
    Other,
}
931
932 impl<'tcx> ExplicitSelf<'tcx> {
933 /// Categorizes an explicit self declaration like `self: SomeType`
934 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
935 /// `Other`.
936 /// This is mainly used to require the arbitrary_self_types feature
937 /// in the case of `Other`, to improve error messages in the common cases,
938 /// and to make `Other` non-object-safe.
939 ///
940 /// Examples:
941 ///
942 /// ```
943 /// impl<'a> Foo for &'a T {
944 /// // Legal declarations:
945 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
946 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
947 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
948 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
949 ///
950 /// // Invalid cases will be caught by `check_method_receiver`:
951 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
952 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
953 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
954 /// }
955 /// ```
956 ///
957 pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
958 where
959 P: Fn(Ty<'tcx>) -> bool,
960 {
961 use self::ExplicitSelf::*;
962
963 match *self_arg_ty.kind() {
964 _ if is_self_ty(self_arg_ty) => ByValue,
965 ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
966 ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
967 ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
968 _ => Other,
969 }
970 }
971 }
972
/// Returns a list of types such that the given type needs drop if and only if
/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
/// this type always needs drop.
///
/// `target_layout` is only consulted to evaluate array lengths (which are
/// pointer-sized on the target).
pub fn needs_drop_components(
    ty: Ty<'tcx>,
    target_layout: &TargetDataLayout,
) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
    match ty.kind() {
        // Scalars, pointers, references, and the other kinds below never run
        // a destructor themselves, so the component list is empty.
        ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Bool
        | ty::Int(_)
        | ty::Uint(_)
        | ty::Float(_)
        | ty::Never
        | ty::FnDef(..)
        | ty::FnPtr(_)
        | ty::Char
        | ty::GeneratorWitness(..)
        | ty::RawPtr(_)
        | ty::Ref(..)
        | ty::Str => Ok(SmallVec::new()),

        // Foreign types can never have destructors.
        ty::Foreign(..) => Ok(SmallVec::new()),

        // Trait objects (and the error type) are conservatively treated as
        // always needing drop.
        ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),

        // A slice needs drop exactly when its element type does.
        ty::Slice(ty) => needs_drop_components(ty, target_layout),
        ty::Array(elem_ty, size) => {
            match needs_drop_components(elem_ty, target_layout) {
                // Element never needs drop => neither does the array.
                Ok(v) if v.is_empty() => Ok(v),
                // Element may need drop: the answer depends on the length.
                res => match size.val.try_to_bits(target_layout.pointer_size) {
                    // Arrays of size zero don't need drop, even if their element
                    // type does.
                    Some(0) => Ok(SmallVec::new()),
                    Some(_) => res,
                    // We don't know which of the cases above we are in (the
                    // length is not a known constant), so return the whole
                    // type and let the caller decide what to do.
                    None => Ok(smallvec![ty]),
                },
            }
        }
        // If any field needs drop, then the whole tuple does.
        ty::Tuple(..) => ty.tuple_fields().try_fold(SmallVec::new(), move |mut acc, elem| {
            // `?` propagates `AlwaysRequiresDrop` from any field.
            acc.extend(needs_drop_components(elem, target_layout)?);
            Ok(acc)
        }),

        // These require checking for `Copy` bounds or `Adt` destructors, so
        // they are returned whole for the caller to analyze further.
        ty::Adt(..)
        | ty::Projection(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        | ty::Opaque(..)
        | ty::Infer(_)
        | ty::Closure(..)
        | ty::Generator(..) => Ok(smallvec![ty]),
    }
}
1035
1036 // Does the equivalent of
1037 // ```
1038 // let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
1039 // folder.tcx().intern_*(&v)
1040 // ```
1041 pub fn fold_list<'tcx, F, T>(
1042 list: &'tcx ty::List<T>,
1043 folder: &mut F,
1044 intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
1045 ) -> &'tcx ty::List<T>
1046 where
1047 F: TypeFolder<'tcx>,
1048 T: TypeFoldable<'tcx> + PartialEq + Copy,
1049 {
1050 let mut iter = list.iter();
1051 // Look for the first element that changed
1052 if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| {
1053 let new_t = t.fold_with(folder);
1054 if new_t == t { None } else { Some((i, new_t)) }
1055 }) {
1056 // An element changed, prepare to intern the resulting list
1057 let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
1058 new_list.extend_from_slice(&list[..i]);
1059 new_list.push(new_t);
1060 new_list.extend(iter.map(|t| t.fold_with(folder)));
1061 intern(folder.tcx(), &new_list)
1062 } else {
1063 list
1064 }
1065 }
1066
/// Marker "error" returned by `needs_drop_components` when a type
/// unconditionally needs drop (e.g. `ty::Dynamic`), so no finite component
/// list can be produced.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub struct AlwaysRequiresDrop;
1069
1070 /// Normalizes all opaque types in the given value, replacing them
1071 /// with their underlying types.
1072 pub fn normalize_opaque_types(
1073 tcx: TyCtxt<'tcx>,
1074 val: &'tcx List<ty::Predicate<'tcx>>,
1075 ) -> &'tcx List<ty::Predicate<'tcx>> {
1076 let mut visitor = OpaqueTypeExpander {
1077 seen_opaque_tys: FxHashSet::default(),
1078 expanded_cache: FxHashMap::default(),
1079 primary_def_id: None,
1080 found_recursion: false,
1081 check_recursion: false,
1082 tcx,
1083 };
1084 val.fold_with(&mut visitor)
1085 }
1086
/// Registers this module's query implementations (currently just
/// `normalize_opaque_types`) in the global `Providers` table, preserving all
/// previously registered providers via the struct-update syntax.
pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { normalize_opaque_types, ..*providers }
}