1 //! Miscellaneous type-system utilities that are too small to deserve their own modules.
3 use crate::middle
::codegen_fn_attrs
::CodegenFnAttrFlags
;
4 use crate::ty
::fold
::{FallibleTypeFolder, TypeFolder}
;
5 use crate::ty
::layout
::IntegerExt
;
6 use crate::ty
::query
::TyCtxtAt
;
7 use crate::ty
::subst
::{GenericArgKind, Subst, SubstsRef}
;
9 self, Const
, DebruijnIndex
, DefIdTree
, List
, ReEarlyBound
, Region
, Ty
, TyCtxt
, TyKind
::*,
12 use rustc_apfloat
::Float
as _
;
14 use rustc_attr
::{self as attr, SignedInt, UnsignedInt}
;
15 use rustc_data_structures
::fx
::{FxHashMap, FxHashSet}
;
16 use rustc_data_structures
::intern
::Interned
;
17 use rustc_data_structures
::stable_hasher
::{HashStable, StableHasher}
;
18 use rustc_errors
::ErrorGuaranteed
;
20 use rustc_hir
::def
::{CtorOf, DefKind, Res}
;
21 use rustc_hir
::def_id
::DefId
;
22 use rustc_macros
::HashStable
;
23 use rustc_span
::{sym, DUMMY_SP}
;
24 use rustc_target
::abi
::{Integer, Size, TargetDataLayout}
;
25 use smallvec
::SmallVec
;
28 #[derive(Copy, Clone, Debug)]
29 pub struct Discr
<'tcx
> {
30 /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
35 impl<'tcx
> fmt
::Display
for Discr
<'tcx
> {
36 fn fmt(&self, fmt
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
37 match *self.ty
.kind() {
39 let size
= ty
::tls
::with(|tcx
| Integer
::from_int_ty(&tcx
, ity
).size());
41 // sign extend the raw representation to be an i128
42 let x
= size
.sign_extend(x
) as i128
;
45 _
=> write
!(fmt
, "{}", self.val
),
50 fn int_size_and_signed
<'tcx
>(tcx
: TyCtxt
<'tcx
>, ty
: Ty
<'tcx
>) -> (Size
, bool
) {
51 let (int
, signed
) = match *ty
.kind() {
52 Int(ity
) => (Integer
::from_int_ty(&tcx
, ity
), true),
53 Uint(uty
) => (Integer
::from_uint_ty(&tcx
, uty
), false),
54 _
=> bug
!("non integer discriminant"),
59 impl<'tcx
> Discr
<'tcx
> {
60 /// Adds `1` to the value and wraps around if the maximum for the type is reached.
61 pub fn wrap_incr(self, tcx
: TyCtxt
<'tcx
>) -> Self {
62 self.checked_add(tcx
, 1).0
64 pub fn checked_add(self, tcx
: TyCtxt
<'tcx
>, n
: u128
) -> (Self, bool
) {
65 let (size
, signed
) = int_size_and_signed(tcx
, self.ty
);
66 let (val
, oflo
) = if signed
{
67 let min
= size
.signed_int_min();
68 let max
= size
.signed_int_max();
69 let val
= size
.sign_extend(self.val
) as i128
;
70 assert
!(n
< (i128
::MAX
as u128
));
72 let oflo
= val
> max
- n
;
73 let val
= if oflo { min + (n - (max - val) - 1) }
else { val + n }
;
74 // zero the upper bits
75 let val
= val
as u128
;
76 let val
= size
.truncate(val
);
79 let max
= size
.unsigned_int_max();
81 let oflo
= val
> max
- n
;
82 let val
= if oflo { n - (max - val) - 1 }
else { val + n }
;
85 (Self { val, ty: self.ty }
, oflo
)
89 pub trait IntTypeExt
{
90 fn to_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Ty
<'tcx
>;
91 fn disr_incr
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>, val
: Option
<Discr
<'tcx
>>) -> Option
<Discr
<'tcx
>>;
92 fn initial_discriminant
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Discr
<'tcx
>;
95 impl IntTypeExt
for attr
::IntType
{
96 fn to_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Ty
<'tcx
> {
98 SignedInt(ast
::IntTy
::I8
) => tcx
.types
.i8,
99 SignedInt(ast
::IntTy
::I16
) => tcx
.types
.i16,
100 SignedInt(ast
::IntTy
::I32
) => tcx
.types
.i32,
101 SignedInt(ast
::IntTy
::I64
) => tcx
.types
.i64,
102 SignedInt(ast
::IntTy
::I128
) => tcx
.types
.i128
,
103 SignedInt(ast
::IntTy
::Isize
) => tcx
.types
.isize,
104 UnsignedInt(ast
::UintTy
::U8
) => tcx
.types
.u8,
105 UnsignedInt(ast
::UintTy
::U16
) => tcx
.types
.u16,
106 UnsignedInt(ast
::UintTy
::U32
) => tcx
.types
.u32,
107 UnsignedInt(ast
::UintTy
::U64
) => tcx
.types
.u64,
108 UnsignedInt(ast
::UintTy
::U128
) => tcx
.types
.u128
,
109 UnsignedInt(ast
::UintTy
::Usize
) => tcx
.types
.usize,
113 fn initial_discriminant
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Discr
<'tcx
> {
114 Discr { val: 0, ty: self.to_ty(tcx) }
117 fn disr_incr
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>, val
: Option
<Discr
<'tcx
>>) -> Option
<Discr
<'tcx
>> {
118 if let Some(val
) = val
{
119 assert_eq
!(self.to_ty(tcx
), val
.ty
);
120 let (new
, oflo
) = val
.checked_add(tcx
, 1);
121 if oflo { None }
else { Some(new) }
123 Some(self.initial_discriminant(tcx
))
128 impl<'tcx
> TyCtxt
<'tcx
> {
129 /// Creates a hash of the type `Ty` which will be the same no matter what crate
130 /// context it's calculated within. This is used by the `type_id` intrinsic.
131 pub fn type_id_hash(self, ty
: Ty
<'tcx
>) -> u64 {
132 let mut hasher
= StableHasher
::new();
133 let mut hcx
= self.create_stable_hashing_context();
135 // We want the type_id be independent of the types free regions, so we
136 // erase them. The erase_regions() call will also anonymize bound
137 // regions, which is desirable too.
138 let ty
= self.erase_regions(ty
);
140 hcx
.while_hashing_spans(false, |hcx
| ty
.hash_stable(hcx
, &mut hasher
));
144 pub fn res_generics_def_id(self, res
: Res
) -> Option
<DefId
> {
146 Res
::Def(DefKind
::Ctor(CtorOf
::Variant
, _
), def_id
) => {
147 Some(self.parent(def_id
).and_then(|def_id
| self.parent(def_id
)).unwrap())
149 Res
::Def(DefKind
::Variant
| DefKind
::Ctor(CtorOf
::Struct
, _
), def_id
) => {
150 Some(self.parent(def_id
).unwrap())
152 // Other `DefKind`s don't have generics and would ICE when calling
162 | DefKind
::TraitAlias
166 | DefKind
::AssocConst
175 pub fn has_error_field(self, ty
: Ty
<'tcx
>) -> bool
{
176 if let ty
::Adt(def
, substs
) = *ty
.kind() {
177 for field
in def
.all_fields() {
178 let field_ty
= field
.ty(self, substs
);
179 if let Error(_
) = field_ty
.kind() {
187 /// Attempts to returns the deeply last field of nested structures, but
188 /// does not apply any normalization in its search. Returns the same type
189 /// if input `ty` is not a structure at all.
190 pub fn struct_tail_without_normalization(self, ty
: Ty
<'tcx
>) -> Ty
<'tcx
> {
192 tcx
.struct_tail_with_normalize(ty
, |ty
| ty
)
195 /// Returns the deeply last field of nested structures, or the same type if
196 /// not a structure at all. Corresponds to the only possible unsized field,
197 /// and its type can be used to determine unsizing strategy.
199 /// Should only be called if `ty` has no inference variables and does not
200 /// need its lifetimes preserved (e.g. as part of codegen); otherwise
201 /// normalization attempt may cause compiler bugs.
202 pub fn struct_tail_erasing_lifetimes(
205 param_env
: ty
::ParamEnv
<'tcx
>,
208 tcx
.struct_tail_with_normalize(ty
, |ty
| tcx
.normalize_erasing_regions(param_env
, ty
))
211 /// Returns the deeply last field of nested structures, or the same type if
212 /// not a structure at all. Corresponds to the only possible unsized field,
213 /// and its type can be used to determine unsizing strategy.
215 /// This is parameterized over the normalization strategy (i.e. how to
216 /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
217 /// function to indicate no normalization should take place.
219 /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
221 pub fn struct_tail_with_normalize(
224 mut normalize
: impl FnMut(Ty
<'tcx
>) -> Ty
<'tcx
>,
226 let recursion_limit
= self.recursion_limit();
227 for iteration
in 0.. {
228 if !recursion_limit
.value_within_limit(iteration
) {
229 return self.ty_error_with_message(
231 &format
!("reached the recursion limit finding the struct tail for {}", ty
),
235 ty
::Adt(def
, substs
) => {
236 if !def
.is_struct() {
239 match def
.non_enum_variant().fields
.last() {
240 Some(f
) => ty
= f
.ty(self, substs
),
245 ty
::Tuple(tys
) if let Some((&last_ty
, _
)) = tys
.split_last() => {
249 ty
::Tuple(_
) => break,
251 ty
::Projection(_
) | ty
::Opaque(..) => {
252 let normalized
= normalize(ty
);
253 if ty
== normalized
{
268 /// Same as applying `struct_tail` on `source` and `target`, but only
269 /// keeps going as long as the two types are instances of the same
270 /// structure definitions.
271 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
272 /// whereas struct_tail produces `T`, and `Trait`, respectively.
274 /// Should only be called if the types have no inference variables and do
275 /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
276 /// normalization attempt may cause compiler bugs.
277 pub fn struct_lockstep_tails_erasing_lifetimes(
281 param_env
: ty
::ParamEnv
<'tcx
>,
282 ) -> (Ty
<'tcx
>, Ty
<'tcx
>) {
284 tcx
.struct_lockstep_tails_with_normalize(source
, target
, |ty
| {
285 tcx
.normalize_erasing_regions(param_env
, ty
)
289 /// Same as applying `struct_tail` on `source` and `target`, but only
290 /// keeps going as long as the two types are instances of the same
291 /// structure definitions.
292 /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
293 /// whereas struct_tail produces `T`, and `Trait`, respectively.
295 /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
297 pub fn struct_lockstep_tails_with_normalize(
301 normalize
: impl Fn(Ty
<'tcx
>) -> Ty
<'tcx
>,
302 ) -> (Ty
<'tcx
>, Ty
<'tcx
>) {
303 let (mut a
, mut b
) = (source
, target
);
305 match (&a
.kind(), &b
.kind()) {
306 (&Adt(a_def
, a_substs
), &Adt(b_def
, b_substs
))
307 if a_def
== b_def
&& a_def
.is_struct() =>
309 if let Some(f
) = a_def
.non_enum_variant().fields
.last() {
310 a
= f
.ty(self, a_substs
);
311 b
= f
.ty(self, b_substs
);
316 (&Tuple(a_tys
), &Tuple(b_tys
)) if a_tys
.len() == b_tys
.len() => {
317 if let Some(&a_last
) = a_tys
.last() {
319 b
= *b_tys
.last().unwrap();
324 (ty
::Projection(_
) | ty
::Opaque(..), _
)
325 | (_
, ty
::Projection(_
) | ty
::Opaque(..)) => {
326 // If either side is a projection, attempt to
327 // progress via normalization. (Should be safe to
328 // apply to both sides as normalization is
330 let a_norm
= normalize(a
);
331 let b_norm
= normalize(b
);
332 if a
== a_norm
&& b
== b_norm
{
346 /// Calculate the destructor of a given type.
347 pub fn calculate_dtor(
350 validate
: impl Fn(Self, DefId
) -> Result
<(), ErrorGuaranteed
>,
351 ) -> Option
<ty
::Destructor
> {
352 let drop_trait
= self.lang_items().drop_trait()?
;
353 self.ensure().coherent_trait(drop_trait
);
355 let ty
= self.type_of(adt_did
);
356 let (did
, constness
) = self.find_map_relevant_impl(drop_trait
, ty
, |impl_did
| {
357 if let Some(item_id
) = self.associated_item_def_ids(impl_did
).first() {
358 if validate(self, impl_did
).is_ok() {
359 return Some((*item_id
, self.impl_constness(impl_did
)));
365 Some(ty
::Destructor { did, constness }
)
368 /// Returns the set of types that are required to be alive in
369 /// order to run the destructor of `def` (see RFCs 769 and
372 /// Note that this returns only the constraints for the
373 /// destructor of `def` itself. For the destructors of the
374 /// contents, you need `adt_dtorck_constraint`.
375 pub fn destructor_constraints(self, def
: ty
::AdtDef
<'tcx
>) -> Vec
<ty
::subst
::GenericArg
<'tcx
>> {
376 let dtor
= match def
.destructor(self) {
378 debug
!("destructor_constraints({:?}) - no dtor", def
.did());
381 Some(dtor
) => dtor
.did
,
384 let impl_def_id
= self.associated_item(dtor
).container
.id();
385 let impl_generics
= self.generics_of(impl_def_id
);
387 // We have a destructor - all the parameters that are not
388 // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
391 // We need to return the list of parameters from the ADTs
392 // generics/substs that correspond to impure parameters on the
393 // impl's generics. This is a bit ugly, but conceptually simple:
395 // Suppose our ADT looks like the following
397 // struct S<X, Y, Z>(X, Y, Z);
401 // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
403 // We want to return the parameters (X, Y). For that, we match
404 // up the item-substs <X, Y, Z> with the substs on the impl ADT,
405 // <P1, P2, P0>, and then look up which of the impl substs refer to
406 // parameters marked as pure.
408 let impl_substs
= match *self.type_of(impl_def_id
).kind() {
409 ty
::Adt(def_
, substs
) if def_
== def
=> substs
,
413 let item_substs
= match *self.type_of(def
.did()).kind() {
414 ty
::Adt(def_
, substs
) if def_
== def
=> substs
,
418 let result
= iter
::zip(item_substs
, impl_substs
)
421 GenericArgKind
::Lifetime(Region(Interned(ReEarlyBound(ref ebr
), _
))) => {
422 !impl_generics
.region_param(ebr
, self).pure_wrt_drop
424 GenericArgKind
::Type(Ty(Interned(
425 ty
::TyS { kind: ty::Param(ref pt), .. }
,
427 ))) => !impl_generics
.type_param(pt
, self).pure_wrt_drop
,
428 GenericArgKind
::Const(Const(Interned(
429 ty
::ConstS { val: ty::ConstKind::Param(ref pc), .. }
,
431 ))) => !impl_generics
.const_param(pc
, self).pure_wrt_drop
,
432 GenericArgKind
::Lifetime(_
)
433 | GenericArgKind
::Type(_
)
434 | GenericArgKind
::Const(_
) => {
435 // Not a type, const or region param: this should be reported
441 .map(|(item_param
, _
)| item_param
)
443 debug
!("destructor_constraint({:?}) = {:?}", def
.did(), result
);
447 /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
448 /// that closures have a `DefId`, but the closure *expression* also
449 /// has a `HirId` that is located within the context where the
450 /// closure appears (and, sadly, a corresponding `NodeId`, since
451 /// those are not yet phased out). The parent of the closure's
452 /// `DefId` will also be the context where it appears.
453 pub fn is_closure(self, def_id
: DefId
) -> bool
{
454 matches
!(self.def_kind(def_id
), DefKind
::Closure
| DefKind
::Generator
)
457 /// Returns `true` if `def_id` refers to a definition that does not have its own
458 /// type-checking context, i.e. closure, generator or inline const.
459 pub fn is_typeck_child(self, def_id
: DefId
) -> bool
{
461 self.def_kind(def_id
),
462 DefKind
::Closure
| DefKind
::Generator
| DefKind
::InlineConst
466 /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
467 pub fn is_trait(self, def_id
: DefId
) -> bool
{
468 self.def_kind(def_id
) == DefKind
::Trait
471 /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
472 /// and `false` otherwise.
473 pub fn is_trait_alias(self, def_id
: DefId
) -> bool
{
474 self.def_kind(def_id
) == DefKind
::TraitAlias
477 /// Returns `true` if this `DefId` refers to the implicit constructor for
478 /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
479 pub fn is_constructor(self, def_id
: DefId
) -> bool
{
480 matches
!(self.def_kind(def_id
), DefKind
::Ctor(..))
483 /// Given the `DefId`, returns the `DefId` of the innermost item that
484 /// has its own type-checking context or "inference environment".
486 /// For example, a closure has its own `DefId`, but it is type-checked
487 /// with the containing item. Similarly, an inline const block has its
488 /// own `DefId` but it is type-checked together with the containing item.
490 /// Therefore, when we fetch the
491 /// `typeck` the closure, for example, we really wind up
492 /// fetching the `typeck` the enclosing fn item.
493 pub fn typeck_root_def_id(self, def_id
: DefId
) -> DefId
{
494 let mut def_id
= def_id
;
495 while self.is_typeck_child(def_id
) {
496 def_id
= self.parent(def_id
).unwrap_or_else(|| {
497 bug
!("closure {:?} has no parent", def_id
);
503 /// Given the `DefId` and substs a closure, creates the type of
504 /// `self` argument that the closure expects. For example, for a
505 /// `Fn` closure, this would return a reference type `&T` where
506 /// `T = closure_ty`.
508 /// Returns `None` if this closure's kind has not yet been inferred.
509 /// This should only be possible during type checking.
511 /// Note that the return value is a late-bound region and hence
512 /// wrapped in a binder.
513 pub fn closure_env_ty(
515 closure_def_id
: DefId
,
516 closure_substs
: SubstsRef
<'tcx
>,
517 env_region
: ty
::RegionKind
,
518 ) -> Option
<Ty
<'tcx
>> {
519 let closure_ty
= self.mk_closure(closure_def_id
, closure_substs
);
520 let closure_kind_ty
= closure_substs
.as_closure().kind_ty();
521 let closure_kind
= closure_kind_ty
.to_opt_closure_kind()?
;
522 let env_ty
= match closure_kind
{
523 ty
::ClosureKind
::Fn
=> self.mk_imm_ref(self.mk_region(env_region
), closure_ty
),
524 ty
::ClosureKind
::FnMut
=> self.mk_mut_ref(self.mk_region(env_region
), closure_ty
),
525 ty
::ClosureKind
::FnOnce
=> closure_ty
,
530 /// Returns `true` if the node pointed to by `def_id` is a `static` item.
532 pub fn is_static(self, def_id
: DefId
) -> bool
{
533 matches
!(self.def_kind(def_id
), DefKind
::Static(_
))
537 pub fn static_mutability(self, def_id
: DefId
) -> Option
<hir
::Mutability
> {
538 if let DefKind
::Static(mt
) = self.def_kind(def_id
) { Some(mt) }
else { None }
541 /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
542 pub fn is_thread_local_static(self, def_id
: DefId
) -> bool
{
543 self.codegen_fn_attrs(def_id
).flags
.contains(CodegenFnAttrFlags
::THREAD_LOCAL
)
546 /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
548 pub fn is_mutable_static(self, def_id
: DefId
) -> bool
{
549 self.static_mutability(def_id
) == Some(hir
::Mutability
::Mut
)
552 /// Get the type of the pointer to the static that we use in MIR.
553 pub fn static_ptr_ty(self, def_id
: DefId
) -> Ty
<'tcx
> {
554 // Make sure that any constants in the static's type are evaluated.
555 let static_ty
= self.normalize_erasing_regions(ty
::ParamEnv
::empty(), self.type_of(def_id
));
557 // Make sure that accesses to unsafe statics end up using raw pointers.
558 // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
559 if self.is_mutable_static(def_id
) {
560 self.mk_mut_ptr(static_ty
)
561 } else if self.is_foreign_item(def_id
) {
562 self.mk_imm_ptr(static_ty
)
564 self.mk_imm_ref(self.lifetimes
.re_erased
, static_ty
)
568 /// Expands the given impl trait type, stopping if the type is recursive.
569 #[instrument(skip(self), level = "debug")]
570 pub fn try_expand_impl_trait_type(
573 substs
: SubstsRef
<'tcx
>,
574 ) -> Result
<Ty
<'tcx
>, Ty
<'tcx
>> {
575 let mut visitor
= OpaqueTypeExpander
{
576 seen_opaque_tys
: FxHashSet
::default(),
577 expanded_cache
: FxHashMap
::default(),
578 primary_def_id
: Some(def_id
),
579 found_recursion
: false,
580 found_any_recursion
: false,
581 check_recursion
: true,
585 let expanded_type
= visitor
.expand_opaque_ty(def_id
, substs
).unwrap();
586 trace
!(?expanded_type
);
587 if visitor
.found_recursion { Err(expanded_type) }
else { Ok(expanded_type) }
591 struct OpaqueTypeExpander
<'tcx
> {
592 // Contains the DefIds of the opaque types that are currently being
593 // expanded. When we expand an opaque type we insert the DefId of
594 // that type, and when we finish expanding that type we remove the
596 seen_opaque_tys
: FxHashSet
<DefId
>,
597 // Cache of all expansions we've seen so far. This is a critical
598 // optimization for some large types produced by async fn trees.
599 expanded_cache
: FxHashMap
<(DefId
, SubstsRef
<'tcx
>), Ty
<'tcx
>>,
600 primary_def_id
: Option
<DefId
>,
601 found_recursion
: bool
,
602 found_any_recursion
: bool
,
603 /// Whether or not to check for recursive opaque types.
604 /// This is `true` when we're explicitly checking for opaque type
605 /// recursion, and 'false' otherwise to avoid unnecessary work.
606 check_recursion
: bool
,
610 impl<'tcx
> OpaqueTypeExpander
<'tcx
> {
611 fn expand_opaque_ty(&mut self, def_id
: DefId
, substs
: SubstsRef
<'tcx
>) -> Option
<Ty
<'tcx
>> {
612 if self.found_any_recursion
{
615 let substs
= substs
.fold_with(self);
616 if !self.check_recursion
|| self.seen_opaque_tys
.insert(def_id
) {
617 let expanded_ty
= match self.expanded_cache
.get(&(def_id
, substs
)) {
618 Some(expanded_ty
) => *expanded_ty
,
620 let generic_ty
= self.tcx
.type_of(def_id
);
621 let concrete_ty
= generic_ty
.subst(self.tcx
, substs
);
622 let expanded_ty
= self.fold_ty(concrete_ty
);
623 self.expanded_cache
.insert((def_id
, substs
), expanded_ty
);
627 if self.check_recursion
{
628 self.seen_opaque_tys
.remove(&def_id
);
632 // If another opaque type that we contain is recursive, then it
633 // will report the error, so we don't have to.
634 self.found_any_recursion
= true;
635 self.found_recursion
= def_id
== *self.primary_def_id
.as_ref().unwrap();
641 impl<'tcx
> TypeFolder
<'tcx
> for OpaqueTypeExpander
<'tcx
> {
642 fn tcx(&self) -> TyCtxt
<'tcx
> {
646 fn fold_ty(&mut self, t
: Ty
<'tcx
>) -> Ty
<'tcx
> {
647 if let ty
::Opaque(def_id
, substs
) = *t
.kind() {
648 self.expand_opaque_ty(def_id
, substs
).unwrap_or(t
)
649 } else if t
.has_opaque_types() {
650 t
.super_fold_with(self)
657 impl<'tcx
> Ty
<'tcx
> {
658 /// Returns the maximum value for the given numeric type (including `char`s)
659 /// or returns `None` if the type is not numeric.
660 pub fn numeric_max_val(self, tcx
: TyCtxt
<'tcx
>) -> Option
<Const
<'tcx
>> {
661 let val
= match self.kind() {
662 ty
::Int(_
) | ty
::Uint(_
) => {
663 let (size
, signed
) = int_size_and_signed(tcx
, self);
665 if signed { size.signed_int_max() as u128 }
else { size.unsigned_int_max() }
;
668 ty
::Char
=> Some(std
::char::MAX
as u128
),
669 ty
::Float(fty
) => Some(match fty
{
670 ty
::FloatTy
::F32
=> rustc_apfloat
::ieee
::Single
::INFINITY
.to_bits(),
671 ty
::FloatTy
::F64
=> rustc_apfloat
::ieee
::Double
::INFINITY
.to_bits(),
675 val
.map(|v
| Const
::from_bits(tcx
, v
, ty
::ParamEnv
::empty().and(self)))
678 /// Returns the minimum value for the given numeric type (including `char`s)
679 /// or returns `None` if the type is not numeric.
680 pub fn numeric_min_val(self, tcx
: TyCtxt
<'tcx
>) -> Option
<Const
<'tcx
>> {
681 let val
= match self.kind() {
682 ty
::Int(_
) | ty
::Uint(_
) => {
683 let (size
, signed
) = int_size_and_signed(tcx
, self);
684 let val
= if signed { size.truncate(size.signed_int_min() as u128) }
else { 0 }
;
688 ty
::Float(fty
) => Some(match fty
{
689 ty
::FloatTy
::F32
=> (-::rustc_apfloat
::ieee
::Single
::INFINITY
).to_bits(),
690 ty
::FloatTy
::F64
=> (-::rustc_apfloat
::ieee
::Double
::INFINITY
).to_bits(),
694 val
.map(|v
| Const
::from_bits(tcx
, v
, ty
::ParamEnv
::empty().and(self)))
697 /// Checks whether values of this type `T` are *moved* or *copied*
698 /// when referenced -- this amounts to a check for whether `T:
699 /// Copy`, but note that we **don't** consider lifetimes when
700 /// doing this check. This means that we may generate MIR which
701 /// does copies even when the type actually doesn't satisfy the
702 /// full requirements for the `Copy` trait (cc #29149) -- this
703 /// winds up being reported as an error during NLL borrow check.
704 pub fn is_copy_modulo_regions(
706 tcx_at
: TyCtxtAt
<'tcx
>,
707 param_env
: ty
::ParamEnv
<'tcx
>,
709 self.is_trivially_pure_clone_copy() || tcx_at
.is_copy_raw(param_env
.and(self))
712 /// Checks whether values of this type `T` have a size known at
713 /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
714 /// for the purposes of this check, so it can be an
715 /// over-approximation in generic contexts, where one can have
716 /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
717 /// actually carry lifetime requirements.
718 pub fn is_sized(self, tcx_at
: TyCtxtAt
<'tcx
>, param_env
: ty
::ParamEnv
<'tcx
>) -> bool
{
719 self.is_trivially_sized(tcx_at
.tcx
) || tcx_at
.is_sized_raw(param_env
.and(self))
722 /// Checks whether values of this type `T` implement the `Freeze`
723 /// trait -- frozen types are those that do not contain an
724 /// `UnsafeCell` anywhere. This is a language concept used to
725 /// distinguish "true immutability", which is relevant to
726 /// optimization as well as the rules around static values. Note
727 /// that the `Freeze` trait is not exposed to end users and is
728 /// effectively an implementation detail.
729 pub fn is_freeze(self, tcx_at
: TyCtxtAt
<'tcx
>, param_env
: ty
::ParamEnv
<'tcx
>) -> bool
{
730 self.is_trivially_freeze() || tcx_at
.is_freeze_raw(param_env
.and(self))
733 /// Fast path helper for testing if a type is `Freeze`.
735 /// Returning true means the type is known to be `Freeze`. Returning
736 /// `false` means nothing -- could be `Freeze`, might not be.
737 fn is_trivially_freeze(self) -> bool
{
750 | ty
::FnPtr(_
) => true,
751 ty
::Tuple(fields
) => fields
.iter().all(Self::is_trivially_freeze
),
752 ty
::Slice(elem_ty
) | ty
::Array(elem_ty
, _
) => elem_ty
.is_trivially_freeze(),
759 | ty
::GeneratorWitness(_
)
764 | ty
::Projection(_
) => false,
768 /// Checks whether values of this type `T` implement the `Unpin` trait.
769 pub fn is_unpin(self, tcx_at
: TyCtxtAt
<'tcx
>, param_env
: ty
::ParamEnv
<'tcx
>) -> bool
{
770 self.is_trivially_unpin() || tcx_at
.is_unpin_raw(param_env
.and(self))
773 /// Fast path helper for testing if a type is `Unpin`.
775 /// Returning true means the type is known to be `Unpin`. Returning
776 /// `false` means nothing -- could be `Unpin`, might not be.
777 fn is_trivially_unpin(self) -> bool
{
790 | ty
::FnPtr(_
) => true,
791 ty
::Tuple(fields
) => fields
.iter().all(Self::is_trivially_unpin
),
792 ty
::Slice(elem_ty
) | ty
::Array(elem_ty
, _
) => elem_ty
.is_trivially_unpin(),
799 | ty
::GeneratorWitness(_
)
804 | ty
::Projection(_
) => false,
808 /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
809 /// non-copy and *might* have a destructor attached; if it returns
810 /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
812 /// (Note that this implies that if `ty` has a destructor attached,
813 /// then `needs_drop` will definitely return `true` for `ty`.)
815 /// Note that this method is used to check eligible types in unions.
817 pub fn needs_drop(self, tcx
: TyCtxt
<'tcx
>, param_env
: ty
::ParamEnv
<'tcx
>) -> bool
{
818 // Avoid querying in simple cases.
819 match needs_drop_components(self, &tcx
.data_layout
) {
820 Err(AlwaysRequiresDrop
) => true,
822 let query_ty
= match *components
{
824 // If we've got a single component, call the query with that
825 // to increase the chance that we hit the query cache.
826 [component_ty
] => component_ty
,
830 // This doesn't depend on regions, so try to minimize distinct
832 // If normalization fails, we just use `query_ty`.
834 tcx
.try_normalize_erasing_regions(param_env
, query_ty
).unwrap_or(query_ty
);
836 tcx
.needs_drop_raw(param_env
.and(query_ty
))
841 /// Checks if `ty` has has a significant drop.
843 /// Note that this method can return false even if `ty` has a destructor
844 /// attached; even if that is the case then the adt has been marked with
845 /// the attribute `rustc_insignificant_dtor`.
847 /// Note that this method is used to check for change in drop order for
848 /// 2229 drop reorder migration analysis.
850 pub fn has_significant_drop(self, tcx
: TyCtxt
<'tcx
>, param_env
: ty
::ParamEnv
<'tcx
>) -> bool
{
851 // Avoid querying in simple cases.
852 match needs_drop_components(self, &tcx
.data_layout
) {
853 Err(AlwaysRequiresDrop
) => true,
855 let query_ty
= match *components
{
857 // If we've got a single component, call the query with that
858 // to increase the chance that we hit the query cache.
859 [component_ty
] => component_ty
,
863 // FIXME(#86868): We should be canonicalizing, or else moving this to a method of inference
864 // context, or *something* like that, but for now just avoid passing inference
865 // variables to queries that can't cope with them. Instead, conservatively
866 // return "true" (may change drop order).
867 if query_ty
.needs_infer() {
871 // This doesn't depend on regions, so try to minimize distinct
873 let erased
= tcx
.normalize_erasing_regions(param_env
, query_ty
);
874 tcx
.has_significant_drop_raw(param_env
.and(erased
))
879 /// Returns `true` if equality for this type is both reflexive and structural.
881 /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
883 /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
884 /// types, equality for the type as a whole is structural when it is the same as equality
885 /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
886 /// equality is indicated by an implementation of `PartialStructuralEq` and `StructuralEq` for
889 /// This function is "shallow" because it may return `true` for a composite type whose fields
890 /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
891 /// because equality for arrays is determined by the equality of each array element. If you
892 /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
893 /// down, you will need to use a type visitor.
895 pub fn is_structural_eq_shallow(self, tcx
: TyCtxt
<'tcx
>) -> bool
{
897 // Look for an impl of both `PartialStructuralEq` and `StructuralEq`.
898 Adt(..) => tcx
.has_structural_eq_impls(self),
900 // Primitive types that satisfy `Eq`.
901 Bool
| Char
| Int(_
) | Uint(_
) | Str
| Never
=> true,
903 // Composite types that satisfy `Eq` when all of their fields do.
905 // Because this function is "shallow", we return `true` for these composites regardless
906 // of the type(s) contained within.
907 Ref(..) | Array(..) | Slice(_
) | Tuple(..) => true,
909 // Raw pointers use bitwise comparison.
910 RawPtr(_
) | FnPtr(_
) => true,
912 // Floating point numbers are not `Eq`.
915 // Conservatively return `false` for all others...
917 // Anonymous function types
918 FnDef(..) | Closure(..) | Dynamic(..) | Generator(..) => false,
920 // Generic or inferred types
922 // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
923 // called for known, fully-monomorphized types.
924 Projection(_
) | Opaque(..) | Param(_
) | Bound(..) | Placeholder(_
) | Infer(_
) => false,
926 Foreign(_
) | GeneratorWitness(..) | Error(_
) => false,
930 /// Peel off all reference types in this type until there are none left.
932 /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
937 /// - `&'a mut u8` -> `u8`
938 /// - `&'a &'b u8` -> `u8`
939 /// - `&'a *const &'b u8 -> *const &'b u8`
940 pub fn peel_refs(self) -> Ty
<'tcx
> {
942 while let Ref(_
, inner_ty
, _
) = ty
.kind() {
948 pub fn outer_exclusive_binder(self) -> DebruijnIndex
{
949 self.0.outer_exclusive_binder
953 pub enum ExplicitSelf
<'tcx
> {
955 ByReference(ty
::Region
<'tcx
>, hir
::Mutability
),
956 ByRawPointer(hir
::Mutability
),
961 impl<'tcx
> ExplicitSelf
<'tcx
> {
962 /// Categorizes an explicit self declaration like `self: SomeType`
963 /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
965 /// This is mainly used to require the arbitrary_self_types feature
966 /// in the case of `Other`, to improve error messages in the common cases,
967 /// and to make `Other` non-object-safe.
972 /// impl<'a> Foo for &'a T {
973 /// // Legal declarations:
974 /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
975 /// fn method2(self: &'a T); // ExplicitSelf::ByValue
976 /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
977 /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
979 /// // Invalid cases will be caught by `check_method_receiver`:
980 /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
981 /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
982 /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
986 pub fn determine
<P
>(self_arg_ty
: Ty
<'tcx
>, is_self_ty
: P
) -> ExplicitSelf
<'tcx
>
988 P
: Fn(Ty
<'tcx
>) -> bool
,
990 use self::ExplicitSelf
::*;
992 match *self_arg_ty
.kind() {
993 _
if is_self_ty(self_arg_ty
) => ByValue
,
994 ty
::Ref(region
, ty
, mutbl
) if is_self_ty(ty
) => ByReference(region
, mutbl
),
995 ty
::RawPtr(ty
::TypeAndMut { ty, mutbl }
) if is_self_ty(ty
) => ByRawPointer(mutbl
),
996 ty
::Adt(def
, _
) if def
.is_box() && is_self_ty(self_arg_ty
.boxed_ty()) => ByBox
,
1002 /// Returns a list of types such that the given type needs drop if and only if
1003 /// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
1004 /// this type always needs drop.
1005 pub fn needs_drop_components
<'tcx
>(
1007 target_layout
: &TargetDataLayout
,
1008 ) -> Result
<SmallVec
<[Ty
<'tcx
>; 2]>, AlwaysRequiresDrop
> {
1010 ty
::Infer(ty
::FreshIntTy(_
))
1011 | ty
::Infer(ty
::FreshFloatTy(_
))
1020 | ty
::GeneratorWitness(..)
1023 | ty
::Str
=> Ok(SmallVec
::new()),
1025 // Foreign types can never have destructors.
1026 ty
::Foreign(..) => Ok(SmallVec
::new()),
1028 ty
::Dynamic(..) | ty
::Error(_
) => Err(AlwaysRequiresDrop
),
1030 ty
::Slice(ty
) => needs_drop_components(*ty
, target_layout
),
1031 ty
::Array(elem_ty
, size
) => {
1032 match needs_drop_components(*elem_ty
, target_layout
) {
1033 Ok(v
) if v
.is_empty() => Ok(v
),
1034 res
=> match size
.val().try_to_bits(target_layout
.pointer_size
) {
1035 // Arrays of size zero don't need drop, even if their element
1037 Some(0) => Ok(SmallVec
::new()),
1039 // We don't know which of the cases above we are in, so
1040 // return the whole type and let the caller decide what to
1042 None
=> Ok(smallvec
![ty
]),
1046 // If any field needs drop, then the whole tuple does.
1047 ty
::Tuple(fields
) => fields
.iter().try_fold(SmallVec
::new(), move |mut acc
, elem
| {
1048 acc
.extend(needs_drop_components(elem
, target_layout
)?
);
1052 // These require checking for `Copy` bounds or `Adt` destructors.
1054 | ty
::Projection(..)
1057 | ty
::Placeholder(..)
1061 | ty
::Generator(..) => Ok(smallvec
![ty
]),
1065 pub fn is_trivially_const_drop
<'tcx
>(ty
: Ty
<'tcx
>) -> bool
{
1072 | ty
::Infer(ty
::IntVar(_
))
1073 | ty
::Infer(ty
::FloatVar(_
))
1080 | ty
::Foreign(_
) => true,
1087 | ty
::Placeholder(_
)
1089 | ty
::Infer(_
) => false,
1091 // Not trivial because they have components, and instead of looking inside,
1092 // we'll just perform trait selection.
1093 ty
::Closure(..) | ty
::Generator(..) | ty
::GeneratorWitness(_
) | ty
::Adt(..) => false,
1095 ty
::Array(ty
, _
) | ty
::Slice(ty
) => is_trivially_const_drop(ty
),
1097 ty
::Tuple(tys
) => tys
.iter().all(|ty
| is_trivially_const_drop(ty
)),
1101 // Does the equivalent of
1103 // let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
1104 // folder.tcx().intern_*(&v)
1106 pub fn fold_list
<'tcx
, F
, T
>(
1107 list
: &'tcx ty
::List
<T
>,
1109 intern
: impl FnOnce(TyCtxt
<'tcx
>, &[T
]) -> &'tcx ty
::List
<T
>,
1110 ) -> Result
<&'tcx ty
::List
<T
>, F
::Error
>
1112 F
: FallibleTypeFolder
<'tcx
>,
1113 T
: TypeFoldable
<'tcx
> + PartialEq
+ Copy
,
1115 let mut iter
= list
.iter();
1116 // Look for the first element that changed
1117 match iter
.by_ref().enumerate().find_map(|(i
, t
)| match t
.try_fold_with(folder
) {
1118 Ok(new_t
) if new_t
== t
=> None
,
1119 new_t
=> Some((i
, new_t
)),
1121 Some((i
, Ok(new_t
))) => {
1122 // An element changed, prepare to intern the resulting list
1123 let mut new_list
= SmallVec
::<[_
; 8]>::with_capacity(list
.len());
1124 new_list
.extend_from_slice(&list
[..i
]);
1125 new_list
.push(new_t
);
1127 new_list
.push(t
.try_fold_with(folder
)?
)
1129 Ok(intern(folder
.tcx(), &new_list
))
1131 Some((_
, Err(err
))) => {
/// Zero-sized marker returned (as the `Err` payload) by
/// `needs_drop_components` when a type is statically known to always
/// need drop.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub struct AlwaysRequiresDrop;
1141 /// Normalizes all opaque types in the given value, replacing them
1142 /// with their underlying types.
1143 pub fn normalize_opaque_types
<'tcx
>(
1145 val
: &'tcx List
<ty
::Predicate
<'tcx
>>,
1146 ) -> &'tcx List
<ty
::Predicate
<'tcx
>> {
1147 let mut visitor
= OpaqueTypeExpander
{
1148 seen_opaque_tys
: FxHashSet
::default(),
1149 expanded_cache
: FxHashMap
::default(),
1150 primary_def_id
: None
,
1151 found_recursion
: false,
1152 found_any_recursion
: false,
1153 check_recursion
: false,
1156 val
.fold_with(&mut visitor
)
1159 /// Determines whether an item is annotated with `doc(hidden)`.
1160 pub fn is_doc_hidden(tcx
: TyCtxt
<'_
>, def_id
: DefId
) -> bool
{
1161 tcx
.get_attrs(def_id
)
1163 .filter_map(|attr
| if attr
.has_name(sym
::doc
) { attr.meta_item_list() }
else { None }
)
1164 .any(|items
| items
.iter().any(|item
| item
.has_name(sym
::hidden
)))
1167 pub fn provide(providers
: &mut ty
::query
::Providers
) {
1168 *providers
= ty
::query
::Providers { normalize_opaque_types, is_doc_hidden, ..*providers }