1 //! Candidate selection. See the [rustc dev guide] for more information on how this works.
2 //!
3 //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
4
5 use self::EvaluationResult::*;
6 use self::SelectionCandidate::*;
7
8 use super::coherence::{self, Conflict};
9 use super::const_evaluatable;
10 use super::project;
11 use super::project::normalize_with_depth_to;
12 use super::project::ProjectionTyObligation;
13 use super::util;
14 use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
15 use super::wf;
16 use super::DerivedObligationCause;
17 use super::Obligation;
18 use super::ObligationCauseCode;
19 use super::Selection;
20 use super::SelectionResult;
21 use super::TraitQueryMode;
22 use super::{Normalized, ProjectionCacheKey};
23 use super::{ObligationCause, PredicateObligation, TraitObligation};
24 use super::{Overflow, SelectionError, Unimplemented};
25
26 use crate::infer::{InferCtxt, InferOk, TypeFreshener};
27 use crate::traits::error_reporting::InferCtxtExt;
28 use crate::traits::project::ProjectionCacheKeyExt;
29 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
30 use rustc_data_structures::stack::ensure_sufficient_stack;
31 use rustc_errors::ErrorReported;
32 use rustc_hir as hir;
33 use rustc_hir::def_id::DefId;
34 use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
35 use rustc_middle::mir::interpret::ErrorHandled;
36 use rustc_middle::ty::fast_reject;
37 use rustc_middle::ty::print::with_no_trimmed_paths;
38 use rustc_middle::ty::relate::TypeRelation;
39 use rustc_middle::ty::subst::{GenericArgKind, Subst, SubstsRef};
40 use rustc_middle::ty::{self, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate};
41 use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable, WithConstness};
42 use rustc_span::symbol::sym;
43
44 use std::cell::{Cell, RefCell};
45 use std::cmp;
46 use std::fmt::{self, Display};
47 use std::iter;
48 use std::rc::Rc;
49
50 pub use rustc_middle::traits::select::*;
51
52 mod candidate_assembly;
53 mod confirmation;
54
55 #[derive(Clone, Debug)]
56 pub enum IntercrateAmbiguityCause {
57 DownstreamCrate { trait_desc: String, self_desc: Option<String> },
58 UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String> },
59 ReservationImpl { message: String },
60 }
61
62 impl IntercrateAmbiguityCause {
63 /// Emits notes when the overlap is caused by complex intercrate ambiguities.
64 /// See #23980 for details.
65 pub fn add_intercrate_ambiguity_hint(&self, err: &mut rustc_errors::DiagnosticBuilder<'_>) {
66 err.note(&self.intercrate_ambiguity_hint());
67 }
68
69 pub fn intercrate_ambiguity_hint(&self) -> String {
70 match self {
71 &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
72 let self_desc = if let &Some(ref ty) = self_desc {
73 format!(" for type `{}`", ty)
74 } else {
75 String::new()
76 };
77 format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
78 }
79 &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
80 let self_desc = if let &Some(ref ty) = self_desc {
81 format!(" for type `{}`", ty)
82 } else {
83 String::new()
84 };
85 format!(
86 "upstream crates may add a new impl of trait `{}`{} \
87 in future versions",
88 trait_desc, self_desc
89 )
90 }
91 &IntercrateAmbiguityCause::ReservationImpl { ref message } => message.clone(),
92 }
93 }
94 }
95
96 pub struct SelectionContext<'cx, 'tcx> {
97 infcx: &'cx InferCtxt<'cx, 'tcx>,
98
99 /// Freshener used specifically for entries on the obligation
100 /// stack. This ensures that all entries on the stack at one time
101 /// will have the same set of placeholder entries, which is
102 /// important for checking for trait bounds that recursively
103 /// require themselves.
104 freshener: TypeFreshener<'cx, 'tcx>,
105
106 /// If `true`, indicates that the evaluation should be conservative
107 /// and consider the possibility of types outside this crate.
108 /// This comes up primarily when resolving ambiguity. Imagine
109 /// there is some trait reference `$0: Bar` where `$0` is an
110 /// inference variable. If `intercrate` is true, then we can never
111 /// say for sure that this reference is not implemented, even if
112 /// there are *no impls at all for `Bar`*, because `$0` could be
113 /// bound to some type in a downstream crate that implements
114 /// `Bar`. This is the suitable mode for coherence. Elsewhere,
115 /// though, we set this to false, because we are only interested
116 /// in types that the user could actually have written --- in
117 /// other words, we consider `$0: Bar` to be unimplemented if
118 /// there is no type that the user could *actually name* that
119 /// would satisfy it. This avoids crippling inference, basically.
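///
/// To illustrate the coherence-mode case above (a sketch only; `Bar` and the
/// crates here are hypothetical, not anything defined in this file):
///
/// ```ignore (illustrative)
/// // Crate `upstream`:
/// pub trait Bar {}
///
/// // Some downstream crate may add, at any point in the future:
/// // impl upstream::Bar for TheirLocalType {}
/// ```
///
/// So with `intercrate` set, an obligation `$0: Bar` with `$0` unresolved
/// must be treated as possibly implemented even if `upstream` itself
/// contains no `Bar` impls at all.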
120 intercrate: bool,
121
122 intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
123
124 /// Controls whether or not to filter out negative impls when selecting.
125 /// This is used in librustdoc to distinguish between the lack of an impl
126 /// and a negative impl.
127 allow_negative_impls: bool,
128
129 /// The mode that trait queries run in, which informs our error handling
130 /// policy. In essence, canonicalized queries need their errors propagated
131 /// rather than immediately reported because we do not have accurate spans.
132 query_mode: TraitQueryMode,
133 }
134
135 // One frame of the trait obligation stack; each frame links back up to its parent frame.
136 struct TraitObligationStack<'prev, 'tcx> {
137 obligation: &'prev TraitObligation<'tcx>,
138
139 /// The trait ref from `obligation` but "freshened" with the
140 /// selection-context's freshener. Used to check for recursion.
141 fresh_trait_ref: ty::PolyTraitRef<'tcx>,
142
143 /// Starts out equal to `depth` -- if, during evaluation, we
144 /// encounter a cycle, then we will set this flag to the minimum
145 /// depth of that cycle for all participants in the cycle. These
146 /// participants will then forego caching their results. This is
147 /// not the most efficient solution, but it addresses #60010. The
148 /// problem we are trying to prevent:
149 ///
150 /// - If you have `A: AutoTrait`, which requires `B: AutoTrait` and `C: NonAutoTrait`
151 /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
152 /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
153 ///
154 /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
155 /// is `EvaluatedToOk`; this is because they were only considered
156 /// ok on the premise that `A: AutoTrait` held, but we indeed
157 /// encountered a problem (later on) with `A: AutoTrait`. So we
158 /// currently set a flag on the stack node for `B: AutoTrait` (as
159 /// well as the second instance of `A: AutoTrait`) to suppress
160 /// caching.
161 ///
162 /// This is a simple, targeted fix. A more-performant fix requires
163 /// deeper changes, but would permit more caching: we could
164 /// basically defer caching until we have fully evaluated the
165 /// tree, and then cache the entire tree at once. In any case, the
166 /// performance impact here shouldn't be so horrible: every time
167 /// this is hit, we do cache at least one trait, so we only
168 /// evaluate each member of a cycle up to N times, where N is the
169 /// length of the cycle. This means the performance impact is
170 /// bounded and we shouldn't have any terrible worst-cases.
171 reached_depth: Cell<usize>,
172
173 previous: TraitObligationStackList<'prev, 'tcx>,
174
175 /// The number of parent frames plus one (thus, the topmost frame has depth 1).
176 depth: usize,
177
178 /// The depth-first number of this node in the search graph -- a
179 /// pre-order index. Basically, a freshly incremented counter.
180 dfn: usize,
181 }
182
183 struct SelectionCandidateSet<'tcx> {
184 // A list of candidates that definitely apply to the current
185 // obligation (meaning: types unify).
186 vec: Vec<SelectionCandidate<'tcx>>,
187
188 // If `true`, then there were candidates that might or might
189 // not have applied, but we couldn't tell. This occurs when some
190 // of the input types are type variables, in which case there are
191 // various "builtin" rules that might or might not trigger.
192 ambiguous: bool,
193 }
194
195 #[derive(PartialEq, Eq, Debug, Clone)]
196 struct EvaluatedCandidate<'tcx> {
197 candidate: SelectionCandidate<'tcx>,
198 evaluation: EvaluationResult,
199 }
200
201 /// When does the builtin impl for `T: Trait` apply?
202 enum BuiltinImplConditions<'tcx> {
203 /// The impl is conditional on `T1, T2, ...: Trait`.
204 Where(ty::Binder<Vec<Ty<'tcx>>>),
205 /// There is no built-in impl. There may be some other
206 /// candidate (a where-clause or user-defined impl).
207 None,
208 /// It is unknown whether there is an impl.
209 Ambiguous,
210 }
211
212 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
213 pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
214 SelectionContext {
215 infcx,
216 freshener: infcx.freshener(),
217 intercrate: false,
218 intercrate_ambiguity_causes: None,
219 allow_negative_impls: false,
220 query_mode: TraitQueryMode::Standard,
221 }
222 }
223
224 pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
225 SelectionContext {
226 infcx,
227 freshener: infcx.freshener(),
228 intercrate: true,
229 intercrate_ambiguity_causes: None,
230 allow_negative_impls: false,
231 query_mode: TraitQueryMode::Standard,
232 }
233 }
234
235 pub fn with_negative(
236 infcx: &'cx InferCtxt<'cx, 'tcx>,
237 allow_negative_impls: bool,
238 ) -> SelectionContext<'cx, 'tcx> {
239 debug!(?allow_negative_impls, "with_negative");
240 SelectionContext {
241 infcx,
242 freshener: infcx.freshener(),
243 intercrate: false,
244 intercrate_ambiguity_causes: None,
245 allow_negative_impls,
246 query_mode: TraitQueryMode::Standard,
247 }
248 }
249
250 pub fn with_query_mode(
251 infcx: &'cx InferCtxt<'cx, 'tcx>,
252 query_mode: TraitQueryMode,
253 ) -> SelectionContext<'cx, 'tcx> {
254 debug!(?query_mode, "with_query_mode");
255 SelectionContext {
256 infcx,
257 freshener: infcx.freshener(),
258 intercrate: false,
259 intercrate_ambiguity_causes: None,
260 allow_negative_impls: false,
261 query_mode,
262 }
263 }
264
265 /// Enables tracking of intercrate ambiguity causes. These are
266 /// used in coherence to give improved diagnostics. We don't do
267 /// this until we detect a coherence error because it can lead to
268 /// false overflow results (#47139) and because it costs
269 /// computation time.
270 pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
271 assert!(self.intercrate);
272 assert!(self.intercrate_ambiguity_causes.is_none());
273 self.intercrate_ambiguity_causes = Some(vec![]);
274 debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
275 }
276
277 /// Gets the intercrate ambiguity causes collected since tracking
278 /// was enabled and disables tracking at the same time. If
279 /// tracking is not enabled, just returns an empty vector.
280 pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
281 assert!(self.intercrate);
282 self.intercrate_ambiguity_causes.take().unwrap_or_default()
283 }
284
285 pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
286 self.infcx
287 }
288
289 pub fn tcx(&self) -> TyCtxt<'tcx> {
290 self.infcx.tcx
291 }
292
293 ///////////////////////////////////////////////////////////////////////////
294 // Selection
295 //
296 // The selection phase tries to identify *how* an obligation will
297 // be resolved. For example, it will identify which impl or
298 // parameter bound is to be used. The process can be inconclusive
299 // if the self type in the obligation is not fully inferred. Selection
300 // can result in an error in one of two ways:
301 //
302 // 1. If no applicable impl or parameter bound can be found.
303 // 2. If the output type parameters in the obligation do not match
304 // those specified by the impl/bound. For example, if the obligation
305 // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
306 // `impl<T> Iterable<T> for Vec<T>`, then an error would result.
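//
// A minimal sketch of driving selection directly (illustrative only; how the
// `infcx` and the `obligation` are built is elided, and real callers normally
// go through the fulfillment machinery rather than calling `select` by hand):
//
//     let mut selcx = SelectionContext::new(&infcx);
//     match selcx.select(&obligation) {
//         Ok(Some(selection)) => { /* resolved to a specific impl/bound */ }
//         Ok(None) => { /* ambiguous: self type not sufficiently inferred */ }
//         Err(_) => { /* selection error, e.g. `Unimplemented` */ }
//     }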
307
308 /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
309 /// type environment by performing unification.
310 #[instrument(level = "debug", skip(self))]
311 pub fn select(
312 &mut self,
313 obligation: &TraitObligation<'tcx>,
314 ) -> SelectionResult<'tcx, Selection<'tcx>> {
315 debug_assert!(!obligation.predicate.has_escaping_bound_vars());
316
317 let pec = &ProvisionalEvaluationCache::default();
318 let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
319
320 let candidate = match self.candidate_from_obligation(&stack) {
321 Err(SelectionError::Overflow) => {
322 // In standard mode, overflow must have been caught and reported
323 // earlier.
324 assert!(self.query_mode == TraitQueryMode::Canonical);
325 return Err(SelectionError::Overflow);
326 }
327 Err(e) => {
328 return Err(e);
329 }
330 Ok(None) => {
331 return Ok(None);
332 }
333 Ok(Some(candidate)) => candidate,
334 };
335
336 match self.confirm_candidate(obligation, candidate) {
337 Err(SelectionError::Overflow) => {
338 assert!(self.query_mode == TraitQueryMode::Canonical);
339 Err(SelectionError::Overflow)
340 }
341 Err(e) => Err(e),
342 Ok(candidate) => {
343 debug!(?candidate);
344 Ok(Some(candidate))
345 }
346 }
347 }
348
349 ///////////////////////////////////////////////////////////////////////////
350 // EVALUATION
351 //
352 // Tests whether an obligation can be selected or whether an impl
353 // can be applied to particular types. It skips the "confirmation"
354 // step and hence completely ignores output type parameters.
355 //
356 // The result is "true" if the obligation *may* hold and "false" if
357 // we can be sure it does not.
358
359 /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
360 pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
361 debug!(?obligation, "predicate_may_hold_fatal");
362
363 // This fatal query is a stopgap that should only be used in standard mode,
364 // where we do not expect overflow to be propagated.
365 assert!(self.query_mode == TraitQueryMode::Standard);
366
367 self.evaluate_root_obligation(obligation)
368 .expect("Overflow should be caught earlier in standard query mode")
369 .may_apply()
370 }
371
372 /// Evaluates whether the obligation `obligation` can be satisfied
373 /// and returns an `EvaluationResult`. This is meant for the
374 /// *initial* call.
375 pub fn evaluate_root_obligation(
376 &mut self,
377 obligation: &PredicateObligation<'tcx>,
378 ) -> Result<EvaluationResult, OverflowError> {
379 self.evaluation_probe(|this| {
380 this.evaluate_predicate_recursively(
381 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
382 obligation.clone(),
383 )
384 })
385 }
386
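/// Runs `op` inside an inference probe, so that any inference side effects
/// are rolled back afterwards. A failed leak check turns the result into
/// `EvaluatedToErr`, and if region constraints were added in the snapshot an
/// `EvaluatedToOk` result is weakened to `EvaluatedToOkModuloRegions`.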
387 fn evaluation_probe(
388 &mut self,
389 op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
390 ) -> Result<EvaluationResult, OverflowError> {
391 self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
392 let result = op(self)?;
393
394 match self.infcx.leak_check(true, snapshot) {
395 Ok(()) => {}
396 Err(_) => return Ok(EvaluatedToErr),
397 }
398
399 match self.infcx.region_constraints_added_in_snapshot(snapshot) {
400 None => Ok(result),
401 Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
402 }
403 })
404 }
405
406 /// Evaluates the predicates in `predicates` recursively. Note that
407 /// this applies projections in the predicates, and therefore
408 /// is run within an inference probe.
409 fn evaluate_predicates_recursively<'o, I>(
410 &mut self,
411 stack: TraitObligationStackList<'o, 'tcx>,
412 predicates: I,
413 ) -> Result<EvaluationResult, OverflowError>
414 where
415 I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
416 {
417 let mut result = EvaluatedToOk;
418 debug!(?predicates, "evaluate_predicates_recursively");
419 for obligation in predicates {
420 let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
421 if let EvaluatedToErr = eval {
422 // fast-path - EvaluatedToErr is the top of the lattice,
423 // so we don't need to look on the other predicates.
424 return Ok(EvaluatedToErr);
425 } else {
426 result = cmp::max(result, eval);
427 }
428 }
429 Ok(result)
430 }
431
432 #[instrument(
433 level = "debug",
434 skip(self, previous_stack),
435 fields(previous_stack = ?previous_stack.head())
436 )]
437 fn evaluate_predicate_recursively<'o>(
438 &mut self,
439 previous_stack: TraitObligationStackList<'o, 'tcx>,
440 obligation: PredicateObligation<'tcx>,
441 ) -> Result<EvaluationResult, OverflowError> {
442 // `previous_stack` stores a `TraitObligation`, while `obligation` is
443 // a `PredicateObligation`. These are distinct types, so we can't
444 // use any `Option` combinator method that would force them to be
445 // the same.
446 match previous_stack.head() {
447 Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
448 None => self.check_recursion_limit(&obligation, &obligation)?,
449 }
450
451 let result = ensure_sufficient_stack(|| {
452 let bound_predicate = obligation.predicate.bound_atom();
453 match bound_predicate.skip_binder() {
454 ty::PredicateAtom::Trait(t, _) => {
455 let t = bound_predicate.rebind(t);
456 debug_assert!(!t.has_escaping_bound_vars());
457 let obligation = obligation.with(t);
458 self.evaluate_trait_predicate_recursively(previous_stack, obligation)
459 }
460
461 ty::PredicateAtom::Subtype(p) => {
462 let p = bound_predicate.rebind(p);
463 // Does this code ever run?
464 match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
465 Some(Ok(InferOk { mut obligations, .. })) => {
466 self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
467 self.evaluate_predicates_recursively(
468 previous_stack,
469 obligations.into_iter(),
470 )
471 }
472 Some(Err(_)) => Ok(EvaluatedToErr),
473 None => Ok(EvaluatedToAmbig),
474 }
475 }
476
477 ty::PredicateAtom::WellFormed(arg) => match wf::obligations(
478 self.infcx,
479 obligation.param_env,
480 obligation.cause.body_id,
481 obligation.recursion_depth + 1,
482 arg,
483 obligation.cause.span,
484 ) {
485 Some(mut obligations) => {
486 self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
487 self.evaluate_predicates_recursively(previous_stack, obligations)
488 }
489 None => Ok(EvaluatedToAmbig),
490 },
491
492 ty::PredicateAtom::TypeOutlives(..) | ty::PredicateAtom::RegionOutlives(..) => {
493 // We do not consider region relationships when evaluating trait matches.
494 Ok(EvaluatedToOkModuloRegions)
495 }
496
497 ty::PredicateAtom::ObjectSafe(trait_def_id) => {
498 if self.tcx().is_object_safe(trait_def_id) {
499 Ok(EvaluatedToOk)
500 } else {
501 Ok(EvaluatedToErr)
502 }
503 }
504
505 ty::PredicateAtom::Projection(data) => {
506 let data = bound_predicate.rebind(data);
507 let project_obligation = obligation.with(data);
508 match project::poly_project_and_unify_type(self, &project_obligation) {
509 Ok(Ok(Some(mut subobligations))) => {
510 self.add_depth(subobligations.iter_mut(), obligation.recursion_depth);
511 let result = self
512 .evaluate_predicates_recursively(previous_stack, subobligations);
513 if let Some(key) =
514 ProjectionCacheKey::from_poly_projection_predicate(self, data)
515 {
516 self.infcx.inner.borrow_mut().projection_cache().complete(key);
517 }
518 result
519 }
520 Ok(Ok(None)) => Ok(EvaluatedToAmbig),
521 Ok(Err(project::InProgress)) => Ok(EvaluatedToRecur),
522 Err(_) => Ok(EvaluatedToErr),
523 }
524 }
525
526 ty::PredicateAtom::ClosureKind(_, closure_substs, kind) => {
527 match self.infcx.closure_kind(closure_substs) {
528 Some(closure_kind) => {
529 if closure_kind.extends(kind) {
530 Ok(EvaluatedToOk)
531 } else {
532 Ok(EvaluatedToErr)
533 }
534 }
535 None => Ok(EvaluatedToAmbig),
536 }
537 }
538
539 ty::PredicateAtom::ConstEvaluatable(def_id, substs) => {
540 match const_evaluatable::is_const_evaluatable(
541 self.infcx,
542 def_id,
543 substs,
544 obligation.param_env,
545 obligation.cause.span,
546 ) {
547 Ok(()) => Ok(EvaluatedToOk),
548 Err(ErrorHandled::TooGeneric) => Ok(EvaluatedToAmbig),
549 Err(_) => Ok(EvaluatedToErr),
550 }
551 }
552
553 ty::PredicateAtom::ConstEquate(c1, c2) => {
554 debug!(?c1, ?c2, "evaluate_predicate_recursively: equating consts");
555
556 let evaluate = |c: &'tcx ty::Const<'tcx>| {
557 if let ty::ConstKind::Unevaluated(def, substs, promoted) = c.val {
558 self.infcx
559 .const_eval_resolve(
560 obligation.param_env,
561 def,
562 substs,
563 promoted,
564 Some(obligation.cause.span),
565 )
566 .map(|val| ty::Const::from_value(self.tcx(), val, c.ty))
567 } else {
568 Ok(c)
569 }
570 };
571
572 match (evaluate(c1), evaluate(c2)) {
573 (Ok(c1), Ok(c2)) => {
574 match self
575 .infcx()
576 .at(&obligation.cause, obligation.param_env)
577 .eq(c1, c2)
578 {
579 Ok(_) => Ok(EvaluatedToOk),
580 Err(_) => Ok(EvaluatedToErr),
581 }
582 }
583 (Err(ErrorHandled::Reported(ErrorReported)), _)
584 | (_, Err(ErrorHandled::Reported(ErrorReported))) => Ok(EvaluatedToErr),
585 (Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
586 span_bug!(
587 obligation.cause.span(self.tcx()),
588 "ConstEquate: const_eval_resolve returned an unexpected error"
589 )
590 }
591 (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
592 Ok(EvaluatedToAmbig)
593 }
594 }
595 }
596 ty::PredicateAtom::TypeWellFormedFromEnv(..) => {
597 bug!("TypeWellFormedFromEnv is only used for chalk")
598 }
599 }
600 });
601
602 debug!(?result);
603
604 result
605 }
606
607 fn evaluate_trait_predicate_recursively<'o>(
608 &mut self,
609 previous_stack: TraitObligationStackList<'o, 'tcx>,
610 mut obligation: TraitObligation<'tcx>,
611 ) -> Result<EvaluationResult, OverflowError> {
612 debug!(?obligation, "evaluate_trait_predicate_recursively");
613
614 if !self.intercrate
615 && obligation.is_global()
616 && obligation.param_env.caller_bounds().iter().all(|bound| bound.needs_subst())
617 {
618 // If a param env has no global bounds, global obligations do not
619 // depend on its particular value in order to work, so we can clear
620 // out the param env and get better caching.
621 debug!("evaluate_trait_predicate_recursively - in global");
622 obligation.param_env = obligation.param_env.without_caller_bounds();
623 }
624
625 let stack = self.push_stack(previous_stack, &obligation);
626 let fresh_trait_ref = stack.fresh_trait_ref;
627
628 debug!(?fresh_trait_ref);
629
630 if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
631 debug!(?result, "CACHE HIT");
632 return Ok(result);
633 }
634
635 if let Some(result) = stack.cache().get_provisional(fresh_trait_ref) {
636 debug!(?result, "PROVISIONAL CACHE HIT");
637 stack.update_reached_depth(stack.cache().current_reached_depth());
638 return Ok(result);
639 }
640
641 // Check if this is a match for something already on the
642 // stack. If so, we don't want to insert the result into the
643 // main cache (it is cycle dependent) nor the provisional
644 // cache (which is meant for things that have completed but
645 // for a "backedge" -- this result *is* the backedge).
646 if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
647 return Ok(cycle_result);
648 }
649
650 let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
651 let result = result?;
652
653 if !result.must_apply_modulo_regions() {
654 stack.cache().on_failure(stack.dfn);
655 }
656
657 let reached_depth = stack.reached_depth.get();
658 if reached_depth >= stack.depth {
659 debug!(?result, "CACHE MISS");
660 self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);
661
662 stack.cache().on_completion(stack.depth, |fresh_trait_ref, provisional_result| {
663 self.insert_evaluation_cache(
664 obligation.param_env,
665 fresh_trait_ref,
666 dep_node,
667 provisional_result.max(result),
668 );
669 });
670 } else {
671 debug!(?result, "PROVISIONAL");
672 debug!(
673 "evaluate_trait_predicate_recursively: caching provisionally because {:?} \
674 is a cycle participant (at depth {}, reached depth {})",
675 fresh_trait_ref, stack.depth, reached_depth,
676 );
677
678 stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_ref, result);
679 }
680
681 Ok(result)
682 }
683
684 /// If there is any previous entry on the stack that precisely
685 /// matches this obligation, then we can assume that the
686 /// obligation is satisfied for now (provided, of course, that all
687 /// other conditions are met). One obvious case where this comes up is
688 /// marker traits like `Send`. Think of a linked list:
689 ///
690 ///     struct List<T> { data: T, next: Option<Box<List<T>>> }
691 ///
692 /// `Box<List<T>>` will be `Send` if `T` is `Send` and
693 /// `Option<Box<List<T>>>` is `Send`, and in turn
694 /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
695 /// `Send`.
696 ///
697 /// Note that we do this comparison using the `fresh_trait_ref`
698 /// fields. Because these have all been freshened using
699 /// `self.freshener`, we can be sure that (a) this will not
700 /// affect the inferencer state and (b) that if we see two
701 /// fresh regions with the same index, they refer to the same
702 /// unbound type variable.
703 fn check_evaluation_cycle(
704 &mut self,
705 stack: &TraitObligationStack<'_, 'tcx>,
706 ) -> Option<EvaluationResult> {
707 if let Some(cycle_depth) = stack
708 .iter()
709 .skip(1) // Skip top-most frame.
710 .find(|prev| {
711 stack.obligation.param_env == prev.obligation.param_env
712 && stack.fresh_trait_ref == prev.fresh_trait_ref
713 })
714 .map(|stack| stack.depth)
715 {
716 debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
717
718 // If we have a stack like `A B C D E A`, where the top of
719 // the stack is the final `A`, then this will iterate over
720 // `A, E, D, C, B` -- i.e., all the participants apart
721 // from the cycle head. We mark them as participating in a
722 // cycle. This suppresses caching for those nodes. See
723 // `in_cycle` field for more details.
724 stack.update_reached_depth(cycle_depth);
725
726 // Subtle: when checking for a coinductive cycle, we do
727 // not compare using the "freshened trait refs" (which
728 // have erased regions) but rather the fully explicit
729 // trait refs. This is important because it's only a cycle
730 // if the regions match exactly.
731 let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
732 let tcx = self.tcx();
733 let cycle =
734 cycle.map(|stack| stack.obligation.predicate.without_const().to_predicate(tcx));
735 if self.coinductive_match(cycle) {
736 debug!("evaluate_stack --> recursive, coinductive");
737 Some(EvaluatedToOk)
738 } else {
739 debug!("evaluate_stack --> recursive, inductive");
740 Some(EvaluatedToRecur)
741 }
742 } else {
743 None
744 }
745 }
746
747 fn evaluate_stack<'o>(
748 &mut self,
749 stack: &TraitObligationStack<'o, 'tcx>,
750 ) -> Result<EvaluationResult, OverflowError> {
751 // In intercrate mode, whenever any of the generics are unbound,
752 // there can always be an impl. Even if there are no impls in
753 // this crate, perhaps the type would be unified with
754 // something from another crate that does provide an impl.
755 //
756 // In intra-crate mode, we must still be conservative. The reason is
757 // that we want to avoid cycles. Imagine an impl like:
758 //
759 // impl<T:Eq> Eq for Vec<T>
760 //
761 // and a trait reference like `$0 : Eq` where `$0` is an
762 // unbound variable. When we evaluate this trait-reference, we
763 // will unify `$0` with `Vec<$1>` (for some fresh variable
764 // `$1`), on the condition that `$1 : Eq`. We will then wind
765 // up with many candidates (since there are other `Eq` impls
766 // that apply) and try to winnow things down. This results in
767 // a recursive evaluation of `$1 : Eq` -- as you can
768 // imagine, this is just where we started. To avoid that, we
769 // check for unbound variables and return an ambiguous (hence possible)
770 // match if we've seen this trait before.
771 //
772 // This suffices to allow chains like `FnMut` implemented in
773 // terms of `Fn` etc, but we could probably make this more
774 // precise still.
775 let unbound_input_types =
776 stack.fresh_trait_ref.skip_binder().substs.types().any(|ty| ty.is_fresh());
777 // This check was an imperfect workaround for a bug in the old
778 // intercrate mode; it should be removed when that goes away.
779 if unbound_input_types && self.intercrate {
780 debug!("evaluate_stack --> unbound argument, intercrate --> ambiguous",);
781 // Heuristic: show the diagnostics when there are no candidates in this crate.
782 if self.intercrate_ambiguity_causes.is_some() {
783 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
784 if let Ok(candidate_set) = self.assemble_candidates(stack) {
785 if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
786 let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
787 let self_ty = trait_ref.self_ty();
788 let cause =
789 with_no_trimmed_paths(|| IntercrateAmbiguityCause::DownstreamCrate {
790 trait_desc: trait_ref.print_only_trait_path().to_string(),
791 self_desc: if self_ty.has_concrete_skeleton() {
792 Some(self_ty.to_string())
793 } else {
794 None
795 },
796 });
797
798 debug!(?cause, "evaluate_stack: pushing cause");
799 self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
800 }
801 }
802 }
803 return Ok(EvaluatedToAmbig);
804 }
805 if unbound_input_types
806 && stack.iter().skip(1).any(|prev| {
807 stack.obligation.param_env == prev.obligation.param_env
808 && self.match_fresh_trait_refs(
809 stack.fresh_trait_ref,
810 prev.fresh_trait_ref,
811 prev.obligation.param_env,
812 )
813 })
814 {
815 debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
816 return Ok(EvaluatedToUnknown);
817 }
818
819 match self.candidate_from_obligation(stack) {
820 Ok(Some(c)) => self.evaluate_candidate(stack, &c),
821 Ok(None) => Ok(EvaluatedToAmbig),
822 Err(Overflow) => Err(OverflowError),
823 Err(..) => Ok(EvaluatedToErr),
824 }
825 }
826
827 /// For defaulted traits, we use a co-inductive strategy to solve, so
828 /// that recursion is ok. This routine returns `true` if the top of the
829 /// stack (`cycle[0]`):
830 ///
831 /// - is a defaulted trait,
832 /// - it also appears in the backtrace at some position `X`,
833 /// - all the predicates at positions `X..` (that is, from `X` up to the top) are
834 /// also defaulted traits.
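///
/// A sketch of the kind of cycle this accepts, reusing the linked-list
/// example from `check_evaluation_cycle` (illustrative only):
///
/// ```ignore (illustrative)
/// struct List<T> { data: T, next: Option<Box<List<T>>> }
/// // Proving `List<T>: Send` requires `Option<Box<List<T>>>: Send`, which in
/// // turn requires `List<T>: Send` again. Because `Send` is an auto trait,
/// // this cycle is accepted coinductively rather than reported as an error.
/// ```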
835 pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
836 where
837 I: Iterator<Item = ty::Predicate<'tcx>>,
838 {
839 let mut cycle = cycle;
840 cycle.all(|predicate| self.coinductive_predicate(predicate))
841 }
842
843 fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
844 let result = match predicate.skip_binders() {
845 ty::PredicateAtom::Trait(ref data, _) => self.tcx().trait_is_auto(data.def_id()),
846 _ => false,
847 };
848 debug!(?predicate, ?result, "coinductive_predicate");
849 result
850 }
851
852 /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
853 /// obligations are met. Returns whether `candidate` remains viable after this further
854 /// scrutiny.
855 #[instrument(
856 level = "debug",
857 skip(self, stack),
858 fields(depth = stack.obligation.recursion_depth)
859 )]
860 fn evaluate_candidate<'o>(
861 &mut self,
862 stack: &TraitObligationStack<'o, 'tcx>,
863 candidate: &SelectionCandidate<'tcx>,
864 ) -> Result<EvaluationResult, OverflowError> {
865 let result = self.evaluation_probe(|this| {
866 let candidate = (*candidate).clone();
867 match this.confirm_candidate(stack.obligation, candidate) {
868 Ok(selection) => {
869 debug!(?selection);
870 this.evaluate_predicates_recursively(
871 stack.list(),
872 selection.nested_obligations().into_iter(),
873 )
874 }
875 Err(..) => Ok(EvaluatedToErr),
876 }
877 })?;
878 debug!(?result);
879 Ok(result)
880 }
881
882 fn check_evaluation_cache(
883 &self,
884 param_env: ty::ParamEnv<'tcx>,
885 trait_ref: ty::PolyTraitRef<'tcx>,
886 ) -> Option<EvaluationResult> {
887 let tcx = self.tcx();
888 if self.can_use_global_caches(param_env) {
889 if let Some(res) = tcx.evaluation_cache.get(&param_env.and(trait_ref), tcx) {
890 return Some(res);
891 }
892 }
893 self.infcx.evaluation_cache.get(&param_env.and(trait_ref), tcx)
894 }
895
896 fn insert_evaluation_cache(
897 &mut self,
898 param_env: ty::ParamEnv<'tcx>,
899 trait_ref: ty::PolyTraitRef<'tcx>,
900 dep_node: DepNodeIndex,
901 result: EvaluationResult,
902 ) {
903 // Avoid caching results that depend on more than just the trait-ref
904 // - the stack can create recursion.
905 if result.is_stack_dependent() {
906 return;
907 }
908
909 if self.can_use_global_caches(param_env) {
910 if !trait_ref.needs_infer() {
911 debug!(?trait_ref, ?result, "insert_evaluation_cache global");
912 // This may overwrite the cache with the same value
913 // FIXME: Due to #50507 this overwrites the different values
914 // This should be changed to use HashMapExt::insert_same
915 // when that is fixed
916 self.tcx().evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
917 return;
918 }
919 }
920
921 debug!(?trait_ref, ?result, "insert_evaluation_cache");
922 self.infcx.evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
923 }
924
925 /// For various reasons, it's possible for a subobligation
926 /// to have a *lower* recursion_depth than the obligation used to create it.
927 /// Projection sub-obligations may be returned from the projection cache,
928 /// which results in obligations with an 'old' `recursion_depth`.
929 /// Additionally, methods like `InferCtxt.subtype_predicate` produce
930 /// subobligations without taking in a 'parent' depth, causing the
931 /// generated subobligations to have a `recursion_depth` of `0`.
932 ///
933 /// To ensure that the `recursion_depth` never decreases, we force all subobligations
934 /// to have at least the depth of the original obligation.
935 fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(
936 &self,
937 it: I,
938 min_depth: usize,
939 ) {
940 it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
941 }
942
943 /// Checks that the recursion limit has not been exceeded.
944 ///
945 /// The weird return type of this function allows it to be used with the `try` (`?`)
946 /// operator within certain functions.
947 fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
948 &self,
949 obligation: &Obligation<'tcx, T>,
950 error_obligation: &Obligation<'tcx, V>,
951 ) -> Result<(), OverflowError> {
952 if !self.infcx.tcx.sess.recursion_limit().value_within_limit(obligation.recursion_depth) {
953 match self.query_mode {
954 TraitQueryMode::Standard => {
955 self.infcx().report_overflow_error(error_obligation, true);
956 }
957 TraitQueryMode::Canonical => {
958 return Err(OverflowError);
959 }
960 }
961 }
962 Ok(())
963 }
964
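/// Runs `op` inside an anonymous `TraitSelect` dep-graph task, records a read
/// of the resulting dep node, and returns the result together with the
/// `DepNodeIndex` (which is later passed along when inserting into the
/// evaluation and selection caches).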
965 fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
966 where
967 OP: FnOnce(&mut Self) -> R,
968 {
969 let (result, dep_node) =
970 self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || op(self));
971 self.tcx().dep_graph.read_index(dep_node);
972 (result, dep_node)
973 }
974
975 // Treat negative impls as unimplemented, and reservation impls as ambiguity.
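//
// For illustration: a negative impl such as `impl !Send for MyType {}` makes
// the candidate `Err(Unimplemented)` here (unless `allow_negative_impls` is
// set, as librustdoc does), while an impl annotated with
// `#[rustc_reservation_impl = "..."]` yields `Ok(None)` and, when tracking is
// enabled, records its message as an `IntercrateAmbiguityCause::ReservationImpl`.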
976 fn filter_negative_and_reservation_impls(
977 &mut self,
978 candidate: SelectionCandidate<'tcx>,
979 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
980 if let ImplCandidate(def_id) = candidate {
981 let tcx = self.tcx();
982 match tcx.impl_polarity(def_id) {
983 ty::ImplPolarity::Negative if !self.allow_negative_impls => {
984 return Err(Unimplemented);
985 }
986 ty::ImplPolarity::Reservation => {
987 if let Some(intercrate_ambiguity_clauses) =
988 &mut self.intercrate_ambiguity_causes
989 {
990 let attrs = tcx.get_attrs(def_id);
991 let attr = tcx.sess.find_by_name(&attrs, sym::rustc_reservation_impl);
992 let value = attr.and_then(|a| a.value_str());
993 if let Some(value) = value {
994 debug!(
995 "filter_negative_and_reservation_impls: \
996 reservation impl ambiguity on {:?}",
997 def_id
998 );
999 intercrate_ambiguity_clauses.push(
1000 IntercrateAmbiguityCause::ReservationImpl {
1001 message: value.to_string(),
1002 },
1003 );
1004 }
1005 }
1006 return Ok(None);
1007 }
1008 _ => {}
1009 };
1010 }
1011 Ok(Some(candidate))
1012 }
1013
1014 fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
1015 debug!("is_knowable(intercrate={:?})", self.intercrate);
1016
1017 if !self.intercrate {
1018 return None;
1019 }
1020
1021 let obligation = &stack.obligation;
1022 let predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
1023
1024 // Okay to skip binder because of the nature of the
1025 // trait-ref-is-knowable check, which does not care about
1026 // bound regions.
1027 let trait_ref = predicate.skip_binder().trait_ref;
1028
1029 coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
1030 }
1031
1032 /// Returns `true` if the global caches can be used.
1033 /// Do note that if the type itself is not in the
1034 /// global tcx, the local caches will be used.
1035 fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
1036 // If there are any inference variables in the `ParamEnv`, then we
1037 // always use a cache local to this particular scope. Otherwise, we
1038 // switch to a global cache.
1039 if param_env.needs_infer() {
1040 return false;
1041 }
1042
1043 // Avoid using the master cache during coherence and just rely
1044 // on the local cache. This effectively disables caching
1045 // during coherence. It is really just a simplification to
1046 // avoid us having to fear that coherence results "pollute"
1047 // the master cache. Since coherence executes pretty quickly,
1048 // it's not worth going to more trouble to increase the
1049 // hit-rate, I don't think.
1050 if self.intercrate {
1051 return false;
1052 }
1053
1054 // Otherwise, we can use the global cache.
1055 true
1056 }
1057
1058 fn check_candidate_cache(
1059 &mut self,
1060 param_env: ty::ParamEnv<'tcx>,
1061 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1062 ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
1063 let tcx = self.tcx();
1064 let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref;
1065 if self.can_use_global_caches(param_env) {
1066 if let Some(res) = tcx.selection_cache.get(&param_env.and(*trait_ref), tcx) {
1067 return Some(res);
1068 }
1069 }
1070 self.infcx.selection_cache.get(&param_env.and(*trait_ref), tcx)
1071 }
1072
1073 /// Determines whether we can safely cache the result
1074 /// of selecting an obligation. This is almost always `true`,
1075 /// except when dealing with certain `ParamCandidate`s.
1076 ///
1077 /// Ordinarily, a `ParamCandidate` will contain no inference variables,
1078 /// since it was usually produced directly from a `DefId`. However,
1079 /// in certain cases (currently only librustdoc's blanket impl finder),
1080 /// a `ParamEnv` may be explicitly constructed with inference types.
1081 /// When this is the case, we do *not* want to cache the resulting selection
1082 /// candidate. This is due to the fact that it might not always be possible
1083 /// to equate the obligation's trait ref and the candidate's trait ref,
1084 /// if more constraints end up getting added to an inference variable.
1085 ///
1086 /// Because of this, we always want to re-run the full selection
1087 /// process for our obligation the next time we see it, since
1088 /// we might end up picking a different `SelectionCandidate` (or none at all).
1089 fn can_cache_candidate(
1090 &self,
1091 result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1092 ) -> bool {
1093 match result {
1094 Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.needs_infer(),
1095 _ => true,
1096 }
1097 }
1098
1099 fn insert_candidate_cache(
1100 &mut self,
1101 param_env: ty::ParamEnv<'tcx>,
1102 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1103 dep_node: DepNodeIndex,
1104 candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1105 ) {
1106 let tcx = self.tcx();
1107 let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;
1108
1109 if !self.can_cache_candidate(&candidate) {
1110 debug!(?trait_ref, ?candidate, "insert_candidate_cache - candidate is not cacheable");
1111 return;
1112 }
1113
1114 if self.can_use_global_caches(param_env) {
1115 if let Err(Overflow) = candidate {
1116 // Don't cache overflow globally; we only produce this in certain modes.
1117 } else if !trait_ref.needs_infer() {
1118 if !candidate.needs_infer() {
1119 debug!(?trait_ref, ?candidate, "insert_candidate_cache global");
1120 // This may overwrite the cache with the same value.
1121 tcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
1122 return;
1123 }
1124 }
1125 }
1126
1127 debug!(?trait_ref, ?candidate, "insert_candidate_cache local");
1128 self.infcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
1129 }
1130
1131 /// Matches a predicate against the bounds of its self type.
1132 ///
1133 /// Given an obligation like `<T as Foo>::Bar: Baz` where the self type is
1134 /// a projection, look at the bounds of `T::Bar`, see if we can find a
1135 /// `Baz` bound. We return indexes into the list returned by
1136 /// `tcx.item_bounds` for any applicable bounds.
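///
/// For example (illustrative only):
///
/// ```ignore (illustrative)
/// trait Baz {}
/// trait Foo { type Bar: Baz; }
/// // For an obligation `<T as Foo>::Bar: Baz`, the declared bound `Baz` is
/// // found among the `tcx.item_bounds` of the associated type `Foo::Bar`,
/// // and its index in that list is returned.
/// ```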
1137 fn match_projection_obligation_against_definition_bounds(
1138 &mut self,
1139 obligation: &TraitObligation<'tcx>,
1140 ) -> smallvec::SmallVec<[usize; 2]> {
1141 let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
1142 let placeholder_trait_predicate =
1143 self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate);
1144 debug!(
1145 ?placeholder_trait_predicate,
1146 "match_projection_obligation_against_definition_bounds"
1147 );
1148
1149 let tcx = self.infcx.tcx;
1150 let (def_id, substs) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
1151 ty::Projection(ref data) => (data.item_def_id, data.substs),
1152 ty::Opaque(def_id, substs) => (def_id, substs),
1153 _ => {
1154 span_bug!(
1155 obligation.cause.span,
1156 "match_projection_obligation_against_definition_bounds() called \
1157 but self-ty is not a projection: {:?}",
1158 placeholder_trait_predicate.trait_ref.self_ty()
1159 );
1160 }
1161 };
1162 let bounds = tcx.item_bounds(def_id).subst(tcx, substs);
1163
1164 // The bounds returned by `item_bounds` may contain duplicates after
1165 // normalization, so try to deduplicate when possible to avoid
1166 // unnecessary ambiguity.
1167 let mut distinct_normalized_bounds = FxHashSet::default();
1168
1169 let matching_bounds = bounds
1170 .iter()
1171 .enumerate()
1172 .filter_map(|(idx, bound)| {
1173 let bound_predicate = bound.bound_atom();
1174 if let ty::PredicateAtom::Trait(pred, _) = bound_predicate.skip_binder() {
1175 let bound = bound_predicate.rebind(pred.trait_ref);
1176 if self.infcx.probe(|_| {
1177 match self.match_normalize_trait_ref(
1178 obligation,
1179 bound,
1180 placeholder_trait_predicate.trait_ref,
1181 ) {
1182 Ok(None) => true,
1183 Ok(Some(normalized_trait))
1184 if distinct_normalized_bounds.insert(normalized_trait) =>
1185 {
1186 true
1187 }
1188 _ => false,
1189 }
1190 }) {
1191 return Some(idx);
1192 }
1193 }
1194 None
1195 })
1196 .collect();
1197
1198 debug!(?matching_bounds, "match_projection_obligation_against_definition_bounds");
1199 matching_bounds
1200 }
1201
1202 /// Equates the trait in `obligation` with a trait bound. If the two traits
1203 /// can be equated and the normalized trait bound doesn't contain inference
1204 /// variables or placeholders, the normalized bound is returned.
1205 fn match_normalize_trait_ref(
1206 &mut self,
1207 obligation: &TraitObligation<'tcx>,
1208 trait_bound: ty::PolyTraitRef<'tcx>,
1209 placeholder_trait_ref: ty::TraitRef<'tcx>,
1210 ) -> Result<Option<ty::PolyTraitRef<'tcx>>, ()> {
1211 debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
1212 if placeholder_trait_ref.def_id != trait_bound.def_id() {
1213 // Avoid unnecessary normalization
1214 return Err(());
1215 }
1216
1217 let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
1218 project::normalize_with_depth(
1219 self,
1220 obligation.param_env,
1221 obligation.cause.clone(),
1222 obligation.recursion_depth + 1,
1223 &trait_bound,
1224 )
1225 });
1226 self.infcx
1227 .at(&obligation.cause, obligation.param_env)
1228 .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
1229 .map(|InferOk { obligations: _, value: () }| {
1230 // This method is called within a probe, so we can't have
1231 // inference variables and placeholders escape.
1232 if !trait_bound.needs_infer() && !trait_bound.has_placeholders() {
1233 Some(trait_bound)
1234 } else {
1235 None
1236 }
1237 })
1238 .map_err(|_| ())
1239 }
1240
1241 fn evaluate_where_clause<'o>(
1242 &mut self,
1243 stack: &TraitObligationStack<'o, 'tcx>,
1244 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1245 ) -> Result<EvaluationResult, OverflowError> {
1246 self.evaluation_probe(|this| {
1247 match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
1248 Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
1249 Err(()) => Ok(EvaluatedToErr),
1250 }
1251 })
1252 }
1253
1254 pub(super) fn match_projection_projections(
1255 &mut self,
1256 obligation: &ProjectionTyObligation<'tcx>,
1257 obligation_trait_ref: &ty::TraitRef<'tcx>,
1258 data: &PolyProjectionPredicate<'tcx>,
1259 potentially_unnormalized_candidates: bool,
1260 ) -> bool {
1261 let mut nested_obligations = Vec::new();
1262 let projection_ty = if potentially_unnormalized_candidates {
1263 ensure_sufficient_stack(|| {
1264 project::normalize_with_depth_to(
1265 self,
1266 obligation.param_env,
1267 obligation.cause.clone(),
1268 obligation.recursion_depth + 1,
1269 &data.map_bound_ref(|data| data.projection_ty),
1270 &mut nested_obligations,
1271 )
1272 })
1273 } else {
1274 data.map_bound_ref(|data| data.projection_ty)
1275 };
1276
1277 // FIXME(generic_associated_types): Compare the whole projections
1278 let data_poly_trait_ref = projection_ty.map_bound(|proj| proj.trait_ref(self.tcx()));
1279 let obligation_poly_trait_ref = obligation_trait_ref.to_poly_trait_ref();
1280 self.infcx
1281 .at(&obligation.cause, obligation.param_env)
1282 .sup(obligation_poly_trait_ref, data_poly_trait_ref)
1283 .map_or(false, |InferOk { obligations, value: () }| {
1284 self.evaluate_predicates_recursively(
1285 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
1286 nested_obligations.into_iter().chain(obligations),
1287 )
1288 .map_or(false, |res| res.may_apply())
1289 })
1290 }
1291
1292 ///////////////////////////////////////////////////////////////////////////
1293 // WINNOW
1294 //
1295 // Winnowing is the process of attempting to resolve ambiguity by
1296 // probing further. During the winnowing process, we unify all
1297 // type variables and then we also attempt to evaluate recursive
1298 // bounds to see if they are satisfied.
1299
1300 /// Returns `true` if `victim` should be dropped in favor of
1301 /// `other`. Generally speaking we will drop duplicate
1302 /// candidates and prefer where-clause candidates.
1303 ///
1304 /// See the comment for "SelectionCandidate" for more details.
1305 fn candidate_should_be_dropped_in_favor_of(
1306 &mut self,
1307 victim: &EvaluatedCandidate<'tcx>,
1308 other: &EvaluatedCandidate<'tcx>,
1309 needs_infer: bool,
1310 ) -> bool {
1311 if victim.candidate == other.candidate {
1312 return true;
1313 }
1314
1315 // Check if a bound would previously have been removed when normalizing
1316 // the param_env so that it can be given the lowest priority. See
1317 // #50825 for the motivation for this.
1318 let is_global =
1319 |cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();
1320
1321 // (*) Prefer `BuiltinCandidate { has_nested: false }` and `DiscriminantKindCandidate`
1322 // to anything else.
1323 //
1324 // This is a fix for #53123 and prevents winnowing from accidentally extending the
1325 // lifetime of a variable.
1326 match (&other.candidate, &victim.candidate) {
1327 (_, AutoImplCandidate(..)) | (AutoImplCandidate(..), _) => {
1328 bug!(
1329 "default implementations shouldn't be recorded \
1330 when there are other valid candidates"
1331 );
1332 }
1333
1334 // (*)
1335 (BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate, _) => true,
1336 (_, BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate) => false,
1337
1338 (ParamCandidate(..), ParamCandidate(..)) => false,
1339
1340 // Global bounds from the where clause should be ignored
1341 // here (see issue #50825). Otherwise, we have a where
1342 // clause so don't go around looking for impls.
1343 // Arbitrarily give param candidates priority
1344 // over projection and object candidates.
1345 (
1346 ParamCandidate(ref cand),
1347 ImplCandidate(..)
1348 | ClosureCandidate
1349 | GeneratorCandidate
1350 | FnPointerCandidate
1351 | BuiltinObjectCandidate
1352 | BuiltinUnsizeCandidate
1353 | BuiltinCandidate { .. }
1354 | TraitAliasCandidate(..)
1355 | ObjectCandidate(_)
1356 | ProjectionCandidate(_),
1357 ) => !is_global(cand),
1358 (ObjectCandidate(_) | ProjectionCandidate(_), ParamCandidate(ref cand)) => {
1359 // Prefer these to a global where-clause bound
1360 // (see issue #50825).
1361 is_global(cand)
1362 }
1363 (
1364 ImplCandidate(_)
1365 | ClosureCandidate
1366 | GeneratorCandidate
1367 | FnPointerCandidate
1368 | BuiltinObjectCandidate
1369 | BuiltinUnsizeCandidate
1370 | BuiltinCandidate { has_nested: true }
1371 | TraitAliasCandidate(..),
1372 ParamCandidate(ref cand),
1373 ) => {
1374 // Prefer these to a global where-clause bound
1375 // (see issue #50825).
1376 is_global(cand) && other.evaluation.must_apply_modulo_regions()
1377 }
1378
1379 (ProjectionCandidate(i), ProjectionCandidate(j))
1380 | (ObjectCandidate(i), ObjectCandidate(j)) => {
1381 // Arbitrarily pick the lower numbered candidate for backwards
1382 // compatibility reasons. Don't let this affect inference.
1383 i < j && !needs_infer
1384 }
1385 (ObjectCandidate(_), ProjectionCandidate(_))
1386 | (ProjectionCandidate(_), ObjectCandidate(_)) => {
1387 bug!("Have both object and projection candidate")
1388 }
1389
1390 // Arbitrarily give projection and object candidates priority.
1391 (
1392 ObjectCandidate(_) | ProjectionCandidate(_),
1393 ImplCandidate(..)
1394 | ClosureCandidate
1395 | GeneratorCandidate
1396 | FnPointerCandidate
1397 | BuiltinObjectCandidate
1398 | BuiltinUnsizeCandidate
1399 | BuiltinCandidate { .. }
1400 | TraitAliasCandidate(..),
1401 ) => true,
1402
1403 (
1404 ImplCandidate(..)
1405 | ClosureCandidate
1406 | GeneratorCandidate
1407 | FnPointerCandidate
1408 | BuiltinObjectCandidate
1409 | BuiltinUnsizeCandidate
1410 | BuiltinCandidate { .. }
1411 | TraitAliasCandidate(..),
1412 ObjectCandidate(_) | ProjectionCandidate(_),
1413 ) => false,
1414
1415 (&ImplCandidate(other_def), &ImplCandidate(victim_def)) => {
1416 // See if we can toss out `victim` based on specialization.
1417 // This requires us to know *for sure* that the `other` impl applies
1418 // i.e., `EvaluatedToOk`.
1419 if other.evaluation.must_apply_modulo_regions() {
1420 let tcx = self.tcx();
1421 if tcx.specializes((other_def, victim_def)) {
1422 return true;
1423 }
1424 return match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
1425 Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
1426 // Subtle: If the predicate we are evaluating has inference
1427 // variables, do *not* allow discarding candidates due to
1428 // marker trait impls.
1429 //
1430 // Without this restriction, we could end up accidentally
1431 // constraining inference variables based on an arbitrarily
1432 // chosen trait impl.
1433 //
1434 // Imagine we have the following code:
1435 //
1436 // ```rust
1437 // #[marker] trait MyTrait {}
1438 // impl MyTrait for u8 {}
1439 // impl MyTrait for bool {}
1440 // ```
1441 //
1442 // And we are evaluating the predicate `<_#0t as MyTrait>`.
1443 //
1444 // During selection, we will end up with one candidate for each
1445 // impl of `MyTrait`. If we were to discard one impl in favor
1446 // of the other, we would be left with one candidate, causing
1447 // us to "successfully" select the predicate, unifying
1448 // _#0t with (for example) `u8`.
1449 //
1450 // However, we have no reason to believe that this unification
1451 // is correct - we've essentially just picked an arbitrary
1452 // *possibility* for _#0t, and required that this be the *only*
1453 // possibility.
1454 //
1455 // Eventually, we will either:
1456 // 1) Unify all inference variables in the predicate through
1457 // some other means (e.g. type-checking of a function). We will
1458 // then be in a position to drop marker trait candidates
1459 // without constraining inference variables (since there are
1460 // none left to constrain)
1461 // 2) Be left with some unconstrained inference variables. We
1462 // will then correctly report an inference error, since the
1463 // existence of multiple marker trait impls tells us nothing
1464 // about which one should actually apply.
1465 !needs_infer
1466 }
1467 Some(_) => true,
1468 None => false,
1469 };
1470 } else {
1471 false
1472 }
1473 }
1474
1475 // Everything else is ambiguous
1476 (
1477 ImplCandidate(_)
1478 | ClosureCandidate
1479 | GeneratorCandidate
1480 | FnPointerCandidate
1481 | BuiltinObjectCandidate
1482 | BuiltinUnsizeCandidate
1483 | BuiltinCandidate { has_nested: true }
1484 | TraitAliasCandidate(..),
1485 ImplCandidate(_)
1486 | ClosureCandidate
1487 | GeneratorCandidate
1488 | FnPointerCandidate
1489 | BuiltinObjectCandidate
1490 | BuiltinUnsizeCandidate
1491 | BuiltinCandidate { has_nested: true }
1492 | TraitAliasCandidate(..),
1493 ) => false,
1494 }
1495 }
1496
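/// Computes the conditions under which the builtin `Sized` impl applies to
/// the self type of `obligation`. For example, a tuple is `Sized` if its
/// last element is, an ADT is `Sized` if the types in its `sized_constraint`
/// are, and `str`, slices, trait objects and foreign types have no builtin
/// impl at all (`None`).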
1497 fn sized_conditions(
1498 &mut self,
1499 obligation: &TraitObligation<'tcx>,
1500 ) -> BuiltinImplConditions<'tcx> {
1501 use self::BuiltinImplConditions::{Ambiguous, None, Where};
1502
1503 // NOTE: binder moved to (*)
1504 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
1505
1506 match self_ty.kind() {
1507 ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
1508 | ty::Uint(_)
1509 | ty::Int(_)
1510 | ty::Bool
1511 | ty::Float(_)
1512 | ty::FnDef(..)
1513 | ty::FnPtr(_)
1514 | ty::RawPtr(..)
1515 | ty::Char
1516 | ty::Ref(..)
1517 | ty::Generator(..)
1518 | ty::GeneratorWitness(..)
1519 | ty::Array(..)
1520 | ty::Closure(..)
1521 | ty::Never
1522 | ty::Error(_) => {
1523 // `Sized` holds unconditionally for all of these.
1524 Where(ty::Binder::dummy(Vec::new()))
1525 }
1526
1527 ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
1528
1529 ty::Tuple(tys) => Where(
1530 obligation
1531 .predicate
1532 .rebind(tys.last().into_iter().map(|k| k.expect_ty()).collect()),
1533 ),
1534
1535 ty::Adt(def, substs) => {
1536 let sized_crit = def.sized_constraint(self.tcx());
1537 // (*) binder moved here
1538 Where(
1539 obligation.predicate.rebind({
1540 sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect()
1541 }),
1542 )
1543 }
1544
1545 ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
1546 ty::Infer(ty::TyVar(_)) => Ambiguous,
1547
1548 ty::Placeholder(..)
1549 | ty::Bound(..)
1550 | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
1551 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
1552 }
1553 }
1554 }
1555
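/// Computes the conditions under which the builtin `Copy`/`Clone` impl
/// applies to the self type of `obligation`. For example, an array or tuple
/// is eligible if its element types are, a closure is eligible if its
/// captured upvar types are, and ADTs, `&mut T` and the remaining cases fall
/// back to whatever impls libcore or the user provide (`None`).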
1556 fn copy_clone_conditions(
1557 &mut self,
1558 obligation: &TraitObligation<'tcx>,
1559 ) -> BuiltinImplConditions<'tcx> {
1560 // NOTE: binder moved to (*)
1561 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
1562
1563 use self::BuiltinImplConditions::{Ambiguous, None, Where};
1564
1565 match *self_ty.kind() {
1566 ty::Infer(ty::IntVar(_))
1567 | ty::Infer(ty::FloatVar(_))
1568 | ty::FnDef(..)
1569 | ty::FnPtr(_)
1570 | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
1571
1572 ty::Uint(_)
1573 | ty::Int(_)
1574 | ty::Bool
1575 | ty::Float(_)
1576 | ty::Char
1577 | ty::RawPtr(..)
1578 | ty::Never
1579 | ty::Ref(_, _, hir::Mutability::Not) => {
1580 // Implementations provided in libcore
1581 None
1582 }
1583
1584 ty::Dynamic(..)
1585 | ty::Str
1586 | ty::Slice(..)
1587 | ty::Generator(..)
1588 | ty::GeneratorWitness(..)
1589 | ty::Foreign(..)
1590 | ty::Ref(_, _, hir::Mutability::Mut) => None,
1591
1592 ty::Array(element_ty, _) => {
1593 // (*) binder moved here
1594 Where(obligation.predicate.rebind(vec![element_ty]))
1595 }
1596
1597 ty::Tuple(tys) => {
1598 // (*) binder moved here
1599 Where(obligation.predicate.rebind(tys.iter().map(|k| k.expect_ty()).collect()))
1600 }
1601
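// A closure is `Copy`/`Clone` iff all of its captured upvars are.
// E.g. (illustrative) `move || n + 1` with `n: u32` is both `Copy`
// and `Clone`, while `move || s.len()` with a captured `s: String`
// is `Clone` but not `Copy`. If the upvar types have not been
// inferred yet we cannot tell, hence the `Ambiguous` below.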
1602 ty::Closure(_, substs) => {
1603 // (*) binder moved here
1604 let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
1605 if let ty::Infer(ty::TyVar(_)) = ty.kind() {
1606 // Not yet resolved.
1607 Ambiguous
1608 } else {
1609 Where(obligation.predicate.rebind(substs.as_closure().upvar_tys().collect()))
1610 }
1611 }
1612
1613 ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
1614 // Fallback to whatever user-defined impls exist in this case.
1615 None
1616 }
1617
1618 ty::Infer(ty::TyVar(_)) => {
1619 // Unbound type variable. Might or might not have
1620 // applicable impls and so forth, depending on what
1621 // those type variables wind up being bound to.
1622 Ambiguous
1623 }
1624
1625 ty::Placeholder(..)
1626 | ty::Bound(..)
1627 | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
1628 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
1629 }
1630 }
1631 }
1632
1633 /// For default impls, we need to break apart a type into its
1634 /// "constituent types" -- meaning, the types that it contains.
1635 ///
1636 /// Here are some (simple) examples:
1637 ///
1638 /// ```
1639 /// (i32, u32) -> [i32, u32]
1640 /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
1641 /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
1642 /// Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
1643 /// ```
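///
/// A couple more cases, for illustration (the `match` below is the
/// authoritative list):
///
/// ```
/// &'a i32 -> [i32]
/// [u8; 4] -> [u8]
/// ```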
1644 fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
1645 match *t.kind() {
1646 ty::Uint(_)
1647 | ty::Int(_)
1648 | ty::Bool
1649 | ty::Float(_)
1650 | ty::FnDef(..)
1651 | ty::FnPtr(_)
1652 | ty::Str
1653 | ty::Error(_)
1654 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
1655 | ty::Never
1656 | ty::Char => Vec::new(),
1657
1658 ty::Placeholder(..)
1659 | ty::Dynamic(..)
1660 | ty::Param(..)
1661 | ty::Foreign(..)
1662 | ty::Projection(..)
1663 | ty::Bound(..)
1664 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
1665 bug!("asked to assemble constituent types of unexpected type: {:?}", t);
1666 }
1667
1668 ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
1669 vec![element_ty]
1670 }
1671
1672 ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty],
1673
1674 ty::Tuple(ref tys) => {
1675 // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
1676 tys.iter().map(|k| k.expect_ty()).collect()
1677 }
1678
1679 ty::Closure(_, ref substs) => {
1680 let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
1681 vec![ty]
1682 }
1683
1684 ty::Generator(_, ref substs, _) => {
1685 let ty = self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
1686 let witness = substs.as_generator().witness();
1687 vec![ty].into_iter().chain(iter::once(witness)).collect()
1688 }
1689
1690 ty::GeneratorWitness(types) => {
1691 // This is sound because no regions in the witness can refer to
1692 // the binder outside the witness. So we'll effectively reuse
1693 // the implicit binder around the witness.
1694 types.skip_binder().to_vec()
1695 }
1696
1697 // For `PhantomData<T>`, we pass `T`.
1698 ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),
1699
1700 ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),
1701
1702 ty::Opaque(def_id, substs) => {
1703 // We can resolve the `impl Trait` to its concrete type,
1704 // which enforces a DAG between the functions requiring
1705 // the auto trait bounds in question.
1706 vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
1707 }
1708 }
1709 }
1710
1711 fn collect_predicates_for_types(
1712 &mut self,
1713 param_env: ty::ParamEnv<'tcx>,
1714 cause: ObligationCause<'tcx>,
1715 recursion_depth: usize,
1716 trait_def_id: DefId,
1717 types: ty::Binder<Vec<Ty<'tcx>>>,
1718 ) -> Vec<PredicateObligation<'tcx>> {
1719 // Because the types were potentially derived from
1720 // higher-ranked obligations they may reference late-bound
1721 // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
1722 // yield a type like `for<'a> &'a i32`. In general, we
1723 // maintain the invariant that we never manipulate bound
1724 // regions, so we have to process these bound regions somehow.
1725 //
1726 // The strategy is to:
1727 //
1728 // 1. Instantiate those regions to placeholder regions (e.g.,
1729 // `for<'a> &'a i32` becomes `&'0 i32`).
1730 // 2. Produce something like `&'0 i32 : Copy`
1731 // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
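//
// In the body below, step 1 corresponds to
// `replace_bound_vars_with_placeholders`, and step 2 to building the
// trait predicate via `predicate_for_trait_def` on the normalized
// placeholder type.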
1732
1733 types
1734 .skip_binder() // binder moved -\
1735 .iter()
1736 .flat_map(|ty| {
1737 let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/
1738
1739 self.infcx.commit_unconditionally(|_| {
1740 let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(&ty);
1741 let Normalized { value: normalized_ty, mut obligations } =
1742 ensure_sufficient_stack(|| {
1743 project::normalize_with_depth(
1744 self,
1745 param_env,
1746 cause.clone(),
1747 recursion_depth,
1748 &placeholder_ty,
1749 )
1750 });
1751 let placeholder_obligation = predicate_for_trait_def(
1752 self.tcx(),
1753 param_env,
1754 cause.clone(),
1755 trait_def_id,
1756 recursion_depth,
1757 normalized_ty,
1758 &[],
1759 );
1760 obligations.push(placeholder_obligation);
1761 obligations
1762 })
1763 })
1764 .collect()
1765 }
1766
1767 ///////////////////////////////////////////////////////////////////////////
1768 // Matching
1769 //
1770 // Matching is a common path used for both evaluation and
1771 // confirmation. It basically unifies types that appear in impls
1772 // and traits. This does affect the surrounding environment;
1773 // therefore, when used during evaluation, match routines must be
1774 // run inside of a `probe()` so that their side-effects are
1775 // contained.
1776
1777 fn rematch_impl(
1778 &mut self,
1779 impl_def_id: DefId,
1780 obligation: &TraitObligation<'tcx>,
1781 ) -> Normalized<'tcx, SubstsRef<'tcx>> {
1782 match self.match_impl(impl_def_id, obligation) {
1783 Ok(substs) => substs,
1784 Err(()) => {
1785 bug!(
1786 "Impl {:?} was matchable against {:?} but now is not",
1787 impl_def_id,
1788 obligation
1789 );
1790 }
1791 }
1792 }
1793
1794 fn match_impl(
1795 &mut self,
1796 impl_def_id: DefId,
1797 obligation: &TraitObligation<'tcx>,
1798 ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
1799 debug!(?impl_def_id, ?obligation, "match_impl");
1800 let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
1801
1802 // Before we create the substitutions and everything, first
1803 // consider a "quick reject". This avoids creating more types
1804 // and substitutions than we actually need.
1805 if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
1806 return Err(());
1807 }
1808
1809 let placeholder_obligation =
1810 self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate);
1811 let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
1812
1813 let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
1814
1815 let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
1816
1817 let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
1818 ensure_sufficient_stack(|| {
1819 project::normalize_with_depth(
1820 self,
1821 obligation.param_env,
1822 obligation.cause.clone(),
1823 obligation.recursion_depth + 1,
1824 &impl_trait_ref,
1825 )
1826 });
1827
1828 debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
1829
1830 let InferOk { obligations, .. } = self
1831 .infcx
1832 .at(&obligation.cause, obligation.param_env)
1833 .eq(placeholder_obligation_trait_ref, impl_trait_ref)
1834 .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
1835 nested_obligations.extend(obligations);
1836
1837 if !self.intercrate
1838 && self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
1839 {
1840 debug!("match_impl: reservation impls only apply in intercrate mode");
1841 return Err(());
1842 }
1843
1844 debug!(?impl_substs, "match_impl: success");
1845 Ok(Normalized { value: impl_substs, obligations: nested_obligations })
1846 }
1847
1848 fn fast_reject_trait_refs(
1849 &mut self,
1850 obligation: &TraitObligation<'_>,
1851 impl_trait_ref: &ty::TraitRef<'_>,
1852 ) -> bool {
1853 // We can avoid creating type variables and doing the full
1854 // substitution if we find that any of the input types, when
1855 // simplified, do not match.
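//
// For example (illustrative trait and impls), an obligation
// `Vec<?T>: Trait` can be rejected against an
// `impl Trait for HashMap<K, V>` because the simplified types
// (`Vec` vs. `HashMap`) differ, but not against an
// `impl<U> Trait for Vec<U>`, where the simplified types agree and a
// full match is still required.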
1856
1857 obligation.predicate.skip_binder().trait_ref.substs.iter().zip(impl_trait_ref.substs).any(
1858 |(obligation_arg, impl_arg)| {
1859 match (obligation_arg.unpack(), impl_arg.unpack()) {
1860 (GenericArgKind::Type(obligation_ty), GenericArgKind::Type(impl_ty)) => {
1861 let simplified_obligation_ty =
1862 fast_reject::simplify_type(self.tcx(), obligation_ty, true);
1863 let simplified_impl_ty =
1864 fast_reject::simplify_type(self.tcx(), impl_ty, false);
1865
1866 simplified_obligation_ty.is_some()
1867 && simplified_impl_ty.is_some()
1868 && simplified_obligation_ty != simplified_impl_ty
1869 }
1870 (GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => {
1871 // Lifetimes can never cause a rejection.
1872 false
1873 }
1874 (GenericArgKind::Const(_), GenericArgKind::Const(_)) => {
1875 // Conservatively ignore consts (i.e. assume they might
1876 // unify later) until we have `fast_reject` support for
1877 // them (if we'll ever need it, even).
1878 false
1879 }
1880 _ => unreachable!(),
1881 }
1882 },
1883 )
1884 }
1885
1886 /// Normalize `where_clause_trait_ref` and try to match it against
1887 /// `obligation`. If successful, return any predicates that
1888 /// result from the normalization.
1889 fn match_where_clause_trait_ref(
1890 &mut self,
1891 obligation: &TraitObligation<'tcx>,
1892 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1893 ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
1894 self.match_poly_trait_ref(obligation, where_clause_trait_ref)
1895 }
1896
1897 /// Returns `Ok` if `poly_trait_ref` being true implies that the
1898 /// obligation is satisfied.
1899 fn match_poly_trait_ref(
1900 &mut self,
1901 obligation: &TraitObligation<'tcx>,
1902 poly_trait_ref: ty::PolyTraitRef<'tcx>,
1903 ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
1904 debug!(?obligation, ?poly_trait_ref, "match_poly_trait_ref");
1905
1906 self.infcx
1907 .at(&obligation.cause, obligation.param_env)
1908 .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
1909 .map(|InferOk { obligations, .. }| obligations)
1910 .map_err(|_| ())
1911 }
1912
1913 ///////////////////////////////////////////////////////////////////////////
1914 // Miscellany
1915
1916 fn match_fresh_trait_refs(
1917 &self,
1918 previous: ty::PolyTraitRef<'tcx>,
1919 current: ty::PolyTraitRef<'tcx>,
1920 param_env: ty::ParamEnv<'tcx>,
1921 ) -> bool {
1922 let mut matcher = ty::_match::Match::new(self.tcx(), param_env);
1923 matcher.relate(previous, current).is_ok()
1924 }
1925
1926 fn push_stack<'o>(
1927 &mut self,
1928 previous_stack: TraitObligationStackList<'o, 'tcx>,
1929 obligation: &'o TraitObligation<'tcx>,
1930 ) -> TraitObligationStack<'o, 'tcx> {
1931 let fresh_trait_ref =
1932 obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);
1933
1934 let dfn = previous_stack.cache.next_dfn();
1935 let depth = previous_stack.depth() + 1;
1936 TraitObligationStack {
1937 obligation,
1938 fresh_trait_ref,
1939 reached_depth: Cell::new(depth),
1940 previous: previous_stack,
1941 dfn,
1942 depth,
1943 }
1944 }
1945
1946 fn closure_trait_ref_unnormalized(
1947 &mut self,
1948 obligation: &TraitObligation<'tcx>,
1949 substs: SubstsRef<'tcx>,
1950 ) -> ty::PolyTraitRef<'tcx> {
1951 debug!(?obligation, ?substs, "closure_trait_ref_unnormalized");
1952 let closure_sig = substs.as_closure().sig();
1953
1954 debug!(?closure_sig);
1955
1956 // (1) Feels icky to skip the binder here, but OTOH we know
1957 // that the self-type is an unboxed closure type and hence is
1958 // in fact unparameterized (or at least does not reference any
1959 // regions bound in the obligation). Still probably some
1960 // refactoring could make this nicer.
1961 closure_trait_ref_and_return_type(
1962 self.tcx(),
1963 obligation.predicate.def_id(),
1964 obligation.predicate.skip_binder().self_ty(), // (1)
1965 closure_sig,
1966 util::TupleArgumentsFlag::No,
1967 )
1968 .map_bound(|(trait_ref, _)| trait_ref)
1969 }
1970
1971 fn generator_trait_ref_unnormalized(
1972 &mut self,
1973 obligation: &TraitObligation<'tcx>,
1974 substs: SubstsRef<'tcx>,
1975 ) -> ty::PolyTraitRef<'tcx> {
1976 let gen_sig = substs.as_generator().poly_sig();
1977
1978 // (1) Feels icky to skip the binder here, but OTOH we know
1979 // that the self-type is a generator type and hence is
1980 // in fact unparameterized (or at least does not reference any
1981 // regions bound in the obligation). Still probably some
1982 // refactoring could make this nicer.
1983
1984 super::util::generator_trait_ref_and_outputs(
1985 self.tcx(),
1986 obligation.predicate.def_id(),
1987 obligation.predicate.skip_binder().self_ty(), // (1)
1988 gen_sig,
1989 )
1990 .map_bound(|(trait_ref, ..)| trait_ref)
1991 }
1992
1993 /// Returns the obligations that are implied by instantiating an
1994 /// impl or trait. The obligations are substituted and fully
1995 /// normalized. This is used when confirming an impl or default
1996 /// impl.
1997 fn impl_or_trait_obligations(
1998 &mut self,
1999 cause: ObligationCause<'tcx>,
2000 recursion_depth: usize,
2001 param_env: ty::ParamEnv<'tcx>,
2002 def_id: DefId, // of impl or trait
2003 substs: SubstsRef<'tcx>, // for impl or trait
2004 ) -> Vec<PredicateObligation<'tcx>> {
2005 debug!(?def_id, "impl_or_trait_obligations");
2006 let tcx = self.tcx();
2007
2008 // To allow for one-pass evaluation of the nested obligation,
2009 // each predicate must be preceded by the obligations required
2010 // to normalize it.
2011 // for example, if we have:
2012 // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
2013 // the impl will have the following predicates:
2014 // <V as Iterator>::Item = U,
2015 // U: Iterator, U: Sized,
2016 // V: Iterator, V: Sized,
2017 // <U as Iterator>::Item: Copy
2018 // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
2019 // obligation will normalize to `<$0 as Iterator>::Item = $1` and
2020 // `$1: Copy`, so we must ensure the obligations are emitted in
2021 // that order.
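// The loop below preserves this order: `normalize_with_depth_to`
// first appends the obligations required for normalization to
// `obligations`, and only afterwards do we push the normalized
// predicate itself.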
2022 let predicates = tcx.predicates_of(def_id);
2023 assert_eq!(predicates.parent, None);
2024 let mut obligations = Vec::with_capacity(predicates.predicates.len());
2025 for (predicate, _) in predicates.predicates {
2026 let predicate = normalize_with_depth_to(
2027 self,
2028 param_env,
2029 cause.clone(),
2030 recursion_depth,
2031 &predicate.subst(tcx, substs),
2032 &mut obligations,
2033 );
2034 obligations.push(Obligation {
2035 cause: cause.clone(),
2036 recursion_depth,
2037 param_env,
2038 predicate,
2039 });
2040 }
2041
2042 // We are performing deduplication here to avoid exponential blowups
2043 // (#38528) from happening, but the real cause of the duplication is
2044 // unknown. What we know is that the deduplication avoids an exponential
2045 // number of predicates being propagated when processing deeply nested
2046 // types.
2047 //
2048 // This code is hot enough that it's worth avoiding the allocation
2049 // required for the FxHashSet when possible. Special-casing lengths 0,
2050 // 1 and 2 covers roughly 75-80% of the cases.
2051 if obligations.len() <= 1 {
2052 // No possibility of duplicates.
2053 } else if obligations.len() == 2 {
2054 // Only two elements. Drop the second if they are equal.
2055 if obligations[0] == obligations[1] {
2056 obligations.truncate(1);
2057 }
2058 } else {
2059 // Three or more elements. Use a general deduplication process.
2060 let mut seen = FxHashSet::default();
2061 obligations.retain(|i| seen.insert(i.clone()));
2062 }
2063
2064 obligations
2065 }
2066 }
2067
2068 trait TraitObligationExt<'tcx> {
2069 fn derived_cause(
2070 &self,
2071 variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
2072 ) -> ObligationCause<'tcx>;
2073 }
2074
2075 impl<'tcx> TraitObligationExt<'tcx> for TraitObligation<'tcx> {
2076 fn derived_cause(
2077 &self,
2078 variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
2079 ) -> ObligationCause<'tcx> {
2080 /*!
2081 * Creates a cause for obligations that are derived from
2082 * `obligation` by a recursive search (e.g., for a builtin
2083 * bound, or eventually an `auto trait Foo`). If `obligation`
2084 * is itself a derived obligation, this is just a clone, but
2085 * otherwise we create a "derived obligation" cause so as to
2086 * keep track of the original root obligation for error
2087 * reporting.
2088 */
2089
2090 let obligation = self;
2091
2092 // NOTE(flaper87): As of now, it keeps track of the whole error
2093 // chain. Ideally, we should have a way to configure this either
2094 // by using -Z verbose or just a CLI argument.
2095 let derived_cause = DerivedObligationCause {
2096 parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
2097 parent_code: Rc::new(obligation.cause.code.clone()),
2098 };
2099 let derived_code = variant(derived_cause);
2100 ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
2101 }
2102 }
2103
2104 impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
2105 fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2106 TraitObligationStackList::with(self)
2107 }
2108
2109 fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
2110 self.previous.cache
2111 }
2112
2113 fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2114 self.list()
2115 }
2116
2117 /// Indicates that attempting to evaluate this stack entry
2118 /// required accessing something from the stack at depth `reached_depth`.
2119 fn update_reached_depth(&self, reached_depth: usize) {
2120 assert!(
2121 self.depth > reached_depth,
2122 "invoked `update_reached_depth` with something under this stack: \
2123 self.depth={} reached_depth={}",
2124 self.depth,
2125 reached_depth,
2126 );
2127 debug!(reached_depth, "update_reached_depth");
2128 let mut p = self;
2129 while reached_depth < p.depth {
2130 debug!(?p.fresh_trait_ref, "update_reached_depth: marking as cycle participant");
2131 p.reached_depth.set(p.reached_depth.get().min(reached_depth));
2132 p = p.previous.head.unwrap();
2133 }
2134 }
2135 }
2136
2137 /// The "provisional evaluation cache" is used to store intermediate cache results
2138 /// when solving auto traits. Auto traits are unusual in that they can support
2139 /// cycles. So, for example, a "proof tree" like this would be ok:
2140 ///
2141 /// - `Foo<T>: Send` :-
2142 /// - `Bar<T>: Send` :-
2143 /// - `Foo<T>: Send` -- cycle, but ok
2144 /// - `Baz<T>: Send`
2145 ///
2146 /// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
2147 /// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
2148 /// For non-auto traits, this cycle would be an error, but for auto traits (because
2149 /// they are coinductive) it is considered ok.
2150 ///
2151 /// However, there is a complication: at the point where we have
2152 /// "proven" `Bar<T>: Send`, we have in fact only proven it
2153 /// *provisionally*. In particular, we proved that `Bar<T>: Send`
2154 /// *under the assumption* that `Foo<T>: Send`. But what if we later
2155 /// find out this assumption is wrong? Specifically, we could
2156 /// encounter some kind of error proving `Baz<T>: Send`. In that case,
2157 /// `Bar<T>: Send` didn't turn out to be true.
2158 ///
2159 /// In Issue #60010, we found a bug in rustc where it would cache
2160 /// these intermediate results. This was fixed in #60444 by disabling
2161 /// *all* caching for things involved in a cycle -- in our example,
2162 /// that would mean we don't cache that `Bar<T>: Send`. But this led
2163 /// to large slowdowns.
2164 ///
2165 /// Specifically, imagine this scenario, where proving `Baz<T>: Send`
2166 /// first requires proving `Bar<T>: Send` (which is true):
2167 ///
2168 /// - `Foo<T>: Send` :-
2169 /// - `Bar<T>: Send` :-
2170 /// - `Foo<T>: Send` -- cycle, but ok
2171 /// - `Baz<T>: Send`
2172 /// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
2173 /// - `*const T: Send` -- but what if we later encounter an error?
2174 ///
2175 /// The *provisional evaluation cache* resolves this issue. It stores
2176 /// cache results that we've proven but which were involved in a cycle
2177 /// in some way. We track the minimal stack depth (i.e., the
2178 /// farthest from the top of the stack) that we are dependent on.
2179 /// The idea is that the cache results within are all valid -- so long as
2180 /// none of the nodes in between the current node and the node at that minimum
2181 /// depth result in an error (in which case the cached results are just thrown away).
2182 ///
2183 /// During evaluation, we consult this provisional cache and rely on
2184 /// it. Accessing a cached value is considered equivalent to accessing
2185 /// a result at `reached_depth`, so it marks the *current* solution as
2186 /// provisional as well. If an error is encountered, we toss out any
2187 /// provisional results added from the subtree that encountered the
2188 /// error. When we pop the node at `reached_depth` from the stack, we
2189 /// can commit all the things that remain in the provisional cache.
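///
/// A rough, simplified sketch of how the selection code drives this
/// cache (the method names are the ones defined below; the variable
/// names and control flow are abbreviated and purely illustrative):
///
/// ```ignore (illustrative pseudo-code)
/// let dfn = cache.next_dfn();
/// if let Some(result) = cache.get_provisional(fresh_trait_ref) {
///     // counts as touching the stack at `cache.current_reached_depth()`
/// }
/// // ... evaluate ...
/// if evaluation_failed {
///     cache.on_failure(dfn);
/// }
/// if result_is_not_provisional {
///     cache.on_completion(depth, |trait_ref, result| { /* commit */ });
/// } else {
///     cache.insert_provisional(dfn, reached_depth, fresh_trait_ref, result);
/// }
/// ```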
2190 struct ProvisionalEvaluationCache<'tcx> {
2191 /// next "depth first number" to issue -- just a counter
2192 dfn: Cell<usize>,
2193
2194 /// Stores the "coldest" depth (bottom of stack) reached by any of
2195 /// the evaluation entries. The idea here is that all things in the provisional
2196 /// cache are always dependent on *something* that is colder in the stack:
2197 /// therefore, if we add a new entry that is dependent on something *colder still*,
2198 /// we have to modify the depth for all entries at once.
2199 ///
2200 /// Example:
2201 ///
2202 /// Imagine we have a stack `A B C D E` (with `E` being the top of
2203 /// the stack). We cache something with depth 2, which means that
2204 /// it was dependent on C. Then we pop E but go on and process a
2205 /// new node F: A B C D F. Now F adds something to the cache with
2206 /// depth 1, meaning it is dependent on B. Our original cache
2207 /// entry is also dependent on B, because there is a path from E
2208 /// to C and then from C to F and from F to B.
2209 reached_depth: Cell<usize>,
2210
2211 /// Map from cache key to the provisionally evaluated thing.
2212 /// The cache entries contain the result but also the DFN in which they
2213 /// were added. The DFN is used to clear out values on failure.
2214 ///
2215 /// Imagine we have a stack like:
2216 ///
2217 /// - `A B C` and we add a cache for the result of C (DFN 2)
2218 /// - Then we have a stack `A B D` where `D` has DFN 3
2219 /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
2220 /// - `E` generates various cache entries which have cyclic dependencies on `B`
2221 /// - `A B D E F` and so forth
2222 /// - the DFN of `F` for example would be 5
2223 /// - then we determine that `E` is in error -- we will then clear
2224 /// all cache values whose DFN is >= 4 -- in this case, that
2225 /// means the cached value for `F`.
2226 map: RefCell<FxHashMap<ty::PolyTraitRef<'tcx>, ProvisionalEvaluation>>,
2227 }
2228
2229 /// A cache value for the provisional cache: contains the depth-first
2230 /// number (DFN) and result.
2231 #[derive(Copy, Clone, Debug)]
2232 struct ProvisionalEvaluation {
2233 from_dfn: usize,
2234 result: EvaluationResult,
2235 }
2236
2237 impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
2238 fn default() -> Self {
2239 Self { dfn: Cell::new(0), reached_depth: Cell::new(usize::MAX), map: Default::default() }
2240 }
2241 }
2242
2243 impl<'tcx> ProvisionalEvaluationCache<'tcx> {
2244 /// Get the next DFN in sequence (basically a counter).
2245 fn next_dfn(&self) -> usize {
2246 let result = self.dfn.get();
2247 self.dfn.set(result + 1);
2248 result
2249 }
2250
2251 /// Check the provisional cache for any result for
2252 /// `fresh_trait_ref`. If there is a hit, then you must consider
2253 /// it an access to the stack slots at depth
2254 /// `self.current_reached_depth()` and above.
2255 fn get_provisional(&self, fresh_trait_ref: ty::PolyTraitRef<'tcx>) -> Option<EvaluationResult> {
2256 debug!(
2257 ?fresh_trait_ref,
2258 reached_depth = ?self.reached_depth.get(),
2259 "get_provisional = {:#?}",
2260 self.map.borrow().get(&fresh_trait_ref),
2261 );
2262 Some(self.map.borrow().get(&fresh_trait_ref)?.result)
2263 }
2264
2265 /// Current value of the `reached_depth` counter -- all the
2266 /// provisional cache entries are dependent on the item at this
2267 /// depth.
2268 fn current_reached_depth(&self) -> usize {
2269 self.reached_depth.get()
2270 }
2271
2272 /// Insert a provisional result into the cache. The result came
2273 /// from the node with the given DFN. It accessed a minimum depth
2274 /// of `reached_depth` to compute. It evaluated `fresh_trait_ref`
2275 /// and resulted in `result`.
2276 fn insert_provisional(
2277 &self,
2278 from_dfn: usize,
2279 reached_depth: usize,
2280 fresh_trait_ref: ty::PolyTraitRef<'tcx>,
2281 result: EvaluationResult,
2282 ) {
2283 debug!(?from_dfn, ?reached_depth, ?fresh_trait_ref, ?result, "insert_provisional");
2284 let r_d = self.reached_depth.get();
2285 self.reached_depth.set(r_d.min(reached_depth));
2286
2287 debug!(reached_depth = self.reached_depth.get());
2288
2289 self.map.borrow_mut().insert(fresh_trait_ref, ProvisionalEvaluation { from_dfn, result });
2290 }
2291
2292 /// Invoked when the node with dfn `dfn` does not get a successful
2293 /// result. This will clear out any provisional cache entries
2294 /// that were added since `dfn` was created. This is because the
2295 /// provisional entries are things which must assume that the
2296 /// things on the stack at the time of their creation succeeded --
2297 /// since the failing node is presently at the top of the stack,
2298 /// these provisional entries must either depend on it or some
2299 /// ancestor of it.
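/// For example, with the stack sketched in the docs for `map` above,
/// `on_failure(4)` (the DFN of `E`) clears the entry that was cached
/// for `F` (DFN 5) but keeps the entry cached for `C` (DFN 2).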
2300 fn on_failure(&self, dfn: usize) {
2301 debug!(?dfn, "on_failure");
2302 self.map.borrow_mut().retain(|key, eval| {
2303 if eval.from_dfn >= dfn {
2304 debug!("on_failure: removing {:?}", key);
2305 false
2306 } else {
2307 true
2308 }
2309 });
2310 }
2311
2312 /// Invoked when the node at depth `depth` completed without
2313 /// depending on anything higher in the stack (if that completion
2314 /// was a failure, then `on_failure` should have been invoked
2315 /// already). The callback `op` will be invoked for each
2316 /// provisional entry that we can now confirm.
2317 fn on_completion(
2318 &self,
2319 depth: usize,
2320 mut op: impl FnMut(ty::PolyTraitRef<'tcx>, EvaluationResult),
2321 ) {
2322 debug!(?depth, reached_depth = ?self.reached_depth.get(), "on_completion");
2323
2324 if self.reached_depth.get() < depth {
2325 debug!("on_completion: did not yet reach depth to complete");
2326 return;
2327 }
2328
2329 for (fresh_trait_ref, eval) in self.map.borrow_mut().drain() {
2330 debug!(?fresh_trait_ref, ?eval, "on_completion");
2331
2332 op(fresh_trait_ref, eval.result);
2333 }
2334
2335 self.reached_depth.set(usize::MAX);
2336 }
2337 }
2338
2339 #[derive(Copy, Clone)]
2340 struct TraitObligationStackList<'o, 'tcx> {
2341 cache: &'o ProvisionalEvaluationCache<'tcx>,
2342 head: Option<&'o TraitObligationStack<'o, 'tcx>>,
2343 }
2344
2345 impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
2346 fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
2347 TraitObligationStackList { cache, head: None }
2348 }
2349
2350 fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
2351 TraitObligationStackList { cache: r.cache(), head: Some(r) }
2352 }
2353
2354 fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
2355 self.head
2356 }
2357
2358 fn depth(&self) -> usize {
2359 if let Some(head) = self.head { head.depth } else { 0 }
2360 }
2361 }
2362
2363 impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
2364 type Item = &'o TraitObligationStack<'o, 'tcx>;
2365
2366 fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
2367 match self.head {
2368 Some(o) => {
2369 *self = o.previous;
2370 Some(o)
2371 }
2372 None => None,
2373 }
2374 }
2375 }
2376
2377 impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
2378 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2379 write!(f, "TraitObligationStack({:?})", self.obligation)
2380 }
2381 }