]> git.proxmox.com Git - rustc.git/blob - compiler/rustc_trait_selection/src/traits/select/mod.rs
New upstream version 1.48.0~beta.8+dfsg1
[rustc.git] / compiler / rustc_trait_selection / src / traits / select / mod.rs
1 //! Candidate selection. See the [rustc dev guide] for more information on how this works.
2 //!
3 //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
4
5 use self::EvaluationResult::*;
6 use self::SelectionCandidate::*;
7
8 use super::coherence::{self, Conflict};
9 use super::const_evaluatable;
10 use super::project;
11 use super::project::normalize_with_depth_to;
12 use super::util;
13 use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
14 use super::wf;
15 use super::DerivedObligationCause;
16 use super::Obligation;
17 use super::ObligationCauseCode;
18 use super::Selection;
19 use super::SelectionResult;
20 use super::TraitQueryMode;
21 use super::{Normalized, ProjectionCacheKey};
22 use super::{ObligationCause, PredicateObligation, TraitObligation};
23 use super::{Overflow, SelectionError, Unimplemented};
24
25 use crate::infer::{InferCtxt, InferOk, TypeFreshener};
26 use crate::traits::error_reporting::InferCtxtExt;
27 use crate::traits::project::ProjectionCacheKeyExt;
28 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
29 use rustc_data_structures::stack::ensure_sufficient_stack;
30 use rustc_errors::ErrorReported;
31 use rustc_hir as hir;
32 use rustc_hir::def_id::DefId;
33 use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
34 use rustc_middle::mir::interpret::ErrorHandled;
35 use rustc_middle::ty::fast_reject;
36 use rustc_middle::ty::print::with_no_trimmed_paths;
37 use rustc_middle::ty::relate::TypeRelation;
38 use rustc_middle::ty::subst::{GenericArgKind, Subst, SubstsRef};
39 use rustc_middle::ty::{
40 self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness,
41 };
42 use rustc_span::symbol::sym;
43
44 use std::cell::{Cell, RefCell};
45 use std::cmp;
46 use std::fmt::{self, Display};
47 use std::iter;
48 use std::rc::Rc;
49
50 pub use rustc_middle::traits::select::*;
51
52 mod candidate_assembly;
53 mod confirmation;
54
/// Why coherence checking found an intercrate ambiguity; rendered into a
/// diagnostic note by `add_intercrate_ambiguity_hint`.
#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
    /// A downstream crate could implement `trait_desc` (optionally for the
    /// type described by `self_desc`).
    DownstreamCrate { trait_desc: String, self_desc: Option<String> },
    /// A future version of an upstream crate could add an impl of
    /// `trait_desc` (optionally for the type described by `self_desc`).
    UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String> },
    /// Overlap with a reservation impl; `message` is shown to the user verbatim.
    ReservationImpl { message: String },
}
61
62 impl IntercrateAmbiguityCause {
63 /// Emits notes when the overlap is caused by complex intercrate ambiguities.
64 /// See #23980 for details.
65 pub fn add_intercrate_ambiguity_hint(&self, err: &mut rustc_errors::DiagnosticBuilder<'_>) {
66 err.note(&self.intercrate_ambiguity_hint());
67 }
68
69 pub fn intercrate_ambiguity_hint(&self) -> String {
70 match self {
71 &IntercrateAmbiguityCause::DownstreamCrate { ref trait_desc, ref self_desc } => {
72 let self_desc = if let &Some(ref ty) = self_desc {
73 format!(" for type `{}`", ty)
74 } else {
75 String::new()
76 };
77 format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
78 }
79 &IntercrateAmbiguityCause::UpstreamCrateUpdate { ref trait_desc, ref self_desc } => {
80 let self_desc = if let &Some(ref ty) = self_desc {
81 format!(" for type `{}`", ty)
82 } else {
83 String::new()
84 };
85 format!(
86 "upstream crates may add a new impl of trait `{}`{} \
87 in future versions",
88 trait_desc, self_desc
89 )
90 }
91 &IntercrateAmbiguityCause::ReservationImpl { ref message } => message.clone(),
92 }
93 }
94 }
95
/// The context in which trait selection runs: an inference context plus the
/// configuration flags (coherence mode, query mode, negative-impl filtering)
/// that govern how obligations are selected and evaluated.
pub struct SelectionContext<'cx, 'tcx> {
    /// The inference context this selection runs inside; unification
    /// performed during selection goes through it.
    infcx: &'cx InferCtxt<'cx, 'tcx>,

    /// Freshener used specifically for entries on the obligation
    /// stack. This ensures that all entries on the stack at one time
    /// will have the same set of placeholder entries, which is
    /// important for checking for trait bounds that recursively
    /// require themselves.
    freshener: TypeFreshener<'cx, 'tcx>,

    /// If `true`, indicates that the evaluation should be conservative
    /// and consider the possibility of types outside this crate.
    /// This comes up primarily when resolving ambiguity. Imagine
    /// there is some trait reference `$0: Bar` where `$0` is an
    /// inference variable. If `intercrate` is true, then we can never
    /// say for sure that this reference is not implemented, even if
    /// there are *no impls at all for `Bar`*, because `$0` could be
    /// bound to some type in a downstream crate that implements
    /// `Bar`. This is the suitable mode for coherence. Elsewhere,
    /// though, we set this to false, because we are only interested
    /// in types that the user could actually have written --- in
    /// other words, we consider `$0: Bar` to be unimplemented if
    /// there is no type that the user could *actually name* that
    /// would satisfy it. This avoids crippling inference, basically.
    intercrate: bool,

    /// When `Some`, ambiguity causes encountered during coherence are
    /// collected here for later reporting; `None` when tracking is off.
    intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,

    /// Controls whether or not to filter out negative impls when selecting.
    /// This is used in librustdoc to distinguish between the lack of an impl
    /// and a negative impl.
    allow_negative_impls: bool,

    /// The mode that trait queries run in, which informs our error handling
    /// policy. In essence, canonicalized queries need their errors propagated
    /// rather than immediately reported because we do not have accurate spans.
    query_mode: TraitQueryMode,
}
134
// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx> {
    /// The obligation being evaluated at this stack frame.
    obligation: &'prev TraitObligation<'tcx>,

    /// The trait ref from `obligation` but "freshened" with the
    /// selection-context's freshener. Used to check for recursion.
    fresh_trait_ref: ty::PolyTraitRef<'tcx>,

    /// Starts out equal to `depth` -- if, during evaluation, we
    /// encounter a cycle, then we will set this flag to the minimum
    /// depth of that cycle for all participants in the cycle. These
    /// participants will then forego caching their results. This is
    /// not the most efficient solution, but it addresses #60010. The
    /// problem we are trying to prevent:
    ///
    /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
    /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
    /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
    ///
    /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
    /// is `EvaluatedToOk`; this is because they were only considered
    /// ok on the premise that `A: AutoTrait` held, but we indeed
    /// encountered a problem (later on) with `A: AutoTrait`. So we
    /// currently set a flag on the stack node for `B: AutoTrait` (as
    /// well as the second instance of `A: AutoTrait`) to suppress
    /// caching.
    ///
    /// This is a simple, targeted fix. A more-performant fix requires
    /// deeper changes, but would permit more caching: we could
    /// basically defer caching until we have fully evaluated the
    /// tree, and then cache the entire tree at once. In any case, the
    /// performance impact here shouldn't be so horrible: every time
    /// this is hit, we do cache at least one trait, so we only
    /// evaluate each member of a cycle up to N times, where N is the
    /// length of the cycle. This means the performance impact is
    /// bounded and we shouldn't have any terrible worst-cases.
    reached_depth: Cell<usize>,

    /// The frames below this one (the tail of the stack).
    previous: TraitObligationStackList<'prev, 'tcx>,

    /// The number of parent frames plus one (thus, the topmost frame has depth 1).
    depth: usize,

    /// The depth-first number of this node in the search graph -- a
    /// pre-order index. Basically, a freshly incremented counter.
    dfn: usize,
}
182
/// The result of assembling candidates for one obligation.
struct SelectionCandidateSet<'tcx> {
    /// A list of candidates that definitely apply to the current
    /// obligation (meaning: types unify).
    vec: Vec<SelectionCandidate<'tcx>>,

    /// If `true`, then there were candidates that might or might
    /// not have applied, but we couldn't tell. This occurs when some
    /// of the input types are type variables, in which case there are
    /// various "builtin" rules that might or might not trigger.
    ambiguous: bool,
}
194
/// A selection candidate paired with the result of evaluating it.
#[derive(PartialEq, Eq, Debug, Clone)]
struct EvaluatedCandidate<'tcx> {
    /// The candidate that was evaluated.
    candidate: SelectionCandidate<'tcx>,
    /// The outcome of evaluating `candidate`.
    evaluation: EvaluationResult,
}
200
/// When does the builtin impl for `T: Trait` apply?
/// The answer is one of: it applies under stated conditions, it does not
/// exist, or we cannot tell yet.
enum BuiltinImplConditions<'tcx> {
    /// The impl is conditional on `T1, T2, ...: Trait`.
    Where(ty::Binder<Vec<Ty<'tcx>>>),
    /// There is no built-in impl. There may be some other
    /// candidate (a where-clause or user-defined impl).
    None,
    /// It is unknown whether there is an impl.
    Ambiguous,
}
211
212 impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
213 pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
214 SelectionContext {
215 infcx,
216 freshener: infcx.freshener(),
217 intercrate: false,
218 intercrate_ambiguity_causes: None,
219 allow_negative_impls: false,
220 query_mode: TraitQueryMode::Standard,
221 }
222 }
223
224 pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
225 SelectionContext {
226 infcx,
227 freshener: infcx.freshener(),
228 intercrate: true,
229 intercrate_ambiguity_causes: None,
230 allow_negative_impls: false,
231 query_mode: TraitQueryMode::Standard,
232 }
233 }
234
235 pub fn with_negative(
236 infcx: &'cx InferCtxt<'cx, 'tcx>,
237 allow_negative_impls: bool,
238 ) -> SelectionContext<'cx, 'tcx> {
239 debug!("with_negative({:?})", allow_negative_impls);
240 SelectionContext {
241 infcx,
242 freshener: infcx.freshener(),
243 intercrate: false,
244 intercrate_ambiguity_causes: None,
245 allow_negative_impls,
246 query_mode: TraitQueryMode::Standard,
247 }
248 }
249
250 pub fn with_query_mode(
251 infcx: &'cx InferCtxt<'cx, 'tcx>,
252 query_mode: TraitQueryMode,
253 ) -> SelectionContext<'cx, 'tcx> {
254 debug!("with_query_mode({:?})", query_mode);
255 SelectionContext {
256 infcx,
257 freshener: infcx.freshener(),
258 intercrate: false,
259 intercrate_ambiguity_causes: None,
260 allow_negative_impls: false,
261 query_mode,
262 }
263 }
264
265 /// Enables tracking of intercrate ambiguity causes. These are
266 /// used in coherence to give improved diagnostics. We don't do
267 /// this until we detect a coherence error because it can lead to
268 /// false overflow results (#47139) and because it costs
269 /// computation time.
270 pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
271 assert!(self.intercrate);
272 assert!(self.intercrate_ambiguity_causes.is_none());
273 self.intercrate_ambiguity_causes = Some(vec![]);
274 debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
275 }
276
277 /// Gets the intercrate ambiguity causes collected since tracking
278 /// was enabled and disables tracking at the same time. If
279 /// tracking is not enabled, just returns an empty vector.
280 pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
281 assert!(self.intercrate);
282 self.intercrate_ambiguity_causes.take().unwrap_or(vec![])
283 }
284
    /// Returns the inference context this selection context wraps.
    pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
        self.infcx
    }
288
    /// Returns the type context underlying the inference context.
    pub fn tcx(&self) -> TyCtxt<'tcx> {
        self.infcx.tcx
    }
292
    /// Returns the inference context, for callers that want it in its
    /// role as the closure typer.
    pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
        self.infcx
    }
296
297 ///////////////////////////////////////////////////////////////////////////
298 // Selection
299 //
300 // The selection phase tries to identify *how* an obligation will
301 // be resolved. For example, it will identify which impl or
302 // parameter bound is to be used. The process can be inconclusive
303 // if the self type in the obligation is not fully inferred. Selection
304 // can result in an error in one of two ways:
305 //
306 // 1. If no applicable impl or parameter bound can be found.
307 // 2. If the output type parameters in the obligation do not match
308 // those specified by the impl/bound. For example, if the obligation
309 // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.
311
312 /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
313 /// type environment by performing unification.
314 pub fn select(
315 &mut self,
316 obligation: &TraitObligation<'tcx>,
317 ) -> SelectionResult<'tcx, Selection<'tcx>> {
318 debug!("select({:?})", obligation);
319 debug_assert!(!obligation.predicate.has_escaping_bound_vars());
320
321 let pec = &ProvisionalEvaluationCache::default();
322 let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
323
324 let candidate = match self.candidate_from_obligation(&stack) {
325 Err(SelectionError::Overflow) => {
326 // In standard mode, overflow must have been caught and reported
327 // earlier.
328 assert!(self.query_mode == TraitQueryMode::Canonical);
329 return Err(SelectionError::Overflow);
330 }
331 Err(e) => {
332 return Err(e);
333 }
334 Ok(None) => {
335 return Ok(None);
336 }
337 Ok(Some(candidate)) => candidate,
338 };
339
340 match self.confirm_candidate(obligation, candidate) {
341 Err(SelectionError::Overflow) => {
342 assert!(self.query_mode == TraitQueryMode::Canonical);
343 Err(SelectionError::Overflow)
344 }
345 Err(e) => Err(e),
346 Ok(candidate) => Ok(Some(candidate)),
347 }
348 }
349
350 ///////////////////////////////////////////////////////////////////////////
351 // EVALUATION
352 //
353 // Tests whether an obligation can be selected or whether an impl
354 // can be applied to particular types. It skips the "confirmation"
355 // step and hence completely ignores output type parameters.
356 //
357 // The result is "true" if the obligation *may* hold and "false" if
358 // we can be sure it does not.
359
360 /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
361 pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
362 debug!("predicate_may_hold_fatal({:?})", obligation);
363
364 // This fatal query is a stopgap that should only be used in standard mode,
365 // where we do not expect overflow to be propagated.
366 assert!(self.query_mode == TraitQueryMode::Standard);
367
368 self.evaluate_root_obligation(obligation)
369 .expect("Overflow should be caught earlier in standard query mode")
370 .may_apply()
371 }
372
373 /// Evaluates whether the obligation `obligation` can be satisfied
374 /// and returns an `EvaluationResult`. This is meant for the
375 /// *initial* call.
376 pub fn evaluate_root_obligation(
377 &mut self,
378 obligation: &PredicateObligation<'tcx>,
379 ) -> Result<EvaluationResult, OverflowError> {
380 self.evaluation_probe(|this| {
381 this.evaluate_predicate_recursively(
382 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
383 obligation.clone(),
384 )
385 })
386 }
387
    /// Runs `op` inside an inference probe (so its inference side effects
    /// are rolled back) and post-processes the result: a failed leak check
    /// turns the result into `EvaluatedToErr`, and any region constraints
    /// added during the probe cap the result at `EvaluatedToOkModuloRegions`.
    fn evaluation_probe(
        &mut self,
        op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
    ) -> Result<EvaluationResult, OverflowError> {
        self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
            let result = op(self)?;

            // If the leak check fails, the evaluated result cannot hold.
            match self.infcx.leak_check(true, snapshot) {
                Ok(()) => {}
                Err(_) => return Ok(EvaluatedToErr),
            }

            // If the probe added region constraints, we only know the result
            // holds modulo regions.
            match self.infcx.region_constraints_added_in_snapshot(snapshot) {
                None => Ok(result),
                Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
            }
        })
    }
406
407 /// Evaluates the predicates in `predicates` recursively. Note that
408 /// this applies projections in the predicates, and therefore
409 /// is run within an inference probe.
410 fn evaluate_predicates_recursively<'o, I>(
411 &mut self,
412 stack: TraitObligationStackList<'o, 'tcx>,
413 predicates: I,
414 ) -> Result<EvaluationResult, OverflowError>
415 where
416 I: IntoIterator<Item = PredicateObligation<'tcx>>,
417 {
418 let mut result = EvaluatedToOk;
419 for obligation in predicates {
420 let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
421 debug!("evaluate_predicate_recursively({:?}) = {:?}", obligation, eval);
422 if let EvaluatedToErr = eval {
423 // fast-path - EvaluatedToErr is the top of the lattice,
424 // so we don't need to look on the other predicates.
425 return Ok(EvaluatedToErr);
426 } else {
427 result = cmp::max(result, eval);
428 }
429 }
430 Ok(result)
431 }
432
    /// Evaluates a single predicate obligation, dispatching on its kind.
    /// Trait predicates are handed to `evaluate_trait_predicate_recursively`
    /// (which does stack/cache management); all other predicate kinds are
    /// handled inline here.
    fn evaluate_predicate_recursively<'o>(
        &mut self,
        previous_stack: TraitObligationStackList<'o, 'tcx>,
        obligation: PredicateObligation<'tcx>,
    ) -> Result<EvaluationResult, OverflowError> {
        debug!(
            "evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})",
            previous_stack.head(),
            obligation
        );

        // `previous_stack` stores a `TraitObligation`, while `obligation` is
        // a `PredicateObligation`. These are distinct types, so we can't
        // use any `Option` combinator method that would force them to be
        // the same.
        match previous_stack.head() {
            Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
            None => self.check_recursion_limit(&obligation, &obligation)?,
        }

        // Guard against running out of real stack while recursing.
        ensure_sufficient_stack(|| {
            match obligation.predicate.skip_binders() {
                // Trait predicates get the full stack/caching treatment.
                ty::PredicateAtom::Trait(t, _) => {
                    let t = ty::Binder::bind(t);
                    debug_assert!(!t.has_escaping_bound_vars());
                    let obligation = obligation.with(t);
                    self.evaluate_trait_predicate_recursively(previous_stack, obligation)
                }

                ty::PredicateAtom::Subtype(p) => {
                    let p = ty::Binder::bind(p);
                    // Does this code ever run?
                    match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
                        Some(Ok(InferOk { mut obligations, .. })) => {
                            self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
                            self.evaluate_predicates_recursively(
                                previous_stack,
                                obligations.into_iter(),
                            )
                        }
                        Some(Err(_)) => Ok(EvaluatedToErr),
                        None => Ok(EvaluatedToAmbig),
                    }
                }

                // Well-formedness: expand into the nested obligations (if any)
                // and evaluate those; `None` means we can't tell yet.
                ty::PredicateAtom::WellFormed(arg) => match wf::obligations(
                    self.infcx,
                    obligation.param_env,
                    obligation.cause.body_id,
                    arg,
                    obligation.cause.span,
                ) {
                    Some(mut obligations) => {
                        self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
                        self.evaluate_predicates_recursively(
                            previous_stack,
                            obligations.into_iter(),
                        )
                    }
                    None => Ok(EvaluatedToAmbig),
                },

                ty::PredicateAtom::TypeOutlives(..) | ty::PredicateAtom::RegionOutlives(..) => {
                    // We do not consider region relationships when evaluating trait matches.
                    Ok(EvaluatedToOkModuloRegions)
                }

                ty::PredicateAtom::ObjectSafe(trait_def_id) => {
                    if self.tcx().is_object_safe(trait_def_id) {
                        Ok(EvaluatedToOk)
                    } else {
                        Ok(EvaluatedToErr)
                    }
                }

                ty::PredicateAtom::Projection(data) => {
                    let data = ty::Binder::bind(data);
                    let project_obligation = obligation.with(data);
                    match project::poly_project_and_unify_type(self, &project_obligation) {
                        Ok(Ok(Some(mut subobligations))) => {
                            self.add_depth(subobligations.iter_mut(), obligation.recursion_depth);
                            let result = self.evaluate_predicates_recursively(
                                previous_stack,
                                subobligations.into_iter(),
                            );
                            // Mark the projection-cache entry complete so later
                            // users don't re-evaluate the subobligations.
                            if let Some(key) =
                                ProjectionCacheKey::from_poly_projection_predicate(self, data)
                            {
                                self.infcx.inner.borrow_mut().projection_cache().complete(key);
                            }
                            result
                        }
                        Ok(Ok(None)) => Ok(EvaluatedToAmbig),
                        // EvaluatedToRecur might also be acceptable here, but use
                        // Unknown for now because it means that we won't dismiss a
                        // selection candidate solely because it has a projection
                        // cycle. This is closest to the previous behavior of
                        // immediately erroring.
                        Ok(Err(project::InProgress)) => Ok(EvaluatedToUnknown),
                        Err(_) => Ok(EvaluatedToErr),
                    }
                }

                // Closure-kind obligations: ambiguous until the closure's kind
                // has been inferred, then decided by `ClosureKind::extends`.
                ty::PredicateAtom::ClosureKind(_, closure_substs, kind) => {
                    match self.infcx.closure_kind(closure_substs) {
                        Some(closure_kind) => {
                            if closure_kind.extends(kind) {
                                Ok(EvaluatedToOk)
                            } else {
                                Ok(EvaluatedToErr)
                            }
                        }
                        None => Ok(EvaluatedToAmbig),
                    }
                }

                // `TooGeneric` means the constant still mentions unresolved
                // generics, so we can't decide yet.
                ty::PredicateAtom::ConstEvaluatable(def_id, substs) => {
                    match const_evaluatable::is_const_evaluatable(
                        self.infcx,
                        def_id,
                        substs,
                        obligation.param_env,
                        obligation.cause.span,
                    ) {
                        Ok(()) => Ok(EvaluatedToOk),
                        Err(ErrorHandled::TooGeneric) => Ok(EvaluatedToAmbig),
                        Err(_) => Ok(EvaluatedToErr),
                    }
                }

                ty::PredicateAtom::ConstEquate(c1, c2) => {
                    debug!(
                        "evaluate_predicate_recursively: equating consts c1={:?} c2={:?}",
                        c1, c2
                    );

                    // Force each side to a value if it is an unevaluated
                    // constant; otherwise pass it through unchanged.
                    let evaluate = |c: &'tcx ty::Const<'tcx>| {
                        if let ty::ConstKind::Unevaluated(def, substs, promoted) = c.val {
                            self.infcx
                                .const_eval_resolve(
                                    obligation.param_env,
                                    def,
                                    substs,
                                    promoted,
                                    Some(obligation.cause.span),
                                )
                                .map(|val| ty::Const::from_value(self.tcx(), val, c.ty))
                        } else {
                            Ok(c)
                        }
                    };

                    match (evaluate(c1), evaluate(c2)) {
                        // Both sides evaluated: equate the resulting values.
                        (Ok(c1), Ok(c2)) => {
                            match self
                                .infcx()
                                .at(&obligation.cause, obligation.param_env)
                                .eq(c1, c2)
                            {
                                Ok(_) => Ok(EvaluatedToOk),
                                Err(_) => Ok(EvaluatedToErr),
                            }
                        }
                        (Err(ErrorHandled::Reported(ErrorReported)), _)
                        | (_, Err(ErrorHandled::Reported(ErrorReported))) => Ok(EvaluatedToErr),
                        (Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
                            span_bug!(
                                obligation.cause.span(self.tcx()),
                                "ConstEquate: const_eval_resolve returned an unexpected error"
                            )
                        }
                        (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
                            Ok(EvaluatedToAmbig)
                        }
                    }
                }
                ty::PredicateAtom::TypeWellFormedFromEnv(..) => {
                    bug!("TypeWellFormedFromEnv is only used for chalk")
                }
            }
        })
    }
615
    /// Evaluates a trait obligation, consulting (in order) the main
    /// evaluation cache, the provisional cache, and the cycle check before
    /// falling back to `evaluate_stack`. Results are written back to the
    /// main cache, or only provisionally when this frame participated in a
    /// cycle (see `TraitObligationStack::reached_depth`).
    fn evaluate_trait_predicate_recursively<'o>(
        &mut self,
        previous_stack: TraitObligationStackList<'o, 'tcx>,
        mut obligation: TraitObligation<'tcx>,
    ) -> Result<EvaluationResult, OverflowError> {
        debug!("evaluate_trait_predicate_recursively({:?})", obligation);

        if !self.intercrate
            && obligation.is_global()
            && obligation.param_env.caller_bounds().iter().all(|bound| bound.needs_subst())
        {
            // If a param env has no global bounds, global obligations do not
            // depend on its particular value in order to work, so we can clear
            // out the param env and get better caching.
            debug!("evaluate_trait_predicate_recursively({:?}) - in global", obligation);
            obligation.param_env = obligation.param_env.without_caller_bounds();
        }

        let stack = self.push_stack(previous_stack, &obligation);
        let fresh_trait_ref = stack.fresh_trait_ref;

        // First: the main (per-infcx / global) evaluation cache.
        if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
            debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
            return Ok(result);
        }

        // Second: the provisional cache for in-progress cycle participants.
        if let Some(result) = stack.cache().get_provisional(fresh_trait_ref) {
            debug!("PROVISIONAL CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
            stack.update_reached_depth(stack.cache().current_reached_depth());
            return Ok(result);
        }

        // Check if this is a match for something already on the
        // stack. If so, we don't want to insert the result into the
        // main cache (it is cycle dependent) nor the provisional
        // cache (which is meant for things that have completed but
        // for a "backedge" -- this result *is* the backedge).
        if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
            return Ok(cycle_result);
        }

        let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
        let result = result?;

        if !result.must_apply_modulo_regions() {
            stack.cache().on_failure(stack.dfn);
        }

        let reached_depth = stack.reached_depth.get();
        if reached_depth >= stack.depth {
            // This frame did not participate in a cycle below it, so its
            // result (and any now-complete provisional results) can be
            // promoted to the main cache.
            debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result);
            self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);

            stack.cache().on_completion(stack.depth, |fresh_trait_ref, provisional_result| {
                self.insert_evaluation_cache(
                    obligation.param_env,
                    fresh_trait_ref,
                    dep_node,
                    provisional_result.max(result),
                );
            });
        } else {
            // Cycle participant: only cache provisionally, keyed by this
            // frame's DFN, until the cycle head completes.
            debug!("PROVISIONAL: {:?}={:?}", fresh_trait_ref, result);
            debug!(
                "evaluate_trait_predicate_recursively: caching provisionally because {:?} \
                 is a cycle participant (at depth {}, reached depth {})",
                fresh_trait_ref, stack.depth, reached_depth,
            );

            stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_ref, result);
        }

        Ok(result)
    }
689
    /// If there is any previous entry on the stack that precisely
    /// matches this obligation, then we can assume that the
    /// obligation is satisfied for now (still all other conditions
    /// must be met of course). One obvious case this comes up is
    /// marker traits like `Send`. Think of a linked list:
    ///
    ///    struct List<T> { data: T, next: Option<Box<List<T>>> }
    ///
    /// `Box<List<T>>` will be `Send` if `T` is `Send` and
    /// `Option<Box<List<T>>>` is `Send`, and in turn
    /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
    /// `Send`.
    ///
    /// Note that we do this comparison using the `fresh_trait_ref`
    /// fields. Because these have all been freshened using
    /// `self.freshener`, we can be sure that (a) this will not
    /// affect the inferencer state and (b) that if we see two
    /// fresh regions with the same index, they refer to the same
    /// unbound type variable.
    fn check_evaluation_cycle(
        &mut self,
        stack: &TraitObligationStack<'_, 'tcx>,
    ) -> Option<EvaluationResult> {
        // Find the oldest frame (if any) with the same param-env and the
        // same freshened trait ref; its depth is the head of the cycle.
        if let Some(cycle_depth) = stack
            .iter()
            .skip(1) // Skip top-most frame.
            .find(|prev| {
                stack.obligation.param_env == prev.obligation.param_env
                    && stack.fresh_trait_ref == prev.fresh_trait_ref
            })
            .map(|stack| stack.depth)
        {
            debug!(
                "evaluate_stack({:?}) --> recursive at depth {}",
                stack.fresh_trait_ref, cycle_depth,
            );

            // If we have a stack like `A B C D E A`, where the top of
            // the stack is the final `A`, then this will iterate over
            // `A, E, D, C, B` -- i.e., all the participants apart
            // from the cycle head. We mark them as participating in a
            // cycle. This suppresses caching for those nodes. See
            // `in_cycle` field for more details.
            stack.update_reached_depth(cycle_depth);

            // Subtle: when checking for a coinductive cycle, we do
            // not compare using the "freshened trait refs" (which
            // have erased regions) but rather the fully explicit
            // trait refs. This is important because it's only a cycle
            // if the regions match exactly.
            let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
            let tcx = self.tcx();
            let cycle =
                cycle.map(|stack| stack.obligation.predicate.without_const().to_predicate(tcx));
            if self.coinductive_match(cycle) {
                debug!("evaluate_stack({:?}) --> recursive, coinductive", stack.fresh_trait_ref);
                Some(EvaluatedToOk)
            } else {
                debug!("evaluate_stack({:?}) --> recursive, inductive", stack.fresh_trait_ref);
                Some(EvaluatedToRecur)
            }
        } else {
            None
        }
    }
755
    /// Evaluates the obligation at the top of `stack` once caches and exact
    /// cycles have been ruled out: handles the unbound-type-variable special
    /// cases, then selects a candidate and evaluates it.
    fn evaluate_stack<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
    ) -> Result<EvaluationResult, OverflowError> {
        // In intercrate mode, whenever any of the generics are unbound,
        // there can always be an impl. Even if there are no impls in
        // this crate, perhaps the type would be unified with
        // something from another crate that does provide an impl.
        //
        // In intra mode, we must still be conservative. The reason is
        // that we want to avoid cycles. Imagine an impl like:
        //
        //     impl<T: Eq> Eq for Vec<T>
        //
        // and a trait reference like `$0 : Eq` where `$0` is an
        // unbound variable. When we evaluate this trait-reference, we
        // will unify `$0` with `Vec<$1>` (for some fresh variable
        // `$1`), on the condition that `$1 : Eq`. We will then wind
        // up with many candidates (since there are other `Eq` impls
        // that apply) and try to winnow things down. This results in
        // a recursive evaluation of `$1 : Eq` -- as you can
        // imagine, this is just where we started. To avoid that, we
        // check for unbound variables and return an ambiguous (hence possible)
        // match if we've seen this trait before.
        //
        // This suffices to allow chains like `FnMut` implemented in
        // terms of `Fn` etc, but we could probably make this more
        // precise still.
        let unbound_input_types =
            stack.fresh_trait_ref.skip_binder().substs.types().any(|ty| ty.is_fresh());
        // This check was an imperfect workaround for a bug in the old
        // intercrate mode; it should be removed when that goes away.
        if unbound_input_types && self.intercrate {
            debug!(
                "evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
                stack.fresh_trait_ref
            );
            // Heuristics: show the diagnostics when there are no candidates in crate.
            if self.intercrate_ambiguity_causes.is_some() {
                debug!("evaluate_stack: intercrate_ambiguity_causes is some");
                if let Ok(candidate_set) = self.assemble_candidates(stack) {
                    if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
                        let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
                        let self_ty = trait_ref.self_ty();
                        let cause =
                            with_no_trimmed_paths(|| IntercrateAmbiguityCause::DownstreamCrate {
                                trait_desc: trait_ref.print_only_trait_path().to_string(),
                                self_desc: if self_ty.has_concrete_skeleton() {
                                    Some(self_ty.to_string())
                                } else {
                                    None
                                },
                            });

                        debug!("evaluate_stack: pushing cause = {:?}", cause);
                        self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause);
                    }
                }
            }
            return Ok(EvaluatedToAmbig);
        }
        // Give up (rather than looping) when an obligation with unbound
        // inputs matches a previous frame of the stack.
        if unbound_input_types
            && stack.iter().skip(1).any(|prev| {
                stack.obligation.param_env == prev.obligation.param_env
                    && self.match_fresh_trait_refs(
                        stack.fresh_trait_ref,
                        prev.fresh_trait_ref,
                        prev.obligation.param_env,
                    )
            })
        {
            debug!(
                "evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
                stack.fresh_trait_ref
            );
            return Ok(EvaluatedToUnknown);
        }

        // Otherwise: select a candidate and evaluate it (if any).
        match self.candidate_from_obligation(stack) {
            Ok(Some(c)) => self.evaluate_candidate(stack, &c),
            Ok(None) => Ok(EvaluatedToAmbig),
            Err(Overflow) => Err(OverflowError),
            Err(..) => Ok(EvaluatedToErr),
        }
    }
841
842 /// For defaulted traits, we use a co-inductive strategy to solve, so
843 /// that recursion is ok. This routine returns `true` if the top of the
844 /// stack (`cycle[0]`):
845 ///
846 /// - is a defaulted trait,
847 /// - it also appears in the backtrace at some position `X`,
848 /// - all the predicates at positions `X..` between `X` and the top are
849 /// also defaulted traits.
850 pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
851 where
852 I: Iterator<Item = ty::Predicate<'tcx>>,
853 {
854 let mut cycle = cycle;
855 cycle.all(|predicate| self.coinductive_predicate(predicate))
856 }
857
858 fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
859 let result = match predicate.skip_binders() {
860 ty::PredicateAtom::Trait(ref data, _) => self.tcx().trait_is_auto(data.def_id()),
861 _ => false,
862 };
863 debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
864 result
865 }
866
    /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
    /// obligations are met. Returns whether `candidate` remains viable after this further
    /// scrutiny.
    fn evaluate_candidate<'o>(
        &mut self,
        stack: &TraitObligationStack<'o, 'tcx>,
        candidate: &SelectionCandidate<'tcx>,
    ) -> Result<EvaluationResult, OverflowError> {
        debug!(
            "evaluate_candidate: depth={} candidate={:?}",
            stack.obligation.recursion_depth, candidate
        );
        // Confirmation is run inside an evaluation probe so that any
        // inference side-effects are rolled back once the result is known.
        let result = self.evaluation_probe(|this| {
            let candidate = (*candidate).clone();
            match this.confirm_candidate(stack.obligation, candidate) {
                // Confirmation succeeded: the candidate holds iff all of the
                // nested obligations it produced also evaluate successfully.
                Ok(selection) => this.evaluate_predicates_recursively(
                    stack.list(),
                    selection.nested_obligations().into_iter(),
                ),
                // Confirmation failed: this candidate does not apply.
                Err(..) => Ok(EvaluatedToErr),
            }
        })?;
        debug!(
            "evaluate_candidate: depth={} result={:?}",
            stack.obligation.recursion_depth, result
        );
        Ok(result)
    }
895
896 fn check_evaluation_cache(
897 &self,
898 param_env: ty::ParamEnv<'tcx>,
899 trait_ref: ty::PolyTraitRef<'tcx>,
900 ) -> Option<EvaluationResult> {
901 let tcx = self.tcx();
902 if self.can_use_global_caches(param_env) {
903 if let Some(res) = tcx.evaluation_cache.get(&param_env.and(trait_ref), tcx) {
904 return Some(res);
905 }
906 }
907 self.infcx.evaluation_cache.get(&param_env.and(trait_ref), tcx)
908 }
909
    /// Records an evaluation result, preferring the global (per-`tcx`) cache
    /// when the trait-ref is free of inference variables; otherwise the
    /// result is stored only in the cache local to this inference context.
    fn insert_evaluation_cache(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        trait_ref: ty::PolyTraitRef<'tcx>,
        dep_node: DepNodeIndex,
        result: EvaluationResult,
    ) {
        // Avoid caching results that depend on more than just the trait-ref
        // - the stack can create recursion.
        if result.is_stack_dependent() {
            return;
        }

        if self.can_use_global_caches(param_env) {
            // Inference variables are scoped to this inference context, so
            // results mentioning them must never enter the global cache.
            if !trait_ref.needs_infer() {
                debug!(
                    "insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global",
                    trait_ref, result,
                );
                // This may overwrite the cache with the same value
                // FIXME: Due to #50507 this overwrites the different values
                // This should be changed to use HashMapExt::insert_same
                // when that is fixed
                self.tcx().evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
                return;
            }
        }

        debug!("insert_evaluation_cache(trait_ref={:?}, candidate={:?})", trait_ref, result,);
        self.infcx.evaluation_cache.insert(param_env.and(trait_ref), dep_node, result);
    }
941
    /// For various reasons, it's possible for a subobligation
    /// to have a *lower* recursion_depth than the obligation used to create it.
    /// Projection sub-obligations may be returned from the projection cache,
    /// which results in obligations with an 'old' `recursion_depth`.
    /// Additionally, methods like `wf::obligations` and
    /// `InferCtxt.subtype_predicate` produce subobligations without
    /// taking in a 'parent' depth, causing the generated subobligations
    /// to have a `recursion_depth` of `0`.
    ///
    /// To ensure that obligation_depth never decreases, we force all subobligations
    /// to have at least the depth of the original obligation.
    fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(
        &self,
        it: I,
        min_depth: usize,
    ) {
        // Clamp each subobligation's depth to at least `min_depth`, then
        // add one because it is nested under the original obligation.
        it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
    }
960
961 /// Checks that the recursion limit has not been exceeded.
962 ///
963 /// The weird return type of this function allows it to be used with the `try` (`?`)
964 /// operator within certain functions.
965 fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
966 &self,
967 obligation: &Obligation<'tcx, T>,
968 error_obligation: &Obligation<'tcx, V>,
969 ) -> Result<(), OverflowError> {
970 if !self.infcx.tcx.sess.recursion_limit().value_within_limit(obligation.recursion_depth) {
971 match self.query_mode {
972 TraitQueryMode::Standard => {
973 self.infcx().report_overflow_error(error_obligation, true);
974 }
975 TraitQueryMode::Canonical => {
976 return Err(OverflowError);
977 }
978 }
979 }
980 Ok(())
981 }
982
983 fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
984 where
985 OP: FnOnce(&mut Self) -> R,
986 {
987 let (result, dep_node) =
988 self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || op(self));
989 self.tcx().dep_graph.read_index(dep_node);
990 (result, dep_node)
991 }
992
    // Treat negative impls as unimplemented, and reservation impls as ambiguity.
    fn filter_negative_and_reservation_impls(
        &mut self,
        candidate: SelectionCandidate<'tcx>,
    ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
        if let ImplCandidate(def_id) = candidate {
            let tcx = self.tcx();
            match tcx.impl_polarity(def_id) {
                // A negative impl makes the trait unimplemented, unless the
                // `allow_negative_impls` flag is set on this context.
                ty::ImplPolarity::Negative if !self.allow_negative_impls => {
                    return Err(Unimplemented);
                }
                ty::ImplPolarity::Reservation => {
                    if let Some(intercrate_ambiguity_clauses) =
                        &mut self.intercrate_ambiguity_causes
                    {
                        // Record the explanatory message from the
                        // `#[rustc_reservation_impl = "..."]` attribute so it
                        // can be surfaced in coherence diagnostics.
                        let attrs = tcx.get_attrs(def_id);
                        let attr = tcx.sess.find_by_name(&attrs, sym::rustc_reservation_impl);
                        let value = attr.and_then(|a| a.value_str());
                        if let Some(value) = value {
                            debug!(
                                "filter_negative_and_reservation_impls: \
                                 reservation impl ambiguity on {:?}",
                                def_id
                            );
                            intercrate_ambiguity_clauses.push(
                                IntercrateAmbiguityCause::ReservationImpl {
                                    message: value.to_string(),
                                },
                            );
                        }
                    }
                    // Reservation impls never select successfully; they yield
                    // ambiguity instead.
                    return Ok(None);
                }
                _ => {}
            };
        }
        Ok(Some(candidate))
    }
1031
1032 fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
1033 debug!("is_knowable(intercrate={:?})", self.intercrate);
1034
1035 if !self.intercrate {
1036 return None;
1037 }
1038
1039 let obligation = &stack.obligation;
1040 let predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
1041
1042 // Okay to skip binder because of the nature of the
1043 // trait-ref-is-knowable check, which does not care about
1044 // bound regions.
1045 let trait_ref = predicate.skip_binder().trait_ref;
1046
1047 coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
1048 }
1049
1050 /// Returns `true` if the global caches can be used.
1051 /// Do note that if the type itself is not in the
1052 /// global tcx, the local caches will be used.
1053 fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
1054 // If there are any inference variables in the `ParamEnv`, then we
1055 // always use a cache local to this particular scope. Otherwise, we
1056 // switch to a global cache.
1057 if param_env.needs_infer() {
1058 return false;
1059 }
1060
1061 // Avoid using the master cache during coherence and just rely
1062 // on the local cache. This effectively disables caching
1063 // during coherence. It is really just a simplification to
1064 // avoid us having to fear that coherence results "pollute"
1065 // the master cache. Since coherence executes pretty quickly,
1066 // it's not worth going to more trouble to increase the
1067 // hit-rate, I don't think.
1068 if self.intercrate {
1069 return false;
1070 }
1071
1072 // Otherwise, we can use the global cache.
1073 true
1074 }
1075
1076 fn check_candidate_cache(
1077 &mut self,
1078 param_env: ty::ParamEnv<'tcx>,
1079 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1080 ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
1081 let tcx = self.tcx();
1082 let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref;
1083 if self.can_use_global_caches(param_env) {
1084 if let Some(res) = tcx.selection_cache.get(&param_env.and(*trait_ref), tcx) {
1085 return Some(res);
1086 }
1087 }
1088 self.infcx.selection_cache.get(&param_env.and(*trait_ref), tcx)
1089 }
1090
1091 /// Determines whether can we safely cache the result
1092 /// of selecting an obligation. This is almost always `true`,
1093 /// except when dealing with certain `ParamCandidate`s.
1094 ///
1095 /// Ordinarily, a `ParamCandidate` will contain no inference variables,
1096 /// since it was usually produced directly from a `DefId`. However,
1097 /// certain cases (currently only librustdoc's blanket impl finder),
1098 /// a `ParamEnv` may be explicitly constructed with inference types.
1099 /// When this is the case, we do *not* want to cache the resulting selection
1100 /// candidate. This is due to the fact that it might not always be possible
1101 /// to equate the obligation's trait ref and the candidate's trait ref,
1102 /// if more constraints end up getting added to an inference variable.
1103 ///
1104 /// Because of this, we always want to re-run the full selection
1105 /// process for our obligation the next time we see it, since
1106 /// we might end up picking a different `SelectionCandidate` (or none at all).
1107 fn can_cache_candidate(
1108 &self,
1109 result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1110 ) -> bool {
1111 match result {
1112 Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.needs_infer(),
1113 _ => true,
1114 }
1115 }
1116
    /// Records a selection result, preferring the global (per-`tcx`) cache
    /// when both the trait-ref and the candidate are free of inference
    /// variables; otherwise the result stays in the local cache.
    fn insert_candidate_cache(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
        dep_node: DepNodeIndex,
        candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
    ) {
        let tcx = self.tcx();
        let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;

        // Certain `ParamCandidate`s must never be cached; see
        // `can_cache_candidate` for the rationale.
        if !self.can_cache_candidate(&candidate) {
            debug!(
                "insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
                 candidate is not cacheable",
                trait_ref, candidate
            );
            return;
        }

        if self.can_use_global_caches(param_env) {
            if let Err(Overflow) = candidate {
                // Don't cache overflow globally; we only produce this in certain modes.
            } else if !trait_ref.needs_infer() {
                if !candidate.needs_infer() {
                    debug!(
                        "insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
                        trait_ref, candidate,
                    );
                    // This may overwrite the cache with the same value.
                    tcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
                    return;
                }
            }
        }

        debug!(
            "insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
            trait_ref, candidate,
        );
        self.infcx.selection_cache.insert(param_env.and(trait_ref), dep_node, candidate);
    }
1158
    /// Checks whether an obligation whose self type is a projection or opaque
    /// type can be satisfied by one of the bounds declared on that
    /// projection's/opaque's definition. Returns `true` if some declared
    /// bound matches the obligation.
    fn match_projection_obligation_against_definition_bounds(
        &mut self,
        obligation: &TraitObligation<'tcx>,
    ) -> bool {
        let poly_trait_predicate = self.infcx().resolve_vars_if_possible(&obligation.predicate);
        let (placeholder_trait_predicate, _) =
            self.infcx().replace_bound_vars_with_placeholders(&poly_trait_predicate);
        debug!(
            "match_projection_obligation_against_definition_bounds: \
             placeholder_trait_predicate={:?}",
            placeholder_trait_predicate,
        );

        // Fetch the bounds declared on the definition of the projection /
        // opaque type, instantiated with its substitutions. Any other self
        // type is a caller bug.
        let tcx = self.infcx.tcx;
        let predicates = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
            ty::Projection(ref data) => {
                tcx.projection_predicates(data.item_def_id).subst(tcx, data.substs)
            }
            ty::Opaque(def_id, substs) => tcx.projection_predicates(def_id).subst(tcx, substs),
            _ => {
                span_bug!(
                    obligation.cause.span,
                    "match_projection_obligation_against_definition_bounds() called \
                     but self-ty is not a projection: {:?}",
                    placeholder_trait_predicate.trait_ref.self_ty()
                );
            }
        };

        // Find the first declared trait bound that matches the obligation.
        // Matching runs inside a probe so failed attempts leave no
        // inference side-effects behind.
        let matching_bound = predicates.iter().find_map(|bound| {
            if let ty::PredicateAtom::Trait(pred, _) = bound.skip_binders() {
                let bound = ty::Binder::bind(pred.trait_ref);
                if self.infcx.probe(|_| {
                    self.match_projection(obligation, bound, placeholder_trait_predicate.trait_ref)
                }) {
                    return Some(bound);
                }
            }
            None
        });

        debug!(
            "match_projection_obligation_against_definition_bounds: \
             matching_bound={:?}",
            matching_bound
        );
        match matching_bound {
            None => false,
            Some(bound) => {
                // Repeat the successful match, if any, this time outside of a probe.
                let result =
                    self.match_projection(obligation, bound, placeholder_trait_predicate.trait_ref);

                // The probe already succeeded with this bound, so matching
                // again must succeed as well.
                assert!(result);
                true
            }
        }
    }
1217
1218 fn match_projection(
1219 &mut self,
1220 obligation: &TraitObligation<'tcx>,
1221 trait_bound: ty::PolyTraitRef<'tcx>,
1222 placeholder_trait_ref: ty::TraitRef<'tcx>,
1223 ) -> bool {
1224 debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
1225 self.infcx
1226 .at(&obligation.cause, obligation.param_env)
1227 .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
1228 .is_ok()
1229 }
1230
1231 fn evaluate_where_clause<'o>(
1232 &mut self,
1233 stack: &TraitObligationStack<'o, 'tcx>,
1234 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1235 ) -> Result<EvaluationResult, OverflowError> {
1236 self.evaluation_probe(|this| {
1237 match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
1238 Ok(obligations) => {
1239 this.evaluate_predicates_recursively(stack.list(), obligations.into_iter())
1240 }
1241 Err(()) => Ok(EvaluatedToErr),
1242 }
1243 })
1244 }
1245
1246 ///////////////////////////////////////////////////////////////////////////
1247 // WINNOW
1248 //
1249 // Winnowing is the process of attempting to resolve ambiguity by
1250 // probing further. During the winnowing process, we unify all
1251 // type variables and then we also attempt to evaluate recursive
1252 // bounds to see if they are satisfied.
1253
    /// Returns `true` if `victim` should be dropped in favor of
    /// `other`. Generally speaking we will drop duplicate
    /// candidates and prefer where-clause candidates.
    ///
    /// See the comment for "SelectionCandidate" for more details.
    fn candidate_should_be_dropped_in_favor_of(
        &mut self,
        victim: &EvaluatedCandidate<'tcx>,
        other: &EvaluatedCandidate<'tcx>,
        needs_infer: bool,
    ) -> bool {
        // Exact duplicates: always drop one.
        if victim.candidate == other.candidate {
            return true;
        }

        // Check if a bound would previously have been removed when normalizing
        // the param_env so that it can be given the lowest priority. See
        // #50825 for the motivation for this.
        let is_global =
            |cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();

        // (*) Prefer `BuiltinCandidate { has_nested: false }` and `DiscriminantKindCandidate`
        // to anything else.
        //
        // This is a fix for #53123 and prevents winnowing from accidentally extending the
        // lifetime of a variable.
        match other.candidate {
            // (*)
            BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => true,
            ParamCandidate(ref cand) => match victim.candidate {
                AutoImplCandidate(..) => {
                    bug!(
                        "default implementations shouldn't be recorded \
                         when there are other valid candidates"
                    );
                }
                // (*)
                BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => false,
                ImplCandidate(..)
                | ClosureCandidate
                | GeneratorCandidate
                | FnPointerCandidate
                | BuiltinObjectCandidate
                | BuiltinUnsizeCandidate
                | BuiltinCandidate { .. }
                | TraitAliasCandidate(..) => {
                    // Global bounds from the where clause should be ignored
                    // here (see issue #50825). Otherwise, we have a where
                    // clause so don't go around looking for impls.
                    !is_global(cand)
                }
                ObjectCandidate | ProjectionCandidate => {
                    // Arbitrarily give param candidates priority
                    // over projection and object candidates.
                    !is_global(cand)
                }
                ParamCandidate(..) => false,
            },
            ObjectCandidate | ProjectionCandidate => match victim.candidate {
                AutoImplCandidate(..) => {
                    bug!(
                        "default implementations shouldn't be recorded \
                         when there are other valid candidates"
                    );
                }
                // (*)
                BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate => false,
                ImplCandidate(..)
                | ClosureCandidate
                | GeneratorCandidate
                | FnPointerCandidate
                | BuiltinObjectCandidate
                | BuiltinUnsizeCandidate
                | BuiltinCandidate { .. }
                | TraitAliasCandidate(..) => true,
                ObjectCandidate | ProjectionCandidate => {
                    // Both are object/projection candidates; arbitrarily
                    // keep `other` and drop the victim.
                    true
                }
                // Mirror of the `ParamCandidate`-vs-object/projection case
                // above: a non-global param candidate wins.
                ParamCandidate(ref cand) => is_global(cand),
            },
            ImplCandidate(other_def) => {
                // See if we can toss out `victim` based on specialization.
                // This requires us to know *for sure* that the `other` impl applies
                // i.e., `EvaluatedToOk`.
                if other.evaluation.must_apply_modulo_regions() {
                    match victim.candidate {
                        ImplCandidate(victim_def) => {
                            let tcx = self.tcx();
                            if tcx.specializes((other_def, victim_def)) {
                                return true;
                            }
                            return match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
                                Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
                                    // Subtle: If the predicate we are evaluating has inference
                                    // variables, do *not* allow discarding candidates due to
                                    // marker trait impls.
                                    //
                                    // Without this restriction, we could end up accidentally
                                    // constraining inference variables based on an arbitrarily
                                    // chosen trait impl.
                                    //
                                    // Imagine we have the following code:
                                    //
                                    // ```rust
                                    // #[marker] trait MyTrait {}
                                    // impl MyTrait for u8 {}
                                    // impl MyTrait for bool {}
                                    // ```
                                    //
                                    // And we are evaluating the predicate `<_#0t as MyTrait>`.
                                    //
                                    // During selection, we will end up with one candidate for each
                                    // impl of `MyTrait`. If we were to discard one impl in favor
                                    // of the other, we would be left with one candidate, causing
                                    // us to "successfully" select the predicate, unifying
                                    // _#0t with (for example) `u8`.
                                    //
                                    // However, we have no reason to believe that this unification
                                    // is correct - we've essentially just picked an arbitrary
                                    // *possibility* for _#0t, and required that this be the *only*
                                    // possibility.
                                    //
                                    // Eventually, we will either:
                                    // 1) Unify all inference variables in the predicate through
                                    // some other means (e.g. type-checking of a function). We will
                                    // then be in a position to drop marker trait candidates
                                    // without constraining inference variables (since there are
                                    // none left to constrain)
                                    // 2) Be left with some unconstrained inference variables. We
                                    // will then correctly report an inference error, since the
                                    // existence of multiple marker trait impls tells us nothing
                                    // about which one should actually apply.
                                    !needs_infer
                                }
                                Some(_) => true,
                                None => false,
                            };
                        }
                        ParamCandidate(ref cand) => {
                            // Prefer the impl to a global where clause candidate.
                            return is_global(cand);
                        }
                        _ => (),
                    }
                }

                false
            }
            ClosureCandidate
            | GeneratorCandidate
            | FnPointerCandidate
            | BuiltinObjectCandidate
            | BuiltinUnsizeCandidate
            | BuiltinCandidate { has_nested: true } => {
                match victim.candidate {
                    ParamCandidate(ref cand) => {
                        // Prefer these to a global where-clause bound
                        // (see issue #50825).
                        is_global(cand) && other.evaluation.must_apply_modulo_regions()
                    }
                    _ => false,
                }
            }
            _ => false,
        }
    }
1422
    /// Computes the conditions under which the self type of `obligation`
    /// satisfies the builtin `Sized` trait.
    fn sized_conditions(
        &mut self,
        obligation: &TraitObligation<'tcx>,
    ) -> BuiltinImplConditions<'tcx> {
        use self::BuiltinImplConditions::{Ambiguous, None, Where};

        // NOTE: binder moved to (*)
        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());

        match self_ty.kind() {
            // These types are unconditionally `Sized`.
            ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
            | ty::Uint(_)
            | ty::Int(_)
            | ty::Bool
            | ty::Float(_)
            | ty::FnDef(..)
            | ty::FnPtr(_)
            | ty::RawPtr(..)
            | ty::Char
            | ty::Ref(..)
            | ty::Generator(..)
            | ty::GeneratorWitness(..)
            | ty::Array(..)
            | ty::Closure(..)
            | ty::Never
            | ty::Error(_) => {
                // safe for everything
                Where(ty::Binder::dummy(Vec::new()))
            }

            // Dynamically sized (or extern) types: no builtin `Sized` impl.
            ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,

            // A tuple is `Sized` if its last element is; earlier elements
            // are required to be `Sized` by construction, so only the last
            // one (if any) is emitted as a condition.
            ty::Tuple(tys) => {
                Where(ty::Binder::bind(tys.last().into_iter().map(|k| k.expect_ty()).collect()))
            }

            // An ADT is `Sized` if the types in its `sized_constraint` are.
            ty::Adt(def, substs) => {
                let sized_crit = def.sized_constraint(self.tcx());
                // (*) binder moved here
                Where(ty::Binder::bind(
                    sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect(),
                ))
            }

            // Sizedness of these depends on the environment or the hidden
            // type, so no builtin condition is produced.
            ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
            // Unresolved type variable: can't decide yet.
            ty::Infer(ty::TyVar(_)) => Ambiguous,

            ty::Placeholder(..)
            | ty::Bound(..)
            | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
                bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
            }
        }
    }
1477
    /// Computes the conditions under which the self type of `obligation`
    /// satisfies the builtin `Copy`/`Clone` traits.
    fn copy_clone_conditions(
        &mut self,
        obligation: &TraitObligation<'tcx>,
    ) -> BuiltinImplConditions<'tcx> {
        // NOTE: binder moved to (*)
        let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());

        use self::BuiltinImplConditions::{Ambiguous, None, Where};

        match self_ty.kind() {
            // Unconditionally `Copy`/`Clone` with no subconditions.
            ty::Infer(ty::IntVar(_))
            | ty::Infer(ty::FloatVar(_))
            | ty::FnDef(..)
            | ty::FnPtr(_)
            | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),

            ty::Uint(_)
            | ty::Int(_)
            | ty::Bool
            | ty::Float(_)
            | ty::Char
            | ty::RawPtr(..)
            | ty::Never
            | ty::Ref(_, _, hir::Mutability::Not) => {
                // Implementations provided in libcore
                None
            }

            // These have no builtin impl and no libcore impl applies either.
            ty::Dynamic(..)
            | ty::Str
            | ty::Slice(..)
            | ty::Generator(..)
            | ty::GeneratorWitness(..)
            | ty::Foreign(..)
            | ty::Ref(_, _, hir::Mutability::Mut) => None,

            // An array is `Copy`/`Clone` if its element type is.
            ty::Array(element_ty, _) => {
                // (*) binder moved here
                Where(ty::Binder::bind(vec![element_ty]))
            }

            // A tuple is `Copy`/`Clone` if all of its elements are.
            ty::Tuple(tys) => {
                // (*) binder moved here
                Where(ty::Binder::bind(tys.iter().map(|k| k.expect_ty()).collect()))
            }

            // A closure is `Copy`/`Clone` if all of its captured upvars are.
            ty::Closure(_, substs) => {
                // (*) binder moved here
                Where(ty::Binder::bind(substs.as_closure().upvar_tys().collect()))
            }

            ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
                // Fallback to whatever user-defined impls exist in this case.
                None
            }

            ty::Infer(ty::TyVar(_)) => {
                // Unbound type variable. Might or might not have
                // applicable impls and so forth, depending on what
                // those type variables wind up being bound to.
                Ambiguous
            }

            ty::Placeholder(..)
            | ty::Bound(..)
            | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
                bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
            }
        }
    }
1548
    /// For default impls, we need to break apart a type into its
    /// "constituent types" -- meaning, the types that it contains.
    ///
    /// Here are some (simple) examples:
    ///
    /// ```
    /// (i32, u32) -> [i32, u32]
    /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
    /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
    /// Zed<i32> where enum Zed { A(T), B(u32) } -> [i32, u32]
    /// ```
    fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
        match *t.kind() {
            // Leaf types contain no other types.
            ty::Uint(_)
            | ty::Int(_)
            | ty::Bool
            | ty::Float(_)
            | ty::FnDef(..)
            | ty::FnPtr(_)
            | ty::Str
            | ty::Error(_)
            | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
            | ty::Never
            | ty::Char => Vec::new(),

            // These should have been handled before we get here.
            ty::Placeholder(..)
            | ty::Dynamic(..)
            | ty::Param(..)
            | ty::Foreign(..)
            | ty::Projection(..)
            | ty::Bound(..)
            | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
                bug!("asked to assemble constituent types of unexpected type: {:?}", t);
            }

            // Pointer-like types contribute their pointee.
            ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
                vec![element_ty]
            }

            ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty],

            ty::Tuple(ref tys) => {
                // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
                tys.iter().map(|k| k.expect_ty()).collect()
            }

            // A closure's constituents are its captured upvars.
            ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().collect(),

            // A generator contributes its upvars plus its witness type.
            ty::Generator(_, ref substs, _) => {
                let witness = substs.as_generator().witness();
                substs.as_generator().upvar_tys().chain(iter::once(witness)).collect()
            }

            ty::GeneratorWitness(types) => {
                // This is sound because no regions in the witness can refer to
                // the binder outside the witness. So we'll effectively reuse
                // the implicit binder around the witness.
                types.skip_binder().to_vec()
            }

            // For `PhantomData<T>`, we pass `T`.
            ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),

            // Any other ADT contributes the types of all of its fields.
            ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),

            ty::Opaque(def_id, substs) => {
                // We can resolve the `impl Trait` to its concrete type,
                // which enforces a DAG between the functions requiring
                // the auto trait bounds in question.
                vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
            }
        }
    }
1622
    /// For each type in `types`, produces the obligations required to show
    /// that it (after normalization) implements `trait_def_id`. Used when
    /// proving builtin bounds in terms of a type's constituent types.
    fn collect_predicates_for_types(
        &mut self,
        param_env: ty::ParamEnv<'tcx>,
        cause: ObligationCause<'tcx>,
        recursion_depth: usize,
        trait_def_id: DefId,
        types: ty::Binder<Vec<Ty<'tcx>>>,
    ) -> Vec<PredicateObligation<'tcx>> {
        // Because the types were potentially derived from
        // higher-ranked obligations they may reference late-bound
        // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
        // yield a type like `for<'a> &'a i32`. In general, we
        // maintain the invariant that we never manipulate bound
        // regions, so we have to process these bound regions somehow.
        //
        // The strategy is to:
        //
        // 1. Instantiate those regions to placeholder regions (e.g.,
        //    `for<'a> &'a i32` becomes `&0 i32`.
        // 2. Produce something like `&'0 i32 : Copy`
        // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`

        types
            .skip_binder() // binder moved -\
            .iter()
            .flat_map(|ty| {
                let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/

                self.infcx.commit_unconditionally(|_| {
                    // Step 1 above: replace bound regions with placeholders.
                    let (placeholder_ty, _) = self.infcx.replace_bound_vars_with_placeholders(&ty);
                    // Normalization may itself yield extra obligations.
                    let Normalized { value: normalized_ty, mut obligations } =
                        ensure_sufficient_stack(|| {
                            project::normalize_with_depth(
                                self,
                                param_env,
                                cause.clone(),
                                recursion_depth,
                                &placeholder_ty,
                            )
                        });
                    // Step 2 above: require `normalized_ty: trait_def_id`.
                    let placeholder_obligation = predicate_for_trait_def(
                        self.tcx(),
                        param_env,
                        cause.clone(),
                        trait_def_id,
                        recursion_depth,
                        normalized_ty,
                        &[],
                    );
                    obligations.push(placeholder_obligation);
                    obligations
                })
            })
            .collect()
    }
1678
1679 ///////////////////////////////////////////////////////////////////////////
1680 // Matching
1681 //
1682 // Matching is a common path used for both evaluation and
1683 // confirmation. It basically unifies types that appear in impls
1684 // and traits. This does affect the surrounding environment;
1685 // therefore, when used during evaluation, match routines must be
1686 // run inside of a `probe()` so that their side-effects are
1687 // contained.
1688
1689 fn rematch_impl(
1690 &mut self,
1691 impl_def_id: DefId,
1692 obligation: &TraitObligation<'tcx>,
1693 ) -> Normalized<'tcx, SubstsRef<'tcx>> {
1694 match self.match_impl(impl_def_id, obligation) {
1695 Ok(substs) => substs,
1696 Err(()) => {
1697 bug!(
1698 "Impl {:?} was matchable against {:?} but now is not",
1699 impl_def_id,
1700 obligation
1701 );
1702 }
1703 }
1704 }
1705
    /// Attempts to unify the trait-ref of `obligation` with the (normalized)
    /// trait-ref of impl `impl_def_id`. On success, returns the inferred impl
    /// substitutions along with any nested obligations produced by
    /// normalization and unification; on failure returns `Err(())`.
    fn match_impl(
        &mut self,
        impl_def_id: DefId,
        obligation: &TraitObligation<'tcx>,
    ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
        let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();

        // Before we create the substitutions and everything, first
        // consider a "quick reject". This avoids creating more types
        // and so forth that we need to.
        if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
            return Err(());
        }

        // Replace the obligation's bound vars with placeholders so we can
        // unify against them.
        let (placeholder_obligation, _) =
            self.infcx().replace_bound_vars_with_placeholders(&obligation.predicate);
        let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;

        // Fresh inference variables for the impl's generic parameters.
        let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);

        let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);

        // Normalization of the impl trait-ref may yield obligations of its own.
        let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
            ensure_sufficient_stack(|| {
                project::normalize_with_depth(
                    self,
                    obligation.param_env,
                    obligation.cause.clone(),
                    obligation.recursion_depth + 1,
                    &impl_trait_ref,
                )
            });

        debug!(
            "match_impl(impl_def_id={:?}, obligation={:?}, \
             impl_trait_ref={:?}, placeholder_obligation_trait_ref={:?})",
            impl_def_id, obligation, impl_trait_ref, placeholder_obligation_trait_ref
        );

        let InferOk { obligations, .. } = self
            .infcx
            .at(&obligation.cause, obligation.param_env)
            .eq(placeholder_obligation_trait_ref, impl_trait_ref)
            .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
        nested_obligations.extend(obligations);

        // Reservation impls only participate during coherence checking.
        if !self.intercrate
            && self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
        {
            debug!("match_impl: reservation impls only apply in intercrate mode");
            return Err(());
        }

        debug!("match_impl: success impl_substs={:?}", impl_substs);
        Ok(Normalized { value: impl_substs, obligations: nested_obligations })
    }
1762
1763 fn fast_reject_trait_refs(
1764 &mut self,
1765 obligation: &TraitObligation<'_>,
1766 impl_trait_ref: &ty::TraitRef<'_>,
1767 ) -> bool {
1768 // We can avoid creating type variables and doing the full
1769 // substitution if we find that any of the input types, when
1770 // simplified, do not match.
1771
1772 obligation.predicate.skip_binder().trait_ref.substs.iter().zip(impl_trait_ref.substs).any(
1773 |(obligation_arg, impl_arg)| {
1774 match (obligation_arg.unpack(), impl_arg.unpack()) {
1775 (GenericArgKind::Type(obligation_ty), GenericArgKind::Type(impl_ty)) => {
1776 let simplified_obligation_ty =
1777 fast_reject::simplify_type(self.tcx(), obligation_ty, true);
1778 let simplified_impl_ty =
1779 fast_reject::simplify_type(self.tcx(), impl_ty, false);
1780
1781 simplified_obligation_ty.is_some()
1782 && simplified_impl_ty.is_some()
1783 && simplified_obligation_ty != simplified_impl_ty
1784 }
1785 (GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => {
1786 // Lifetimes can never cause a rejection.
1787 false
1788 }
1789 (GenericArgKind::Const(_), GenericArgKind::Const(_)) => {
1790 // Conservatively ignore consts (i.e. assume they might
1791 // unify later) until we have `fast_reject` support for
1792 // them (if we'll ever need it, even).
1793 false
1794 }
1795 _ => unreachable!(),
1796 }
1797 },
1798 )
1799 }
1800
    /// Normalize `where_clause_trait_ref` and try to match it against
    /// `obligation`. If successful, return any predicates that
    /// result from the normalization. Normalization is necessary
    /// because where-clauses are stored in the parameter environment
    /// unnormalized.
    fn match_where_clause_trait_ref(
        &mut self,
        obligation: &TraitObligation<'tcx>,
        where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
    ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
        // Currently a thin wrapper; the general poly-trait-ref matcher
        // does all of the work.
        self.match_poly_trait_ref(obligation, where_clause_trait_ref)
    }
1813
1814 /// Returns `Ok` if `poly_trait_ref` being true implies that the
1815 /// obligation is satisfied.
1816 fn match_poly_trait_ref(
1817 &mut self,
1818 obligation: &TraitObligation<'tcx>,
1819 poly_trait_ref: ty::PolyTraitRef<'tcx>,
1820 ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
1821 debug!(
1822 "match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
1823 obligation, poly_trait_ref
1824 );
1825
1826 self.infcx
1827 .at(&obligation.cause, obligation.param_env)
1828 .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
1829 .map(|InferOk { obligations, .. }| obligations)
1830 .map_err(|_| ())
1831 }
1832
1833 ///////////////////////////////////////////////////////////////////////////
1834 // Miscellany
1835
1836 fn match_fresh_trait_refs(
1837 &self,
1838 previous: ty::PolyTraitRef<'tcx>,
1839 current: ty::PolyTraitRef<'tcx>,
1840 param_env: ty::ParamEnv<'tcx>,
1841 ) -> bool {
1842 let mut matcher = ty::_match::Match::new(self.tcx(), param_env);
1843 matcher.relate(previous, current).is_ok()
1844 }
1845
1846 fn push_stack<'o>(
1847 &mut self,
1848 previous_stack: TraitObligationStackList<'o, 'tcx>,
1849 obligation: &'o TraitObligation<'tcx>,
1850 ) -> TraitObligationStack<'o, 'tcx> {
1851 let fresh_trait_ref =
1852 obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener);
1853
1854 let dfn = previous_stack.cache.next_dfn();
1855 let depth = previous_stack.depth() + 1;
1856 TraitObligationStack {
1857 obligation,
1858 fresh_trait_ref,
1859 reached_depth: Cell::new(depth),
1860 previous: previous_stack,
1861 dfn,
1862 depth,
1863 }
1864 }
1865
1866 fn closure_trait_ref_unnormalized(
1867 &mut self,
1868 obligation: &TraitObligation<'tcx>,
1869 substs: SubstsRef<'tcx>,
1870 ) -> ty::PolyTraitRef<'tcx> {
1871 debug!("closure_trait_ref_unnormalized(obligation={:?}, substs={:?})", obligation, substs);
1872 let closure_sig = substs.as_closure().sig();
1873
1874 debug!("closure_trait_ref_unnormalized: closure_sig = {:?}", closure_sig);
1875
1876 // (1) Feels icky to skip the binder here, but OTOH we know
1877 // that the self-type is an unboxed closure type and hence is
1878 // in fact unparameterized (or at least does not reference any
1879 // regions bound in the obligation). Still probably some
1880 // refactoring could make this nicer.
1881 closure_trait_ref_and_return_type(
1882 self.tcx(),
1883 obligation.predicate.def_id(),
1884 obligation.predicate.skip_binder().self_ty(), // (1)
1885 closure_sig,
1886 util::TupleArgumentsFlag::No,
1887 )
1888 .map_bound(|(trait_ref, _)| trait_ref)
1889 }
1890
1891 fn generator_trait_ref_unnormalized(
1892 &mut self,
1893 obligation: &TraitObligation<'tcx>,
1894 substs: SubstsRef<'tcx>,
1895 ) -> ty::PolyTraitRef<'tcx> {
1896 let gen_sig = substs.as_generator().poly_sig();
1897
1898 // (1) Feels icky to skip the binder here, but OTOH we know
1899 // that the self-type is an generator type and hence is
1900 // in fact unparameterized (or at least does not reference any
1901 // regions bound in the obligation). Still probably some
1902 // refactoring could make this nicer.
1903
1904 super::util::generator_trait_ref_and_outputs(
1905 self.tcx(),
1906 obligation.predicate.def_id(),
1907 obligation.predicate.skip_binder().self_ty(), // (1)
1908 gen_sig,
1909 )
1910 .map_bound(|(trait_ref, ..)| trait_ref)
1911 }
1912
    /// Returns the obligations that are implied by instantiating an
    /// impl or trait. The obligations are substituted and fully
    /// normalized. This is used when confirming an impl or default
    /// impl.
    ///
    /// Note the ordering invariant: obligations produced while normalizing
    /// a predicate are pushed *before* the predicate itself (see the long
    /// comment below), so the returned vector can be evaluated in one pass.
    fn impl_or_trait_obligations(
        &mut self,
        cause: ObligationCause<'tcx>,
        recursion_depth: usize,
        param_env: ty::ParamEnv<'tcx>,
        def_id: DefId,           // of impl or trait
        substs: SubstsRef<'tcx>, // for impl or trait
    ) -> Vec<PredicateObligation<'tcx>> {
        debug!("impl_or_trait_obligations(def_id={:?})", def_id);
        let tcx = self.tcx();

        // To allow for one-pass evaluation of the nested obligation,
        // each predicate must be preceded by the obligations required
        // to normalize it.
        // for example, if we have:
        // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
        // the impl will have the following predicates:
        // <V as Iterator>::Item = U,
        // U: Iterator, U: Sized,
        // V: Iterator, V: Sized,
        // <U as Iterator>::Item: Copy
        // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
        // obligation will normalize to `<$0 as Iterator>::Item = $1` and
        // `$1: Copy`, so we must ensure the obligations are emitted in
        // that order.
        let predicates = tcx.predicates_of(def_id);
        assert_eq!(predicates.parent, None);
        let mut obligations = Vec::with_capacity(predicates.predicates.len());
        for (predicate, _) in predicates.predicates {
            // `normalize_with_depth_to` appends any normalization
            // obligations directly into `obligations`, giving the required
            // "normalization first, predicate second" order.
            let predicate = normalize_with_depth_to(
                self,
                param_env,
                cause.clone(),
                recursion_depth,
                &predicate.subst(tcx, substs),
                &mut obligations,
            );
            obligations.push(Obligation {
                cause: cause.clone(),
                recursion_depth,
                param_env,
                predicate,
            });
        }

        // We are performing deduplication here to avoid exponential blowups
        // (#38528) from happening, but the real cause of the duplication is
        // unknown. What we know is that the deduplication avoids exponential
        // amount of predicates being propagated when processing deeply nested
        // types.
        //
        // This code is hot enough that it's worth avoiding the allocation
        // required for the FxHashSet when possible. Special-casing lengths 0,
        // 1 and 2 covers roughly 75-80% of the cases.
        if obligations.len() <= 1 {
            // No possibility of duplicates.
        } else if obligations.len() == 2 {
            // Only two elements. Drop the second if they are equal.
            if obligations[0] == obligations[1] {
                obligations.truncate(1);
            }
        } else {
            // Three or more elements. Use a general deduplication process.
            let mut seen = FxHashSet::default();
            // `retain` keeps the first occurrence, preserving the ordering
            // invariant established above.
            obligations.retain(|i| seen.insert(i.clone()));
        }

        obligations
    }
1986 }
1987
/// Extension trait adding `derived_cause` to `TraitObligation`; kept as a
/// trait (rather than an inherent impl) because `TraitObligation` is
/// declared in another crate.
trait TraitObligationExt<'tcx> {
    fn derived_cause(
        &self,
        variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
    ) -> ObligationCause<'tcx>;
}
1994
1995 impl<'tcx> TraitObligationExt<'tcx> for TraitObligation<'tcx> {
1996 fn derived_cause(
1997 &self,
1998 variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
1999 ) -> ObligationCause<'tcx> {
2000 /*!
2001 * Creates a cause for obligations that are derived from
2002 * `obligation` by a recursive search (e.g., for a builtin
2003 * bound, or eventually a `auto trait Foo`). If `obligation`
2004 * is itself a derived obligation, this is just a clone, but
2005 * otherwise we create a "derived obligation" cause so as to
2006 * keep track of the original root obligation for error
2007 * reporting.
2008 */
2009
2010 let obligation = self;
2011
2012 // NOTE(flaper87): As of now, it keeps track of the whole error
2013 // chain. Ideally, we should have a way to configure this either
2014 // by using -Z verbose or just a CLI argument.
2015 let derived_cause = DerivedObligationCause {
2016 parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
2017 parent_code: Rc::new(obligation.cause.code.clone()),
2018 };
2019 let derived_code = variant(derived_cause);
2020 ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code)
2021 }
2022 }
2023
2024 impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
2025 fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2026 TraitObligationStackList::with(self)
2027 }
2028
2029 fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
2030 self.previous.cache
2031 }
2032
2033 fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2034 self.list()
2035 }
2036
2037 /// Indicates that attempting to evaluate this stack entry
2038 /// required accessing something from the stack at depth `reached_depth`.
2039 fn update_reached_depth(&self, reached_depth: usize) {
2040 assert!(
2041 self.depth > reached_depth,
2042 "invoked `update_reached_depth` with something under this stack: \
2043 self.depth={} reached_depth={}",
2044 self.depth,
2045 reached_depth,
2046 );
2047 debug!("update_reached_depth(reached_depth={})", reached_depth);
2048 let mut p = self;
2049 while reached_depth < p.depth {
2050 debug!("update_reached_depth: marking {:?} as cycle participant", p.fresh_trait_ref);
2051 p.reached_depth.set(p.reached_depth.get().min(reached_depth));
2052 p = p.previous.head.unwrap();
2053 }
2054 }
2055 }
2056
2057 /// The "provisional evaluation cache" is used to store intermediate cache results
2058 /// when solving auto traits. Auto traits are unusual in that they can support
2059 /// cycles. So, for example, a "proof tree" like this would be ok:
2060 ///
2061 /// - `Foo<T>: Send` :-
2062 /// - `Bar<T>: Send` :-
2063 /// - `Foo<T>: Send` -- cycle, but ok
2064 /// - `Baz<T>: Send`
2065 ///
2066 /// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
2067 /// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
2068 /// For non-auto traits, this cycle would be an error, but for auto traits (because
2069 /// they are coinductive) it is considered ok.
2070 ///
2071 /// However, there is a complication: at the point where we have
2072 /// "proven" `Bar<T>: Send`, we have in fact only proven it
2073 /// *provisionally*. In particular, we proved that `Bar<T>: Send`
2074 /// *under the assumption* that `Foo<T>: Send`. But what if we later
2075 /// find out this assumption is wrong? Specifically, we could
2076 /// encounter some kind of error proving `Baz<T>: Send`. In that case,
2077 /// `Bar<T>: Send` didn't turn out to be true.
2078 ///
2079 /// In Issue #60010, we found a bug in rustc where it would cache
2080 /// these intermediate results. This was fixed in #60444 by disabling
2081 /// *all* caching for things involved in a cycle -- in our example,
2082 /// that would mean we don't cache that `Bar<T>: Send`. But this led
2083 /// to large slowdowns.
2084 ///
2085 /// Specifically, imagine this scenario, where proving `Baz<T>: Send`
/// first requires proving `Bar<T>: Send` (which is true):
2087 ///
2088 /// - `Foo<T>: Send` :-
2089 /// - `Bar<T>: Send` :-
2090 /// - `Foo<T>: Send` -- cycle, but ok
2091 /// - `Baz<T>: Send`
2092 /// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
2093 /// - `*const T: Send` -- but what if we later encounter an error?
2094 ///
2095 /// The *provisional evaluation cache* resolves this issue. It stores
2096 /// cache results that we've proven but which were involved in a cycle
2097 /// in some way. We track the minimal stack depth (i.e., the
2098 /// farthest from the top of the stack) that we are dependent on.
2099 /// The idea is that the cache results within are all valid -- so long as
2100 /// none of the nodes in between the current node and the node at that minimum
2101 /// depth result in an error (in which case the cached results are just thrown away).
2102 ///
2103 /// During evaluation, we consult this provisional cache and rely on
2104 /// it. Accessing a cached value is considered equivalent to accessing
2105 /// a result at `reached_depth`, so it marks the *current* solution as
2106 /// provisional as well. If an error is encountered, we toss out any
2107 /// provisional results added from the subtree that encountered the
2108 /// error. When we pop the node at `reached_depth` from the stack, we
2109 /// can commit all the things that remain in the provisional cache.
struct ProvisionalEvaluationCache<'tcx> {
    /// next "depth first number" (DFN) to issue -- just a monotonically
    /// increasing counter
    dfn: Cell<usize>,

    /// Stores the "coldest" depth (bottom of stack) reached by any of
    /// the evaluation entries. The idea here is that all things in the provisional
    /// cache are always dependent on *something* that is colder in the stack:
    /// therefore, if we add a new entry that is dependent on something *colder still*,
    /// we have to modify the depth for all entries at once.
    ///
    /// Example:
    ///
    /// Imagine we have a stack `A B C D E` (with `E` being the top of
    /// the stack). We cache something with depth 2, which means that
    /// it was dependent on C. Then we pop E but go on and process a
    /// new node F: A B C D F. Now F adds something to the cache with
    /// depth 1, meaning it is dependent on B. Our original cache
    /// entry is also dependent on B, because there is a path from E
    /// to C and then from C to F and from F to B.
    reached_depth: Cell<usize>,

    /// Map from cache key to the provisionally evaluated thing.
    /// The cache entries contain the result but also the DFN in which they
    /// were added. The DFN is used to clear out values on failure.
    ///
    /// Imagine we have a stack like:
    ///
    /// - `A B C` and we add a cache for the result of C (DFN 2)
    /// - Then we have a stack `A B D` where `D` has DFN 3
    /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
    /// - `E` generates various cache entries which have cyclic dependencies on `B`
    /// - `A B D E F` and so forth
    /// - the DFN of `F` for example would be 5
    /// - then we determine that `E` is in error -- we will then clear
    /// all cache values whose DFN is >= 4 -- in this case, that
    /// means the cached value for `F`.
    map: RefCell<FxHashMap<ty::PolyTraitRef<'tcx>, ProvisionalEvaluation>>,
}
2148
/// A cache value for the provisional cache: contains the depth-first
/// number (DFN) and result.
#[derive(Copy, Clone, Debug)]
struct ProvisionalEvaluation {
    // DFN of the stack node that computed this result.
    from_dfn: usize,
    // The provisionally-proven evaluation result.
    result: EvaluationResult,
}
2156
2157 impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
2158 fn default() -> Self {
2159 Self { dfn: Cell::new(0), reached_depth: Cell::new(usize::MAX), map: Default::default() }
2160 }
2161 }
2162
2163 impl<'tcx> ProvisionalEvaluationCache<'tcx> {
2164 /// Get the next DFN in sequence (basically a counter).
2165 fn next_dfn(&self) -> usize {
2166 let result = self.dfn.get();
2167 self.dfn.set(result + 1);
2168 result
2169 }
2170
2171 /// Check the provisional cache for any result for
2172 /// `fresh_trait_ref`. If there is a hit, then you must consider
2173 /// it an access to the stack slots at depth
2174 /// `self.current_reached_depth()` and above.
2175 fn get_provisional(&self, fresh_trait_ref: ty::PolyTraitRef<'tcx>) -> Option<EvaluationResult> {
2176 debug!(
2177 "get_provisional(fresh_trait_ref={:?}) = {:#?} with reached-depth {}",
2178 fresh_trait_ref,
2179 self.map.borrow().get(&fresh_trait_ref),
2180 self.reached_depth.get(),
2181 );
2182 Some(self.map.borrow().get(&fresh_trait_ref)?.result)
2183 }
2184
2185 /// Current value of the `reached_depth` counter -- all the
2186 /// provisional cache entries are dependent on the item at this
2187 /// depth.
2188 fn current_reached_depth(&self) -> usize {
2189 self.reached_depth.get()
2190 }
2191
2192 /// Insert a provisional result into the cache. The result came
2193 /// from the node with the given DFN. It accessed a minimum depth
2194 /// of `reached_depth` to compute. It evaluated `fresh_trait_ref`
2195 /// and resulted in `result`.
2196 fn insert_provisional(
2197 &self,
2198 from_dfn: usize,
2199 reached_depth: usize,
2200 fresh_trait_ref: ty::PolyTraitRef<'tcx>,
2201 result: EvaluationResult,
2202 ) {
2203 debug!(
2204 "insert_provisional(from_dfn={}, reached_depth={}, fresh_trait_ref={:?}, result={:?})",
2205 from_dfn, reached_depth, fresh_trait_ref, result,
2206 );
2207 let r_d = self.reached_depth.get();
2208 self.reached_depth.set(r_d.min(reached_depth));
2209
2210 debug!("insert_provisional: reached_depth={:?}", self.reached_depth.get());
2211
2212 self.map.borrow_mut().insert(fresh_trait_ref, ProvisionalEvaluation { from_dfn, result });
2213 }
2214
2215 /// Invoked when the node with dfn `dfn` does not get a successful
2216 /// result. This will clear out any provisional cache entries
2217 /// that were added since `dfn` was created. This is because the
2218 /// provisional entries are things which must assume that the
2219 /// things on the stack at the time of their creation succeeded --
2220 /// since the failing node is presently at the top of the stack,
2221 /// these provisional entries must either depend on it or some
2222 /// ancestor of it.
2223 fn on_failure(&self, dfn: usize) {
2224 debug!("on_failure(dfn={:?})", dfn,);
2225 self.map.borrow_mut().retain(|key, eval| {
2226 if !eval.from_dfn >= dfn {
2227 debug!("on_failure: removing {:?}", key);
2228 false
2229 } else {
2230 true
2231 }
2232 });
2233 }
2234
2235 /// Invoked when the node at depth `depth` completed without
2236 /// depending on anything higher in the stack (if that completion
2237 /// was a failure, then `on_failure` should have been invoked
2238 /// already). The callback `op` will be invoked for each
2239 /// provisional entry that we can now confirm.
2240 fn on_completion(
2241 &self,
2242 depth: usize,
2243 mut op: impl FnMut(ty::PolyTraitRef<'tcx>, EvaluationResult),
2244 ) {
2245 debug!("on_completion(depth={}, reached_depth={})", depth, self.reached_depth.get(),);
2246
2247 if self.reached_depth.get() < depth {
2248 debug!("on_completion: did not yet reach depth to complete");
2249 return;
2250 }
2251
2252 for (fresh_trait_ref, eval) in self.map.borrow_mut().drain() {
2253 debug!("on_completion: fresh_trait_ref={:?} eval={:?}", fresh_trait_ref, eval,);
2254
2255 op(fresh_trait_ref, eval.result);
2256 }
2257
2258 self.reached_depth.set(usize::MAX);
2259 }
2260 }
2261
/// A (possibly empty) view of the trait-obligation stack, paired with the
/// provisional evaluation cache shared by the whole stack so the cache is
/// reachable even from an empty list.
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o, 'tcx> {
    // Shared provisional cache (valid even when `head` is `None`).
    cache: &'o ProvisionalEvaluationCache<'tcx>,
    // Topmost stack entry of this view, or `None` for the empty list.
    head: Option<&'o TraitObligationStack<'o, 'tcx>>,
}
2267
2268 impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
2269 fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
2270 TraitObligationStackList { cache, head: None }
2271 }
2272
2273 fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
2274 TraitObligationStackList { cache: r.cache(), head: Some(r) }
2275 }
2276
2277 fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
2278 self.head
2279 }
2280
2281 fn depth(&self) -> usize {
2282 if let Some(head) = self.head { head.depth } else { 0 }
2283 }
2284 }
2285
2286 impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
2287 type Item = &'o TraitObligationStack<'o, 'tcx>;
2288
2289 fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
2290 match self.head {
2291 Some(o) => {
2292 *self = o.previous;
2293 Some(o)
2294 }
2295 None => None,
2296 }
2297 }
2298 }
2299
impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
    // Compact rendering: only this entry's obligation, not the whole chain.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "TraitObligationStack({:?})", self.obligation)
    }
}