// seems likely that they should eventually be merged into more
// general routines.
-use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
-use hir::def_id::DefId;
+use dep_graph::{DepGraph, DepKind, DepTrackingMap, DepTrackingMapConfig};
use infer::TransNormalize;
use std::cell::RefCell;
use std::marker::PhantomData;
use syntax::ast;
use syntax_pos::Span;
-use traits::{FulfillmentContext, Obligation, ObligationCause, Reveal, SelectionContext, Vtable};
+use traits::{FulfillmentContext, Obligation, ObligationCause, SelectionContext, Vtable};
use ty::{self, Ty, TyCtxt};
use ty::subst::{Subst, Substs};
use ty::fold::{TypeFoldable, TypeFolder};
// NOTE(review): this region is an unresolved unified-diff fragment
// (`-`/`+` prefixed lines), not compilable Rust. Comments below describe
// the patch's visible intent; the hunk is truncated before the function end.
/// (necessarily) resolve all nested obligations on the impl. Note
/// that type check should guarantee to us that all nested
/// obligations *could be* resolved if we wanted to.
+ /// Assumes that this is run after the entire crate has been successfully type-checked.
pub fn trans_fulfill_obligation(self,
span: Span,
// Patch threads an explicit `ParamEnv` parameter into this function,
// replacing the hard-coded empty env constructed below (see removed L38).
+ param_env: ty::ParamEnv<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>)
-> Vtable<'tcx, ()>
{
// Remove any references to regions; this helps improve caching.
let trait_ref = self.erase_regions(&trait_ref);
// Cache key widened from `trait_ref` to `(param_env, trait_ref)` so
// memoized results are no longer shared across distinct param envs.
- self.trans_trait_caches.trait_cache.memoize(trait_ref, || {
+ self.trans_trait_caches.trait_cache.memoize((param_env, trait_ref), || {
debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
- trait_ref, trait_ref.def_id());
+ (param_env, trait_ref), trait_ref.def_id());
// Do the initial selection for the obligation. This yields the
// shallow result we are looking for -- that is, what specific impl.
// `infer_ctxt` no longer takes an argument in the new API.
- self.infer_ctxt(()).enter(|infcx| {
+ self.infer_ctxt().enter(|infcx| {
let mut selcx = SelectionContext::new(&infcx);
// The local `ParamEnv::empty(Reveal::All)` is deleted; the caller-supplied
// `param_env` is used instead (hence `Reveal` dropped from the imports above).
- let param_env = ty::ParamEnv::empty(Reveal::All);
let obligation_cause = ObligationCause::misc(span,
ast::DUMMY_NODE_ID);
// NOTE(review): remainder of the body (obligation construction, selection,
// fulfillment, vtable return) is elided by the diff and not visible here.
let obligation = Obligation::new(obligation_cause,
}
// Fold entry point: fast-path return of a clone when `value` contains no
// projection types; otherwise walk it with this folder to normalize them.
fn fold<T:TypeFoldable<'gcx>>(&mut self, value: &T) -> T {
// NOTE(review): diff residue — the patch renames the query from
// `has_projection_types()` to `has_projections()`; logic is unchanged.
- if !value.has_projection_types() {
+ if !value.has_projections() {
value.clone()
} else {
value.fold_with(self)
}
// Per-type fold: same projection fast path as `fold` above, with the
// normalized result memoized in the dep-tracked projection cache.
fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> {
// Same `has_projection_types` -> `has_projections` rename as in `fold`.
- if !ty.has_projection_types() {
+ if !ty.has_projections() {
ty
} else {
// NOTE(review): the memoize closure body is truncated here by the diff hunk.
self.tcx.trans_trait_caches.project_cache.memoize(ty, || {
}
// Dep-tracking configuration for the trait-selection cache consulted by
// `trans_fulfill_obligation`.
impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
// Key widened to `(ParamEnv, PolyTraitRef)` to match the new memoize key
// used in `trans_fulfill_obligation`.
- type Key = ty::PolyTraitRef<'tcx>;
+ type Key = (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>);
type Value = Vtable<'tcx, ()>;
// NOTE(review): the per-key `to_dep_node` hook (which built a dep-node from
// the trait predicate) is replaced by a single map-wide `DepKind` under the
// reworked dep-graph API.
- fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
- key.to_poly_trait_predicate().dep_node()
+ fn to_dep_kind() -> DepKind {
+ DepKind::TraitSelect
}
}
// Dep-tracking configuration for the projection (associated-type
// normalization) cache consulted by `fold_ty` above.
impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> {
type Key = Ty<'gcx>;
type Value = Ty<'gcx>;
// NOTE(review): the patch deletes the old heuristic that walked the key type
// collecting ADT / projected-trait def-ids into `DepNode::ProjectionCache`,
// and substitutes the coarse map-wide `DepKind::TraitSelect`. The granularity
// concern spelled out in the removed comment presumably is now handled by the
// dep-graph machinery itself — TODO confirm against the full patch.
- fn to_dep_node(key: &Self::Key) -> DepNode<DefId> {
- // Ideally, we'd just put `key` into the dep-node, but we
- // can't put full types in there. So just collect up all the
- // def-ids of structs/enums as well as any traits that we
- // project out of. It doesn't matter so much what we do here,
- // except that if we are too coarse, we'll create overly
- // coarse edges between impls and the trans. For example, if
- // we just used the def-id of things we are projecting out of,
- // then the key for `<Foo as SomeTrait>::T` and `<Bar as
- // SomeTrait>::T` would both share a dep-node
- // (`TraitSelect(SomeTrait)`), and hence the impls for both
- // `Foo` and `Bar` would be considered inputs. So a change to
- // `Bar` would affect things that just normalized `Foo`.
- // Anyway, this heuristic is not ideal, but better than
- // nothing.
- let def_ids: Vec<DefId> =
- key.walk()
- .filter_map(|t| match t.sty {
- ty::TyAdt(adt_def, _) => Some(adt_def.did),
- ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
- _ => None,
- })
- .collect();
-
- DepNode::ProjectionCache { def_ids: def_ids }
+ fn to_dep_kind() -> DepKind {
+ DepKind::TraitSelect
}
}