// compiler/rustc_trait_selection/src/solve/search_graph/cache.rs
//! This module handles both the global cache, which stores "finished" goals,
//! and the provisional cache, which contains partially computed goals.
//!
//! The provisional cache is necessary when dealing with coinductive cycles.
//!
//! For more information about the provisional cache and coinduction in general,
//! check out the relevant section of the rustc-dev-guide.
//!
//! FIXME(@lcnr): Write that section, feel free to ping me if you need help here
//! before then or if I still haven't done that before January 2023.
use super::overflow::OverflowData;
use super::StackDepth;
use crate::solve::{CanonicalGoal, QueryResult};
use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::IndexVec;
use rustc_middle::ty::TyCtxt;

rustc_index::newtype_index! {
    pub struct EntryIndex {}
}

#[derive(Debug, Clone)]
pub(super) struct ProvisionalEntry<'tcx> {
    // In case we have a coinductive cycle, this is the
    // currently least restrictive result of this goal.
    pub(super) response: QueryResult<'tcx>,
    // In case of a cycle, the position of the deepest stack entry involved
    // in that cycle. This is monotonically decreasing in the stack as all
    // elements between the current stack element and the deepest stack entry
    // involved have to also be involved in that cycle.
    //
    // We can only move entries to the global cache once we're completely done
    // with the cycle. If this entry has not been involved in a cycle,
    // this is just its own depth.
    pub(super) depth: StackDepth,

    // The goal for this entry. Should always be equal to the corresponding goal
    // in the lookup table.
    pub(super) goal: CanonicalGoal<'tcx>,
}

pub(super) struct ProvisionalCache<'tcx> {
    pub(super) entries: IndexVec<EntryIndex, ProvisionalEntry<'tcx>>,
    // FIXME: This is only used to quickly check whether a given goal
    // is in the cache. We should experiment with using something like
    // `SsoHashSet` here because in most cases there are only a few entries.
    pub(super) lookup_table: FxHashMap<CanonicalGoal<'tcx>, EntryIndex>,
}
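
// A minimal, hypothetical sketch (not part of the actual solver) of the shape described
// above: an append-only list of entries plus a lookup table mapping each goal to its
// entry index. `Goal`, `Response`, and the plain `usize` index/depth stand in for
// `CanonicalGoal<'tcx>`, `QueryResult<'tcx>`, `EntryIndex`, and `StackDepth`, and
// `Vec`/`HashMap` stand in for `IndexVec`/`FxHashMap`.
#[cfg(test)]
mod provisional_cache_shape_sketch {
    use std::collections::HashMap;

    #[derive(Clone, PartialEq, Eq, Hash)]
    struct Goal(&'static str);
    struct Response(bool);

    struct Entry {
        response: Response,
        depth: usize,
        goal: Goal,
    }

    #[derive(Default)]
    struct Cache {
        entries: Vec<Entry>,
        lookup_table: HashMap<Goal, usize>,
    }

    impl Cache {
        // Entries are only ever pushed, so the returned index stays valid.
        fn insert(&mut self, goal: Goal, depth: usize, response: Response) -> usize {
            let index = self.entries.len();
            self.entries.push(Entry { response, depth, goal: goal.clone() });
            self.lookup_table.insert(goal, index);
            index
        }
    }

    #[test]
    fn lookup_table_mirrors_entries() {
        let mut cache = Cache::default();
        let index = cache.insert(Goal("Vec<u32>: Clone"), 0, Response(true));
        // The goal stored in the entry equals the goal used as the lookup key.
        assert_eq!(cache.lookup_table[&cache.entries[index].goal], index);
        assert_eq!(cache.entries[index].depth, 0);
        assert!(cache.entries[index].response.0);
    }
}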

impl<'tcx> ProvisionalCache<'tcx> {
    pub(super) fn empty() -> ProvisionalCache<'tcx> {
        ProvisionalCache { entries: Default::default(), lookup_table: Default::default() }
    }

    pub(super) fn is_empty(&self) -> bool {
        self.entries.is_empty() && self.lookup_table.is_empty()
    }

    /// Adds a dependency from the current leaf to `target` in the cache
    /// to prevent us from moving any goals which depend on the current leaf
    /// to the global cache while we're still computing `target`.
    ///
    /// It's important to note that `target` may already be part of a different cycle.
    /// In this case we have to ensure that we also depend on all other goals
    /// in the existing cycle in addition to the potentially direct cycle with `target`.
    /// (A standalone sketch of the depth-collapsing done here follows this impl block.)
    pub(super) fn add_dependency_of_leaf_on(&mut self, target: EntryIndex) {
        let depth = self.entries[target].depth;
        for provisional_entry in &mut self.entries.raw[target.index()..] {
            // The depth of `target` is the position of the deepest goal in the stack
            // on which `target` depends. That goal is the `root` of this cycle.
            //
            // Any entry which was added after `target` is either on the stack itself,
            // in which case its depth is definitely at least as high as the depth of
            // `root`, or it has to depend on a goal between `root` and `leaf`. If it
            // were to depend on a goal deeper in the stack than `root`, then `root`
            // would also depend on that goal, at which point `root` wouldn't be the
            // root anymore.
            debug_assert!(provisional_entry.depth >= depth);
            provisional_entry.depth = depth;
        }

        // We only update entries which were added after `target` as no other
        // entry should have a higher depth.
        //
        // Any entry which previously had a higher depth than `target` has to
        // be between `target` and `root`. Because of this we would have updated
        // its depth when calling `add_dependency_of_leaf_on(root)` for `target`.
        debug_assert!(self.entries.iter().all(|e| e.depth <= depth));
    }

    pub(super) fn depth(&self, entry_index: EntryIndex) -> StackDepth {
        self.entries[entry_index].depth
    }

    pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> QueryResult<'tcx> {
        self.entries[entry_index].response
    }
}
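
// A standalone, hypothetical sketch (not part of the actual solver) of the depth-collapsing
// performed by `add_dependency_of_leaf_on` above, with plain `usize` depths standing in for
// `StackDepth` and slice indices standing in for `EntryIndex`. It demonstrates the invariant
// the method maintains: after adding a dependency on `target`, every entry added at or after
// `target` shares the depth of the cycle's root.
#[cfg(test)]
mod add_dependency_sketch {
    struct Entry {
        depth: usize,
    }

    fn add_dependency_of_leaf_on(entries: &mut [Entry], target: usize) {
        let depth = entries[target].depth;
        for entry in &mut entries[target..] {
            // Entries added after `target` can never depend on anything deeper
            // than `target`'s root, so their depth only ever shrinks here.
            debug_assert!(entry.depth >= depth);
            entry.depth = depth;
        }
    }

    #[test]
    fn collapses_depths_to_the_cycle_root() {
        // Entries pushed in order; the entry at index 1 (depth 1) is `target`,
        // and the entries after it were added while computing the cycle.
        let mut entries =
            vec![Entry { depth: 0 }, Entry { depth: 1 }, Entry { depth: 2 }, Entry { depth: 3 }];
        add_dependency_of_leaf_on(&mut entries, 1);
        // Everything from `target` onwards now shares the root's depth ...
        assert!(entries[1..].iter().all(|e| e.depth == 1));
        // ... while entries before `target` are untouched.
        assert_eq!(entries[0].depth, 0);
    }
}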

pub(super) fn try_move_finished_goal_to_global_cache<'tcx>(
    tcx: TyCtxt<'tcx>,
    overflow_data: &mut OverflowData,
    stack: &IndexVec<super::StackDepth, super::StackElem<'tcx>>,
    goal: CanonicalGoal<'tcx>,
    response: QueryResult<'tcx>,
) {
    // We move goals to the global cache if we either did not hit an overflow or if it's
    // the root goal, as that will now always hit the same overflow limit.
    //
    // NOTE: We cannot move any non-root goals to the global cache, even if their final result
    // isn't impacted by the overflow, as such a goal still has unstable query dependencies
    // because it was not evaluated to its full depth.
    //
    // FIXME(@lcnr): We could still cache subtrees which are not impacted by overflow though.
    // Tracking that info correctly isn't trivial, so I haven't implemented it for now.
    let should_cache_globally = !overflow_data.did_overflow() || stack.is_empty();
    if should_cache_globally {
        // FIXME: move the provisional entry to the global cache.
        let _ = (tcx, goal, response);
    }
}
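
// A hypothetical sketch (not part of the actual solver) of the caching policy above, reduced
// to its two inputs: `did_overflow` stands in for `overflow_data.did_overflow()` and
// `stack_is_empty` for `stack.is_empty()`. A goal is cached globally if its evaluation never
// hit the overflow limit, or if it is the root goal (empty stack), since a root goal will
// hit the same overflow limit on every re-evaluation anyway.
#[cfg(test)]
mod global_cache_policy_sketch {
    fn should_cache_globally(did_overflow: bool, stack_is_empty: bool) -> bool {
        !did_overflow || stack_is_empty
    }

    #[test]
    fn only_overflowed_nested_goals_are_skipped() {
        assert!(should_cache_globally(false, false)); // no overflow: cache
        assert!(should_cache_globally(false, true)); // no overflow, root goal: cache
        assert!(should_cache_globally(true, true)); // overflow, but root goal: cache
        assert!(!should_cache_globally(true, false)); // overflow in a nested goal: don't cache
    }
}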