1 //! This module both handles the global cache which stores "finished" goals,
2 //! and the provisional cache which contains partially computed goals.
4 //! The provisional cache is necessary when dealing with coinductive cycles.
6 //! For more information about the provisional cache and coinduction in general,
7 //! check out the relevant section of the rustc-dev-guide.
9 //! FIXME(@lcnr): Write that section, feel free to ping me if you need help here
10 //! before then or if I still haven't done that before January 2023.
11 use super::overflow
::OverflowData
;
12 use super::StackDepth
;
13 use crate::solve
::{CanonicalGoal, QueryResult}
;
14 use rustc_data_structures
::fx
::FxHashMap
;
15 use rustc_index
::vec
::IndexVec
;
16 use rustc_middle
::ty
::TyCtxt
;
rustc_index::newtype_index! {
    /// Index of an entry in [`ProvisionalCache::entries`]; also the value
    /// type of its `lookup_table`.
    pub struct EntryIndex {}
}
/// A single partially-computed goal in the provisional cache.
#[derive(Debug, Clone)]
pub(super) struct ProvisionalEntry<'tcx> {
    // In case we have a coinductive cycle, this is
    // the currently least restrictive result of this goal.
    pub(super) response: QueryResult<'tcx>,
    // In case of a cycle, the position of the deepest stack entry involved
    // in that cycle. This is monotonically decreasing in the stack as all
    // elements between the current stack element and the deepest stack entry
    // involved have to also be involved in that cycle.
    //
    // We can only move entries to the global cache once we're completely done
    // with the cycle. If this entry has not been involved in a cycle,
    // this is just its own depth.
    pub(super) depth: StackDepth,
    // The goal for this entry. Should always be equal to the corresponding goal
    // in the lookup table.
    pub(super) goal: CanonicalGoal<'tcx>,
}
/// The provisional cache: stores goals whose computation has started but is
/// not yet finished, so that cycle participants can reuse in-progress results.
pub(super) struct ProvisionalCache<'tcx> {
    // All in-progress entries, indexed by [`EntryIndex`].
    pub(super) entries: IndexVec<EntryIndex, ProvisionalEntry<'tcx>>,
    // FIXME: This is only used to quickly check whether a given goal
    // is in the cache. We should experiment with using something like
    // `SsoHashSet` here because in most cases there are only a few entries.
    pub(super) lookup_table: FxHashMap<CanonicalGoal<'tcx>, EntryIndex>,
}
50 impl<'tcx
> ProvisionalCache
<'tcx
> {
51 pub(super) fn empty() -> ProvisionalCache
<'tcx
> {
52 ProvisionalCache { entries: Default::default(), lookup_table: Default::default() }
55 pub(super) fn is_empty(&self) -> bool
{
56 self.entries
.is_empty() && self.lookup_table
.is_empty()
59 /// Adds a dependency from the current leaf to `target` in the cache
60 /// to prevent us from moving any goals which depend on the current leaf
61 /// to the global cache while we're still computing `target`.
63 /// Its important to note that `target` may already be part of a different cycle.
64 /// In this case we have to ensure that we also depend on all other goals
65 /// in the existing cycle in addition to the potentially direct cycle with `target`.
66 pub(super) fn add_dependency_of_leaf_on(&mut self, target
: EntryIndex
) {
67 let depth
= self.entries
[target
].depth
;
68 for provisional_entry
in &mut self.entries
.raw
[target
.index()..] {
69 // The depth of `target` is the position of the deepest goal in the stack
70 // on which `target` depends. That goal is the `root` of this cycle.
72 // Any entry which was added after `target` is either on the stack itself
73 // at which point its depth is definitely at least as high as the depth of
74 // `root`. If it's not on the stack itself it has to depend on a goal
75 // between `root` and `leaf`. If it were to depend on a goal deeper in the
76 // stack than `root`, then `root` would also depend on that goal, at which
77 // point `root` wouldn't be the root anymore.
78 debug_assert
!(provisional_entry
.depth
>= depth
);
79 provisional_entry
.depth
= depth
;
82 // We only update entries which were added after `target` as no other
83 // entry should have a higher depth.
85 // Any entry which previously had a higher depth than target has to
86 // be between `target` and `root`. Because of this we would have updated
87 // its depth when calling `add_dependency_of_leaf_on(root)` for `target`.
88 if cfg
!(debug_assertions
) {
89 self.entries
.iter().all(|e
| e
.depth
<= depth
);
93 pub(super) fn depth(&self, entry_index
: EntryIndex
) -> StackDepth
{
94 self.entries
[entry_index
].depth
97 pub(super) fn provisional_result(&self, entry_index
: EntryIndex
) -> QueryResult
<'tcx
> {
98 self.entries
[entry_index
].response
102 pub(super) fn try_move_finished_goal_to_global_cache
<'tcx
>(
104 overflow_data
: &mut OverflowData
,
105 stack
: &IndexVec
<super::StackDepth
, super::StackElem
<'tcx
>>,
106 goal
: CanonicalGoal
<'tcx
>,
107 response
: QueryResult
<'tcx
>,
109 // We move goals to the global cache if we either did not hit an overflow or if it's
110 // the root goal as that will now always hit the same overflow limit.
112 // NOTE: We cannot move any non-root goals to the global cache even if their final result
113 // isn't impacted by the overflow as that goal still has unstable query dependencies
114 // because it didn't go its full depth.
116 // FIXME(@lcnr): We could still cache subtrees which are not impacted by overflow though.
117 // Tracking that info correctly isn't trivial, so I haven't implemented it for now.
118 let should_cache_globally
= !overflow_data
.did_overflow() || stack
.is_empty();
119 if should_cache_globally
{
120 // FIXME: move the provisional entry to the global cache.
121 let _
= (tcx
, goal
, response
);