Skip to content

cleanup search graph impl #142460

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jun 14, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 9 additions & 9 deletions compiler/rustc_type_ir/src/search_graph/global_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use super::{AvailableDepth, Cx, NestedGoals};
use crate::data_structures::HashMap;

struct Success<X: Cx> {
additional_depth: usize,
required_depth: usize,
nested_goals: NestedGoals<X>,
result: X::Tracked<X::Result>,
}
Expand All @@ -28,7 +28,7 @@ struct CacheEntry<X: Cx> {
#[derive_where(Debug; X: Cx)]
pub(super) struct CacheData<'a, X: Cx> {
pub(super) result: X::Result,
pub(super) additional_depth: usize,
pub(super) required_depth: usize,
pub(super) encountered_overflow: bool,
pub(super) nested_goals: &'a NestedGoals<X>,
}
Expand All @@ -47,21 +47,21 @@ impl<X: Cx> GlobalCache<X> {
origin_result: X::Result,
dep_node: X::DepNodeIndex,

additional_depth: usize,
required_depth: usize,
encountered_overflow: bool,
nested_goals: NestedGoals<X>,
) {
let result = cx.mk_tracked(origin_result, dep_node);
let entry = self.map.entry(input).or_default();
if encountered_overflow {
let with_overflow = WithOverflow { nested_goals, result };
let prev = entry.with_overflow.insert(additional_depth, with_overflow);
let prev = entry.with_overflow.insert(required_depth, with_overflow);
if let Some(prev) = &prev {
assert!(cx.evaluation_is_concurrent());
assert_eq!(cx.get_tracked(&prev.result), origin_result);
}
} else {
let prev = entry.success.replace(Success { additional_depth, nested_goals, result });
let prev = entry.success.replace(Success { required_depth, nested_goals, result });
if let Some(prev) = &prev {
assert!(cx.evaluation_is_concurrent());
assert_eq!(cx.get_tracked(&prev.result), origin_result);
Expand All @@ -81,13 +81,13 @@ impl<X: Cx> GlobalCache<X> {
mut candidate_is_applicable: impl FnMut(&NestedGoals<X>) -> bool,
) -> Option<CacheData<'a, X>> {
let entry = self.map.get(&input)?;
if let Some(Success { additional_depth, ref nested_goals, ref result }) = entry.success {
if available_depth.cache_entry_is_applicable(additional_depth)
if let Some(Success { required_depth, ref nested_goals, ref result }) = entry.success {
if available_depth.cache_entry_is_applicable(required_depth)
&& candidate_is_applicable(nested_goals)
{
return Some(CacheData {
result: cx.get_tracked(&result),
additional_depth,
required_depth,
encountered_overflow: false,
nested_goals,
});
Expand All @@ -101,7 +101,7 @@ impl<X: Cx> GlobalCache<X> {
if candidate_is_applicable(nested_goals) {
return Some(CacheData {
result: cx.get_tracked(result),
additional_depth,
required_depth: additional_depth,
encountered_overflow: true,
nested_goals,
});
Expand Down
114 changes: 28 additions & 86 deletions compiler/rustc_type_ir/src/search_graph/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,14 @@ use std::hash::Hash;
use std::marker::PhantomData;

use derive_where::derive_where;
use rustc_index::{Idx, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_NoContext};
use tracing::debug;

use crate::data_structures::HashMap;

mod stack;
use stack::{Stack, StackDepth, StackEntry};
mod global_cache;
use global_cache::CacheData;
pub use global_cache::GlobalCache;
Expand Down Expand Up @@ -225,9 +226,9 @@ impl AvailableDepth {
/// in case there is exponential blowup.
fn allowed_depth_for_nested<D: Delegate>(
root_depth: AvailableDepth,
stack: &IndexVec<StackDepth, StackEntry<D::Cx>>,
stack: &Stack<D::Cx>,
) -> Option<AvailableDepth> {
if let Some(last) = stack.raw.last() {
if let Some(last) = stack.last() {
if last.available_depth.0 == 0 {
return None;
}
Expand Down Expand Up @@ -433,50 +434,6 @@ impl<X: Cx> NestedGoals<X> {
}
}

rustc_index::newtype_index! {
#[orderable]
#[gate_rustc_only]
pub struct StackDepth {}
}

/// Stack entries of the evaluation stack. Its fields tend to be lazily updated
/// when popping a child goal or completely immutable.
#[derive_where(Debug; X: Cx)]
struct StackEntry<X: Cx> {
input: X::Input,

/// Whether proving this goal is a coinductive step.
///
/// This is used when encountering a trait solver cycle to
/// decide on the initial provisional result of the cycle.
step_kind_from_parent: PathKind,

/// The available depth of a given goal, immutable.
available_depth: AvailableDepth,

/// The maximum depth reached by this stack entry, only up-to date
/// for the top of the stack and lazily updated for the rest.
reached_depth: StackDepth,

/// All cycle heads this goal depends on. Lazily updated and only
/// up-to date for the top of the stack.
heads: CycleHeads,

/// Whether evaluating this goal encountered overflow. Lazily updated.
encountered_overflow: bool,

/// Whether this goal has been used as the root of a cycle. This gets
/// eagerly updated when encountering a cycle.
has_been_used: Option<UsageKind>,

/// The nested goals of this goal, see the doc comment of the type.
nested_goals: NestedGoals<X>,

/// Starts out as `None` and gets set when rerunning this
/// goal in case we encounter a cycle.
provisional_result: Option<X::Result>,
}

/// A provisional result of an already computed goal which depends on other
/// goals still on the stack.
#[derive_where(Debug; X: Cx)]
Expand All @@ -498,7 +455,7 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
/// The stack of goals currently being computed.
///
/// An element is *deeper* in the stack if its index is *lower*.
stack: IndexVec<StackDepth, StackEntry<X>>,
stack: Stack<X>,
/// The provisional cache contains entries for already computed goals which
/// still depend on goals higher-up in the stack. We don't move them to the
/// global cache and track them locally instead. A provisional cache entry
Expand Down Expand Up @@ -537,16 +494,16 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// and using existing global cache entries to make sure they
/// have the same impact on the remaining evaluation.
fn update_parent_goal(
stack: &mut IndexVec<StackDepth, StackEntry<X>>,
stack: &mut Stack<X>,
step_kind_from_parent: PathKind,
reached_depth: StackDepth,
required_depth_for_nested: usize,
heads: &CycleHeads,
encountered_overflow: bool,
context: UpdateParentGoalCtxt<'_, X>,
) {
if let Some(parent_index) = stack.last_index() {
let parent = &mut stack[parent_index];
parent.reached_depth = parent.reached_depth.max(reached_depth);
parent.required_depth = parent.required_depth.max(required_depth_for_nested + 1);
parent.encountered_overflow |= encountered_overflow;

parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads);
Expand Down Expand Up @@ -588,13 +545,11 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// the stack which completes the cycle. This means that given an inductive step AB which then
/// cycles coinductively with A, we need to treat this cycle as coinductive.
fn cycle_path_kind(
stack: &IndexVec<StackDepth, StackEntry<X>>,
stack: &Stack<X>,
step_kind_to_head: PathKind,
head: StackDepth,
) -> PathKind {
stack.raw[head.index() + 1..]
.iter()
.fold(step_kind_to_head, |curr, entry| curr.extend(entry.step_kind_from_parent))
stack.cycle_step_kinds(head).fold(step_kind_to_head, |curr, step| curr.extend(step))
}

/// Probably the most involved method of the whole solver.
Expand Down Expand Up @@ -656,20 +611,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
return result;
}

// Unfortunate, it looks like we actually have to compute this goal.
let depth = self.stack.next_index();
let entry = StackEntry {
// Unfortunate, it looks like we actually have to compute this goal.
self.stack.push(StackEntry {
input,
step_kind_from_parent,
available_depth,
reached_depth: depth,
required_depth: 0,
heads: Default::default(),
encountered_overflow: false,
has_been_used: None,
nested_goals: Default::default(),
provisional_result: None,
};
assert_eq!(self.stack.push(entry), depth);
});

// This is for global caching, so we properly track query dependencies.
// Everything that affects the `result` should be performed within this
Expand All @@ -686,7 +639,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
Self::update_parent_goal(
&mut self.stack,
final_entry.step_kind_from_parent,
final_entry.reached_depth,
final_entry.required_depth,
&final_entry.heads,
final_entry.encountered_overflow,
UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals),
Expand All @@ -700,7 +653,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// the global cache.
assert_eq!(result, expected, "input={input:?}");
} else if D::inspect_is_noop(inspect) {
self.insert_global_cache(cx, input, final_entry, result, dep_node)
self.insert_global_cache(cx, final_entry, result, dep_node)
}
} else if D::ENABLE_PROVISIONAL_CACHE {
debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
Expand Down Expand Up @@ -728,7 +681,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
input: X::Input,
inspect: &mut D::ProofTreeBuilder,
) -> X::Result {
if let Some(last) = self.stack.raw.last_mut() {
if let Some(last) = self.stack.last_mut() {
last.encountered_overflow = true;
// If computing a goal `B` depends on another goal `A` and
// `A` has a nested goal which overflows, then computing `B`
Expand Down Expand Up @@ -859,7 +812,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// apply provisional cache entries which encountered overflow once the
// current goal is already part of the same cycle. This check could be
// improved but seems to be good enough for now.
let last = self.stack.raw.last().unwrap();
let last = self.stack.last().unwrap();
if last.heads.opt_lowest_cycle_head().is_none_or(|lowest| lowest > head) {
continue;
}
Expand All @@ -868,14 +821,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
// A provisional cache entry is only valid if the current path from its
// highest cycle head to the goal is the same.
if path_from_head == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head) {
// While we don't have to track the full depth of the provisional cache entry,
// we do have to increment the required depth by one as we'd have already failed
// with overflow otherwise
let next_index = self.stack.next_index();
Self::update_parent_goal(
&mut self.stack,
step_kind_from_parent,
next_index,
0,
heads,
encountered_overflow,
UpdateParentGoalCtxt::ProvisionalCacheHit,
Expand All @@ -893,7 +842,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
/// evaluating this entry would not have ended up depending on either a goal
/// already on the stack or a provisional cache entry.
fn candidate_is_applicable(
stack: &IndexVec<StackDepth, StackEntry<X>>,
stack: &Stack<X>,
step_kind_from_parent: PathKind,
provisional_cache: &HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>,
nested_goals: &NestedGoals<X>,
Expand Down Expand Up @@ -991,7 +940,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
available_depth: AvailableDepth,
) -> Option<X::Result> {
cx.with_global_cache(|cache| {
let CacheData { result, additional_depth, encountered_overflow, nested_goals } = cache
let CacheData { result, required_depth, encountered_overflow, nested_goals } = cache
.get(cx, input, available_depth, |nested_goals| {
Self::candidate_is_applicable(
&self.stack,
Expand All @@ -1001,23 +950,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
)
})?;

// Update the reached depth of the current goal to make sure
// its state is the same regardless of whether we've used the
// global cache or not.
let reached_depth = self.stack.next_index().plus(additional_depth);
// We don't move cycle participants to the global cache, so the
// cycle heads are always empty.
let heads = Default::default();
Self::update_parent_goal(
&mut self.stack,
step_kind_from_parent,
reached_depth,
required_depth,
&heads,
encountered_overflow,
UpdateParentGoalCtxt::Ordinary(nested_goals),
);

debug!(?additional_depth, "global cache hit");
debug!(?required_depth, "global cache hit");
Some(result)
})
}
Expand All @@ -1028,7 +973,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
input: X::Input,
step_kind_from_parent: PathKind,
) -> Option<X::Result> {
let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?;
let head = self.stack.find(input)?;
// We have a nested goal which directly relies on a goal deeper in the stack.
//
// We start by tagging all cycle participants, as that's necessary for caching.
Expand All @@ -1043,10 +988,9 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {

// Subtle: when encountering a cyclic goal, we still first checked for overflow,
// so we have to update the reached depth.
let next_index = self.stack.next_index();
let last_index = self.stack.last_index().unwrap();
let last = &mut self.stack[last_index];
last.reached_depth = last.reached_depth.max(next_index);
last.required_depth = last.required_depth.max(1);

last.nested_goals.insert(input, step_kind_from_parent.into());
last.nested_goals.insert(last.input, PathsToNested::EMPTY);
Expand Down Expand Up @@ -1095,7 +1039,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
let mut i = 0;
loop {
let result = evaluate_goal(self, inspect);
let stack_entry = self.stack.pop().unwrap();
let stack_entry = self.stack.pop();
debug_assert_eq!(stack_entry.input, input);

// If the current goal is not the root of a cycle, we are done.
Expand Down Expand Up @@ -1176,20 +1120,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
fn insert_global_cache(
&mut self,
cx: X,
input: X::Input,
final_entry: StackEntry<X>,
result: X::Result,
dep_node: X::DepNodeIndex,
) {
let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len();
debug!(?final_entry, ?result, "insert global cache");
cx.with_global_cache(|cache| {
cache.insert(
cx,
input,
final_entry.input,
result,
dep_node,
additional_depth,
final_entry.required_depth,
final_entry.encountered_overflow,
final_entry.nested_goals,
)
Expand Down
Loading
Loading