perf: Don't store the fields in the interned map #812

Merged 1 commit on Apr 22, 2025

src/id.rs (2 additions, 0 deletions)
@@ -42,6 +42,7 @@ impl Id {
     /// salsa computations.
     #[doc(hidden)]
     #[track_caller]
+    #[inline]
     pub const unsafe fn from_u32(v: u32) -> Self {
         debug_assert!(v < Self::MAX_U32);
         Id {
@@ -50,6 +51,7 @@
         }
     }
 
+    #[inline]
     pub const fn as_u32(self) -> u32 {
         self.value.get() - 1
     }
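
An aside on `Id` itself: `as_u32` returning `self.value.get() - 1` suggests the id is stored as `value + 1` in a `NonZeroU32`, so `Option<Id>` stays four bytes thanks to the niche. A sketch under that assumption (not salsa's actual definition):

use std::mem::size_of;
use std::num::NonZeroU32;

#[derive(Copy, Clone, Debug, PartialEq)]
struct Id {
    value: NonZeroU32,
}

impl Id {
    const MAX_U32: u32 = u32::MAX - 1;

    fn from_u32(v: u32) -> Self {
        debug_assert!(v < Self::MAX_U32);
        // Store v + 1 so zero is never used and the NonZeroU32 niche stays free.
        Id {
            value: NonZeroU32::new(v + 1).unwrap(),
        }
    }

    fn as_u32(self) -> u32 {
        self.value.get() - 1
    }
}

fn main() {
    // The niche means Option<Id> needs no extra tag byte.
    assert_eq!(size_of::<Option<Id>>(), size_of::<u32>());
    assert_eq!(Id::from_u32(7).as_u32(), 7);
}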

src/interned.rs (27 additions, 10 deletions)
@@ -1,6 +1,7 @@
 #![allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety
 
 use std::any::TypeId;
+use std::cell::Cell;
 use std::fmt;
 use std::hash::{BuildHasher, Hash, Hasher};
 use std::marker::PhantomData;
@@ -60,10 +61,14 @@ pub struct IngredientImpl<C: Configuration> {
 
     /// Maps from data to the existing interned id for that data.
     ///
+    /// This doesn't hold the fields themselves to save memory, instead it points to the slot ID.
+    ///
     /// Deadlock requirement: We access `value_map` while holding lock on `key_map`, but not vice versa.
-    key_map: FxDashMap<C::Fields<'static>, Id>,
+    key_map: FxDashMap<Id, ()>,
 
     memo_table_types: Arc<MemoTableTypes>,
+
+    _marker: PhantomData<fn() -> C>,
 }
 
 /// Struct storing the interned fields.
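
The core change: `key_map` no longer stores a clone of the interned fields as its key; it is now effectively a hash set of slot IDs, and both hashing and equality resolve the ID back into the value table. A minimal single-threaded sketch of that pattern, using hashbrown's `HashTable` in place of the PR's sharded DashMap and raw-table API (the `Interner` type and its field names are invented for illustration):

use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};

use hashbrown::HashTable; // hashbrown = "0.15"

struct Interner<T> {
    values: Vec<T>,      // backing store; an id is an index into it
    ids: HashTable<u32>, // stores only ids, never the (possibly large) fields
    hasher: RandomState,
}

impl<T: Hash + Eq> Interner<T> {
    fn new() -> Self {
        Self {
            values: Vec::new(),
            ids: HashTable::new(),
            hasher: RandomState::new(),
        }
    }

    fn intern(&mut self, value: T) -> u32 {
        let hash = self.hasher.hash_one(&value);
        // Equality resolves each candidate id through the backing store.
        if let Some(&id) = self
            .ids
            .find(hash, |&id| self.values[id as usize] == value)
        {
            return id;
        }
        let id = u32::try_from(self.values.len()).unwrap();
        self.values.push(value);
        // On resize the table must re-hash through the backing store too;
        // more expensive than hashing a stored key, but resizes are rare.
        self.ids.insert_unique(hash, id, |&id| {
            self.hasher.hash_one(&self.values[id as usize])
        });
        id
    }
}

The trade-off is an extra pointer chase per probe in exchange for not keeping a second copy of every interned value alive inside the map, which is what the PR title refers to.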
Expand Down Expand Up @@ -135,6 +140,7 @@ where
ingredient_index,
key_map: Default::default(),
memo_table_types: Arc::new(MemoTableTypes::default()),
_marker: PhantomData,
}
}

@@ -193,24 +199,32 @@ where
     {
         let (zalsa, zalsa_local) = db.zalsas();
         let current_revision = zalsa.current_revision();
+        let table = zalsa.table();
 
         // Optimization to only get read lock on the map if the data has already been interned.
         let data_hash = self.key_map.hasher().hash_one(&key);
         let shard = &self.key_map.shards()[self.key_map.determine_shard(data_hash as _)];
-        let eq = |(data, _): &_| {
+        let found_value = Cell::new(None);
+        let eq = |(id, _): &_| {
+            let data = table.get::<Value<C>>(*id);
+            found_value.set(Some(data));
             // SAFETY: it's safe to go from Data<'static> to Data<'db>
             // shrink lifetime here to use a single lifetime in Lookup::eq(&StructKey<'db>, &C::Data<'db>)
-            let data: &C::Fields<'db> = unsafe { std::mem::transmute(data) };
+            let data = unsafe {
+                std::mem::transmute::<&C::Fields<'static>, &C::Fields<'db>>(&data.fields)
+            };
             HashEqLike::eq(data, &key)
         };
 
         {
             let lock = shard.read();
             if let Some(bucket) = lock.find(data_hash, eq) {
                 // SAFETY: Read lock on map is held during this block
-                let id = unsafe { *bucket.as_ref().1.get() };
+                let id = unsafe { bucket.as_ref().0 };
 
-                let value = zalsa.table().get::<Value<C>>(id);
+                let value = found_value
+                    .get()
+                    .expect("found the interned, so `found_value` should be set");
 
                 // Sync the value's revision.
                 if value.last_interned_at.load() < current_revision {
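
A detail worth noting: the `eq` closure must already resolve the candidate ID into a `Value<C>`, so the PR records that reference in a `Cell` (`found_value`) and the hit path reuses it rather than paying for a second table lookup. The same trick in isolation, with invented names and a plain slice standing in for salsa's `Table`:

use std::cell::Cell;
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};

use hashbrown::HashTable;

// Find `key` in a table that stores only ids, returning both the id and the
// already-resolved value so the caller avoids a second lookup.
fn lookup<'a, T: Hash + Eq>(
    ids: &HashTable<u32>,
    values: &'a [T],
    hasher: &RandomState,
    key: &T,
) -> Option<(u32, &'a T)> {
    let found: Cell<Option<&'a T>> = Cell::new(None);
    let hash = hasher.hash_one(key);
    let id = *ids.find(hash, |&id| {
        let candidate = &values[id as usize];
        // Record every candidate we resolve; `find` stops on the first match,
        // so on success the cell holds the matching value.
        found.set(Some(candidate));
        candidate == key
    })?;
    Some((id, found.get().expect("set when eq returned true")))
}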
Expand Down Expand Up @@ -243,12 +257,16 @@ where
}

let mut lock = shard.write();
match lock.find_or_find_insert_slot(data_hash, eq, |(element, _)| {
self.key_map.hasher().hash_one(element)
match lock.find_or_find_insert_slot(data_hash, eq, |(id, _)| {
// This closure is only called if the table is resized. So while it's expensive to lookup all values,
// it will only happen rarely.
self.key_map
.hasher()
.hash_one(&table.get::<Value<C>>(*id).fields)
}) {
// Data has been interned by a racing call, use that ID instead
Ok(slot) => {
let id = unsafe { *slot.as_ref().1.get() };
let id = unsafe { slot.as_ref().0 };
let value = zalsa.table().get::<Value<C>>(id);

// Sync the value's revision.
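
On a miss the read lock is dropped, the write lock is taken, and `find_or_find_insert_slot` re-checks the table because a racing call may have interned the data in between. That read-then-write dance, reduced to a single `RwLock` over a plain map (a sketch of the locking shape only; salsa shards the map and stores IDs rather than keys):

use std::collections::HashMap;
use std::sync::RwLock;

struct Interner {
    // (map from key to id, next fresh id)
    inner: RwLock<(HashMap<String, u32>, u32)>,
}

impl Interner {
    fn intern(&self, key: &str) -> u32 {
        // Fast path: a shared read lock is enough when the key is present.
        if let Some(&id) = self.inner.read().unwrap().0.get(key) {
            return id;
        }
        // Slow path: take the write lock, then re-check, because a racing
        // thread may have interned `key` between our unlock and this lock.
        let mut guard = self.inner.write().unwrap();
        let (map, next) = &mut *guard;
        if let Some(&id) = map.get(key) {
            return id;
        }
        let id = *next;
        *next += 1;
        map.insert(key.to_owned(), id);
        id
    }
}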
Expand Down Expand Up @@ -302,8 +320,7 @@ where

let value = zalsa.table().get::<Value<C>>(id);

let slot_value = (value.fields.clone(), SharedValue::new(id));
unsafe { lock.insert_in_slot(data_hash, slot, slot_value) };
unsafe { lock.insert_in_slot(data_hash, slot, (id, SharedValue::new(()))) };

debug_assert_eq!(
data_hash,
Expand Down

src/table.rs (5 additions, 0 deletions)
@@ -164,6 +164,7 @@ impl PageIndex {
 struct SlotIndex(usize);
 
 impl SlotIndex {
+    #[inline]
     fn new(idx: usize) -> Self {
         debug_assert!(idx < PAGE_LEN);
         Self(idx)
@@ -218,6 +219,7 @@ impl Table {
     /// # Panics
     ///
     /// If `page` is out of bounds or the type `T` is incorrect.
+    #[inline]
     pub(crate) fn page<T: Slot>(&self, page: PageIndex) -> PageView<T> {
         self.pages[page.0].assert_type::<T>()
     }
@@ -317,6 +319,7 @@ impl<'p, T: Slot> PageView<'p, T> {
         unsafe { slice::from_raw_parts(self.0.data.cast::<PageDataEntry<T>>().as_ptr(), len) }
     }
 
+    #[inline]
     fn data(&self) -> &'p [T] {
         let len = self.0.allocated.load(Ordering::Acquire);
         // SAFETY: `len` is the initialized length of the page
@@ -385,6 +388,7 @@ impl Page {
         }
     }
 
+    #[inline]
     fn assert_type<T: Slot>(&self) -> PageView<T> {
         assert_eq!(
             self.slot_type_id,
@@ -420,6 +424,7 @@ fn make_id(page: PageIndex, slot: SlotIndex) -> Id {
     unsafe { Id::from_u32((page << PAGE_LEN_BITS) | slot) }
 }
 
+#[inline]
 fn split_id(id: Id) -> (PageIndex, SlotIndex) {
     let id = id.as_u32() as usize;
     let slot = id & PAGE_LEN_MASK;
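
For reference, `make_id` and `split_id` pack a page index and a slot index into one `u32`: the high bits pick the page, the low `PAGE_LEN_BITS` bits pick the slot within the page. A toy round-trip under assumed constants (salsa's actual values may differ):

// Assumed values for illustration; salsa defines its own PAGE_LEN_BITS.
const PAGE_LEN_BITS: usize = 10;
const PAGE_LEN: usize = 1 << PAGE_LEN_BITS; // 1024 slots per page
const PAGE_LEN_MASK: usize = PAGE_LEN - 1;

fn make_id(page: usize, slot: usize) -> u32 {
    debug_assert!(slot < PAGE_LEN);
    ((page << PAGE_LEN_BITS) | slot) as u32
}

fn split_id(id: u32) -> (usize, usize) {
    let id = id as usize;
    (id >> PAGE_LEN_BITS, id & PAGE_LEN_MASK)
}

fn main() {
    let id = make_id(3, 17);
    assert_eq!(id, 3 * 1024 + 17);
    assert_eq!(split_id(id), (3, 17)); // round-trips exactly
}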