Squashed 'vendor/ruvector/' content from commit b64c2172

git-subtree-dir: vendor/ruvector
git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
commit d803bfe2b1
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,809 @@
//! Category Theory Benchmarks for Prime-Radiant
//!
//! Benchmarks for category-theoretic operations including:
//! - Functor application
//! - Morphism composition chains
//! - Topos operations (pullback, pushforward, exponential)
//! - Natural transformation computation
//!
//! Target metrics:
//! - Functor application: < 100us per object
//! - Composition chain (100 morphisms): < 1ms
//! - Topos pullback: < 500us
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::collections::HashMap;
// ============================================================================
// CATEGORY THEORY TYPES
// ============================================================================
/// Object identifier
///
/// Newtype over `u64`. Ids are allocated from `Category::next_id`, a single
/// counter shared between objects and morphisms, so the numeric values of
/// object and morphism ids never collide within one category.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct ObjectId(u64);
/// Morphism identifier
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct MorphismId(u64);
/// A morphism in a category
#[derive(Clone, Debug)]
struct Morphism {
    id: MorphismId,
    source: ObjectId,
    target: ObjectId,
    /// Linear transformation matrix (for VectorCategory)
    /// Row-major; rows index the target dimension, columns the source
    /// dimension (see `Topos::pullback`'s projection matrices).
    matrix: Option<Vec<Vec<f64>>>,
}
/// Category structure
///
/// Objects and morphisms are stored by id; compositions are memoised in
/// `compositions` so repeated composition of the same pair is a lookup.
struct Category {
    objects: HashMap<ObjectId, Object>,
    morphisms: HashMap<MorphismId, Morphism>,
    /// Composition table: (f, g) -> f . g
    compositions: HashMap<(MorphismId, MorphismId), MorphismId>,
    /// Identity morphisms
    identities: HashMap<ObjectId, MorphismId>,
    // Single counter shared by object and morphism ids.
    next_id: u64,
}
/// Object with associated data
#[derive(Clone, Debug)]
struct Object {
    id: ObjectId,
    dimension: usize,
    data: Vec<f64>,
}
impl Category {
    /// Empty category with no objects or morphisms.
    fn new() -> Self {
        Self {
            objects: HashMap::new(),
            morphisms: HashMap::new(),
            compositions: HashMap::new(),
            identities: HashMap::new(),
            next_id: 0,
        }
    }
    /// Add a `dimension`-dimensional object (zero-initialised data) together
    /// with its identity morphism; returns the new object's id.
    fn add_object(&mut self, dimension: usize) -> ObjectId {
        let id = ObjectId(self.next_id);
        self.next_id += 1;
        let obj = Object {
            id,
            dimension,
            data: vec![0.0; dimension],
        };
        self.objects.insert(id, obj);
        // Add identity morphism (ids share the object counter, so no clash).
        let mor_id = MorphismId(self.next_id);
        self.next_id += 1;
        // dimension x dimension identity matrix.
        let identity_matrix = (0..dimension)
            .map(|i| {
                let mut row = vec![0.0; dimension];
                row[i] = 1.0;
                row
            })
            .collect();
        let identity = Morphism {
            id: mor_id,
            source: id,
            target: id,
            matrix: Some(identity_matrix),
        };
        self.morphisms.insert(mor_id, identity);
        self.identities.insert(id, mor_id);
        id
    }
    /// Register a morphism `source -> target` represented by `matrix`
    /// (rows = target dimension, columns = source dimension). No shape
    /// validation is performed.
    fn add_morphism(&mut self, source: ObjectId, target: ObjectId, matrix: Vec<Vec<f64>>) -> MorphismId {
        let id = MorphismId(self.next_id);
        self.next_id += 1;
        let morphism = Morphism {
            id,
            source,
            target,
            matrix: Some(matrix),
        };
        self.morphisms.insert(id, morphism);
        id
    }
    /// Compose `f . g` (apply `g` first). Returns `None` if either id is
    /// unknown, `target(g) != source(f)`, or either matrix is missing.
    /// Results are memoised, so re-composing the same pair is O(1).
    fn compose(&mut self, f: MorphismId, g: MorphismId) -> Option<MorphismId> {
        // Check if already composed
        if let Some(&result) = self.compositions.get(&(f, g)) {
            return Some(result);
        }
        let mor_f = self.morphisms.get(&f)?;
        let mor_g = self.morphisms.get(&g)?;
        // Check composability: target(g) = source(f)
        if mor_g.target != mor_f.source {
            return None;
        }
        // Compose matrices: the matrix of f . g is F * G.
        let mat_f = mor_f.matrix.as_ref()?;
        let mat_g = mor_g.matrix.as_ref()?;
        let composed_matrix = matrix_multiply(mat_f, mat_g);
        // Copy the endpoints before calling a `&mut self` method so the
        // shared borrows of `self.morphisms` have ended by then.
        let (src, dst) = (mor_g.source, mor_f.target);
        let new_id = self.add_morphism(src, dst, composed_matrix);
        self.compositions.insert((f, g), new_id);
        Some(new_id)
    }
    /// Compose an entire chain `[m0, m1, ..., mk]`, given in application
    /// order (each `m{i}` starts where `m{i-1}` ends), into the single
    /// morphism `mk . ... . m1 . m0`.
    ///
    /// Returns `None` for an empty slice or a non-composable adjacent pair.
    ///
    /// Note the accumulator is passed as the *second* argument: since
    /// `compose(f, g)` applies `g` first, folding a forward chain must call
    /// `compose(next, acc)`. The previous version called
    /// `compose(acc, next)`, which fails the composability check for every
    /// chain of length >= 2 and always returned `None`.
    fn compose_chain(&mut self, morphisms: &[MorphismId]) -> Option<MorphismId> {
        if morphisms.is_empty() {
            return None;
        }
        let mut result = morphisms[0];
        for &mor in morphisms.iter().skip(1) {
            result = self.compose(mor, result)?;
        }
        Some(result)
    }
}
/// Dense matrix product `a * b` for row-major `Vec<Vec<f64>>` data.
///
/// `a` is m x k and `b` is k x n; the result is m x n. No shape validation
/// is performed: rows are assumed rectangular, and an empty `b` yields an
/// m x 0 result.
fn matrix_multiply(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
    let cols = b.first().map_or(0, |row| row.len());
    a.iter()
        .map(|a_row| {
            (0..cols)
                .map(|j| {
                    // Dot product of a's row with b's j-th column.
                    a_row
                        .iter()
                        .zip(b.iter())
                        .map(|(&a_il, b_row)| a_il * b_row[j])
                        .sum()
                })
                .collect()
        })
        .collect()
}
// ============================================================================
// FUNCTOR IMPLEMENTATION
// ============================================================================
/// A functor between categories
///
/// Represented extensionally as a pair of closures. Ids are preserved by
/// the constructors in this file (`embedding`, `projection`), so the image
/// lives in "the same" index space as the source.
struct Functor {
    /// Object mapping (encoded as transformation)
    object_map: Box<dyn Fn(&Object) -> Object + Send + Sync>,
    /// Morphism mapping
    morphism_map: Box<dyn Fn(&Morphism) -> Morphism + Send + Sync>,
}
impl Functor {
    /// Embedding functor: embeds into higher dimension
    ///
    /// Objects are zero-padded to `target_dim`; a morphism matrix is copied
    /// into the top-left corner of a `target_dim x target_dim` matrix whose
    /// remaining diagonal is filled with 1s (identity on the new axes).
    ///
    /// Robustness fix: the copy loop now clamps the row index as well as the
    /// column index, so a `target_dim` smaller than the current dimension
    /// truncates instead of indexing out of bounds (the previous version
    /// clamped only columns and panicked on rows).
    fn embedding(target_dim: usize) -> Self {
        Self {
            object_map: Box::new(move |obj| {
                // Zero-pad (or truncate) the carried data.
                let mut data = obj.data.clone();
                data.resize(target_dim, 0.0);
                Object {
                    id: obj.id,
                    dimension: target_dim,
                    data,
                }
            }),
            morphism_map: Box::new(move |mor| {
                let matrix = mor.matrix.as_ref().map(|m| {
                    let old_dim = m.len();
                    let mut new_matrix = vec![vec![0.0; target_dim]; target_dim];
                    // Copy old matrix into top-left corner, clamping BOTH axes.
                    for i in 0..old_dim.min(target_dim) {
                        for j in 0..m[i].len().min(target_dim) {
                            new_matrix[i][j] = m[i][j];
                        }
                    }
                    // Extend with identity on newly added dimensions
                    // (empty range when target_dim <= old_dim).
                    for i in old_dim..target_dim {
                        new_matrix[i][i] = 1.0;
                    }
                    new_matrix
                });
                Morphism {
                    id: mor.id,
                    source: mor.source,
                    target: mor.target,
                    matrix,
                }
            }),
        }
    }
    /// Projection functor: projects to lower dimension
    ///
    /// Objects keep their first `target_dim` data entries; matrices keep
    /// their top-left `target_dim x target_dim` block. `take` never
    /// over-reads, so already-smaller inputs pass through unchanged (though
    /// the reported `dimension` is still set to `target_dim`).
    fn projection(target_dim: usize) -> Self {
        Self {
            object_map: Box::new(move |obj| {
                let data: Vec<f64> = obj.data.iter().take(target_dim).copied().collect();
                Object {
                    id: obj.id,
                    dimension: target_dim,
                    data,
                }
            }),
            morphism_map: Box::new(move |mor| {
                let matrix = mor.matrix.as_ref().map(|m| {
                    m.iter()
                        .take(target_dim)
                        .map(|row| row.iter().take(target_dim).copied().collect())
                        .collect()
                });
                Morphism {
                    id: mor.id,
                    source: mor.source,
                    target: mor.target,
                    matrix,
                }
            }),
        }
    }
    /// Apply the functor's object mapping.
    fn apply_object(&self, obj: &Object) -> Object {
        (self.object_map)(obj)
    }
    /// Apply the functor's morphism mapping.
    fn apply_morphism(&self, mor: &Morphism) -> Morphism {
        (self.morphism_map)(mor)
    }
}
// ============================================================================
// TOPOS OPERATIONS
// ============================================================================
/// Topos structure with subobject classifier
///
/// A plain `Category` plus distinguished objects: the terminal object `1`
/// and the subobject classifier `Omega` (both created in `Topos::new`).
struct Topos {
    base_category: Category,
    /// Subobject classifier: true/false
    omega: ObjectId,
    /// Terminal object
    terminal: ObjectId,
}
impl Topos {
    /// Fresh topos whose base category contains only the terminal object
    /// (1-dimensional) and the subobject classifier Omega (2-dimensional,
    /// for true/false).
    fn new() -> Self {
        let mut cat = Category::new();
        // Add terminal object (1-dimensional)
        let terminal = cat.add_object(1);
        // Add subobject classifier (2-dimensional for true/false)
        let omega = cat.add_object(2);
        Self {
            base_category: cat,
            omega,
            terminal,
        }
    }
    /// Compute pullback of f: A -> C and g: B -> C
    ///
    /// Returns `(A x_C B, p1, p2)` where p1/p2 project onto A and B, or
    /// `None` when a morphism is unknown or the codomains differ. The
    /// pullback object's dimension is dim(A) + dim(B).
    fn pullback(&mut self, f: MorphismId, g: MorphismId) -> Option<(ObjectId, MorphismId, MorphismId)> {
        // Copy the ids/dimensions out first: holding `&Morphism`/`&Object`
        // borrows of `base_category` across the `add_object`/`add_morphism`
        // calls below is a borrow-checker error (E0502).
        let (f_source, f_target) = {
            let mor_f = self.base_category.morphisms.get(&f)?;
            (mor_f.source, mor_f.target)
        };
        let (g_source, g_target) = {
            let mor_g = self.base_category.morphisms.get(&g)?;
            (mor_g.source, mor_g.target)
        };
        // Check that codomain matches
        if f_target != g_target {
            return None;
        }
        let dim_a = self.base_category.objects.get(&f_source)?.dimension;
        let dim_b = self.base_category.objects.get(&g_source)?.dimension;
        // Pullback object dimension is sum of source dimensions
        let pullback_dim = dim_a + dim_b;
        let pullback_obj = self.base_category.add_object(pullback_dim);
        // p1: A x_C B -> A (projection to first factor)
        let p1_matrix: Vec<Vec<f64>> = (0..dim_a)
            .map(|i| {
                let mut row = vec![0.0; pullback_dim];
                row[i] = 1.0;
                row
            })
            .collect();
        // p2: A x_C B -> B (projection to second factor)
        let p2_matrix: Vec<Vec<f64>> = (0..dim_b)
            .map(|i| {
                let mut row = vec![0.0; pullback_dim];
                row[dim_a + i] = 1.0;
                row
            })
            .collect();
        let p1 = self.base_category.add_morphism(pullback_obj, f_source, p1_matrix);
        let p2 = self.base_category.add_morphism(pullback_obj, g_source, p2_matrix);
        Some((pullback_obj, p1, p2))
    }
    /// Compute exponential object B^A
    ///
    /// The true exponential has dimension dim(B)^dim(A); this benchmark
    /// model approximates it with the product dim(A) * dim(B).
    fn exponential(&mut self, a: ObjectId, b: ObjectId) -> Option<ObjectId> {
        let dim_a = self.base_category.objects.get(&a)?.dimension;
        let dim_b = self.base_category.objects.get(&b)?.dimension;
        let exp_dim = dim_a * dim_b;
        let exp_obj = self.base_category.add_object(exp_dim);
        Some(exp_obj)
    }
    /// Compute pushout of f: C -> A and g: C -> B
    ///
    /// Returns `(A +_C B, i1, i2)` where i1/i2 inject A and B, or `None`
    /// when a morphism is unknown or the domains differ. The pushout
    /// object's dimension is dim(A) + dim(B).
    fn pushout(&mut self, f: MorphismId, g: MorphismId) -> Option<(ObjectId, MorphismId, MorphismId)> {
        // Same borrow discipline as `pullback`: copy ids/dimensions before
        // mutating the category.
        let (f_source, f_target) = {
            let mor_f = self.base_category.morphisms.get(&f)?;
            (mor_f.source, mor_f.target)
        };
        let (g_source, g_target) = {
            let mor_g = self.base_category.morphisms.get(&g)?;
            (mor_g.source, mor_g.target)
        };
        // Check that domain matches
        if f_source != g_source {
            return None;
        }
        let dim_a = self.base_category.objects.get(&f_target)?.dimension;
        let dim_b = self.base_category.objects.get(&g_target)?.dimension;
        // Pushout dimension
        let pushout_dim = dim_a + dim_b;
        let pushout_obj = self.base_category.add_object(pushout_dim);
        // i1: A -> A +_C B (identity block on top, zero rows below)
        let i1_matrix: Vec<Vec<f64>> = (0..pushout_dim)
            .map(|i| {
                if i < dim_a {
                    let mut row = vec![0.0; dim_a];
                    row[i] = 1.0;
                    row
                } else {
                    vec![0.0; dim_a]
                }
            })
            .collect();
        // i2: B -> A +_C B (zero rows on top, identity block below)
        let i2_matrix: Vec<Vec<f64>> = (0..pushout_dim)
            .map(|i| {
                if i >= dim_a {
                    let mut row = vec![0.0; dim_b];
                    row[i - dim_a] = 1.0;
                    row
                } else {
                    vec![0.0; dim_b]
                }
            })
            .collect();
        let i1 = self.base_category.add_morphism(f_target, pushout_obj, i1_matrix);
        let i2 = self.base_category.add_morphism(g_target, pushout_obj, i2_matrix);
        Some((pushout_obj, i1, i2))
    }
}
// ============================================================================
// NATURAL TRANSFORMATION
// ============================================================================
/// Natural transformation between functors
///
/// Stored extensionally: one component matrix eta_X per object id. The
/// functors themselves are implicit; see `check_naturality`.
struct NaturalTransformation {
    /// Component morphisms for each object
    components: HashMap<ObjectId, Vec<Vec<f64>>>,
}
impl NaturalTransformation {
    /// Empty transformation with no components.
    fn new() -> Self {
        Self {
            components: HashMap::new(),
        }
    }
    /// Register the component matrix eta_X for object `obj`
    /// (overwrites any previous component for the same object).
    fn add_component(&mut self, obj: ObjectId, matrix: Vec<Vec<f64>>) {
        self.components.insert(obj, matrix);
    }
    /// Apply the component at `obj` to `data`; `None` when no component is
    /// registered for that object.
    fn apply_at(&self, obj: ObjectId, data: &[f64]) -> Option<Vec<f64>> {
        let matrix = self.components.get(&obj)?;
        Some(matvec(matrix, data))
    }
    /// Check naturality square for a morphism f: A -> B
    ///
    /// `f` and `f_prime` are the images of the same underlying morphism
    /// under the two functors. Verifies `f_prime * eta_A == eta_B * f`
    /// entry-wise within 1e-10; returns false when any component or matrix
    /// is missing rather than erroring.
    fn check_naturality(&self, f: &Morphism, f_prime: &Morphism) -> bool {
        // Check: F(f) . eta_A = eta_B . G(f)
        let eta_a = match self.components.get(&f.source) {
            Some(m) => m,
            None => return false,
        };
        let eta_b = match self.components.get(&f.target) {
            Some(m) => m,
            None => return false,
        };
        let mat_f = match &f.matrix {
            Some(m) => m,
            None => return false,
        };
        let mat_f_prime = match &f_prime.matrix {
            Some(m) => m,
            None => return false,
        };
        // Left side: F(f) . eta_A
        let left = matrix_multiply(mat_f_prime, eta_a);
        // Right side: eta_B . G(f)
        let right = matrix_multiply(eta_b, mat_f);
        // Check equality (within tolerance)
        matrices_equal(&left, &right, 1e-10)
    }
}
/// Matrix-vector product `matrix * vec`: one dot product per row.
/// Length mismatches are silently truncated by `zip`.
fn matvec(matrix: &[Vec<f64>], vec: &[f64]) -> Vec<f64> {
    let mut out = Vec::with_capacity(matrix.len());
    for row in matrix {
        let dot: f64 = row.iter().zip(vec).map(|(r, v)| r * v).sum();
        out.push(dot);
    }
    out
}
/// Element-wise matrix comparison with absolute tolerance `tol`.
/// Any shape mismatch (row count or row length) reports inequality.
fn matrices_equal(a: &[Vec<f64>], b: &[Vec<f64>], tol: f64) -> bool {
    a.len() == b.len()
        && a.iter().zip(b).all(|(row_a, row_b)| {
            row_a.len() == row_b.len()
                && row_a
                    .iter()
                    .zip(row_b)
                    .all(|(va, vb)| (va - vb).abs() <= tol)
        })
}
// ============================================================================
// BENCHMARK DATA GENERATORS
// ============================================================================
/// Deterministic pseudo-random `rows x cols` matrix with entries in [-1, 1].
///
/// Uses a 64-bit LCG (Knuth's MMIX multiplier) seeded by `seed` and takes
/// the high 32 bits of the state as output. The previous version shifted by
/// 33, leaving only 31 usable bits, so the normalised value never exceeded
/// ~0.5 and every entry landed in [-1, 0]; shifting by 32 restores the
/// intended symmetric range.
fn generate_random_matrix(rows: usize, cols: usize, seed: u64) -> Vec<Vec<f64>> {
    let mut rng_state = seed;
    (0..rows)
        .map(|_| {
            (0..cols)
                .map(|_| {
                    rng_state = rng_state.wrapping_mul(6364136223846793005).wrapping_add(1);
                    // High bits have the best LCG quality; map [0, 1] -> [-1, 1].
                    ((rng_state >> 32) as f64 / (u32::MAX as f64)) * 2.0 - 1.0
                })
                .collect()
        })
        .collect()
}
fn setup_category_with_chain(dimension: usize, chain_length: usize) -> (Category, Vec<MorphismId>) {
let mut cat = Category::new();
let mut objects = Vec::new();
let mut morphisms = Vec::new();
// Create chain of objects
for _ in 0..=chain_length {
objects.push(cat.add_object(dimension));
}
// Create chain of morphisms
for i in 0..chain_length {
let matrix = generate_random_matrix(dimension, dimension, (i as u64) * 42 + 1);
let mor = cat.add_morphism(objects[i], objects[i + 1], matrix);
morphisms.push(mor);
}
(cat, morphisms)
}
// ============================================================================
// BENCHMARKS
// ============================================================================
/// Functor application: embedding and projection applied to fixture objects
/// and morphisms at several dimensions. Target: < 100us per object.
fn bench_functor_application(c: &mut Criterion) {
    let mut group = c.benchmark_group("category/functor");
    group.sample_size(100);
    for &dim in &[16, 64, 128, 256] {
        // Embed into double the dimension; project down to half.
        let target_dim = dim * 2;
        let embedding = Functor::embedding(target_dim);
        let projection = Functor::projection(dim / 2);
        // Deterministic fixtures for this dimension.
        let obj = Object {
            id: ObjectId(0),
            dimension: dim,
            data: (0..dim).map(|i| (i as f64).sin()).collect(),
        };
        let mor = Morphism {
            id: MorphismId(0),
            source: ObjectId(0),
            target: ObjectId(1),
            matrix: Some(generate_random_matrix(dim, dim, 42)),
        };
        group.throughput(Throughput::Elements(dim as u64));
        group.bench_with_input(
            BenchmarkId::new("embedding_object", dim),
            &(&embedding, &obj),
            |b, (functor, obj)| {
                b.iter(|| black_box(functor.apply_object(black_box(obj))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("embedding_morphism", dim),
            &(&embedding, &mor),
            |b, (functor, mor)| {
                b.iter(|| black_box(functor.apply_morphism(black_box(mor))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("projection_object", dim),
            &(&projection, &obj),
            |b, (functor, obj)| {
                b.iter(|| black_box(functor.apply_object(black_box(obj))))
            },
        );
    }
    group.finish();
}
/// Long composition chains, via repeated `compose` calls ("sequential") and
/// via the `compose_chain` helper. Target: 100 morphisms in < 1ms.
///
/// Each measured iteration rebuilds an identical category with
/// `iter_batched` (id allocation is deterministic, so the captured morphism
/// ids remain valid) so that memoised compositions from earlier iterations
/// don't skew the timing.
fn bench_composition_chains(c: &mut Criterion) {
    let mut group = c.benchmark_group("category/composition");
    group.sample_size(50);
    for &chain_length in &[10, 50, 100, 200] {
        let dim = 32;
        // Only the morphism ids are needed here; the previously captured
        // `mut cat` binding was unused.
        let (_, morphisms) = setup_category_with_chain(dim, chain_length);
        group.throughput(Throughput::Elements(chain_length as u64));
        group.bench_with_input(
            BenchmarkId::new("sequential", chain_length),
            &morphisms,
            |b, morphisms| {
                b.iter_batched(
                    || {
                        let (cat, _) = setup_category_with_chain(dim, chain_length);
                        cat
                    },
                    |mut cat| {
                        let mut result = morphisms[0];
                        for &mor in morphisms.iter().skip(1) {
                            // compose(f, g) applies g first, so the newly
                            // reached morphism goes in the first slot.
                            // compose(result, mor) is never composable on a
                            // forward chain and would unwrap a None here.
                            result = cat.compose(mor, result).unwrap();
                        }
                        black_box(result)
                    },
                    criterion::BatchSize::SmallInput,
                )
            },
        );
        group.bench_with_input(
            BenchmarkId::new("chain_compose", chain_length),
            &morphisms,
            |b, morphisms| {
                b.iter_batched(
                    || {
                        let (cat, _) = setup_category_with_chain(dim, chain_length);
                        cat
                    },
                    |mut cat| black_box(cat.compose_chain(morphisms)),
                    criterion::BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
/// Topos operations (pullback, pushout, exponential) at several dimensions.
/// Target: pullback < 500us. Fixtures are rebuilt per iteration with
/// `iter_batched` so setup cost stays out of the measurement.
fn bench_topos_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("category/topos");
    group.sample_size(50);
    for &dim in &[8, 16, 32, 64] {
        group.throughput(Throughput::Elements(dim as u64));
        // Setup for pullback: f: A -> C, g: B -> C (shared codomain).
        group.bench_with_input(
            BenchmarkId::new("pullback", dim),
            &dim,
            |b, &dim| {
                b.iter_batched(
                    || {
                        let mut topos = Topos::new();
                        let a = topos.base_category.add_object(dim);
                        let b = topos.base_category.add_object(dim);
                        let c = topos.base_category.add_object(dim);
                        let mat_f = generate_random_matrix(dim, dim, 42);
                        let mat_g = generate_random_matrix(dim, dim, 43);
                        let f = topos.base_category.add_morphism(a, c, mat_f);
                        let g = topos.base_category.add_morphism(b, c, mat_g);
                        (topos, f, g)
                    },
                    |(mut topos, f, g)| black_box(topos.pullback(f, g)),
                    criterion::BatchSize::SmallInput,
                )
            },
        );
        // Pushout: f: C -> A, g: C -> B (shared domain).
        group.bench_with_input(
            BenchmarkId::new("pushout", dim),
            &dim,
            |b, &dim| {
                b.iter_batched(
                    || {
                        let mut topos = Topos::new();
                        let c = topos.base_category.add_object(dim);
                        let a = topos.base_category.add_object(dim);
                        let b = topos.base_category.add_object(dim);
                        let mat_f = generate_random_matrix(dim, dim, 44);
                        let mat_g = generate_random_matrix(dim, dim, 45);
                        let f = topos.base_category.add_morphism(c, a, mat_f);
                        let g = topos.base_category.add_morphism(c, b, mat_g);
                        (topos, f, g)
                    },
                    |(mut topos, f, g)| black_box(topos.pushout(f, g)),
                    criterion::BatchSize::SmallInput,
                )
            },
        );
        // Exponential: only needs the two objects.
        group.bench_with_input(
            BenchmarkId::new("exponential", dim),
            &dim,
            |b, &dim| {
                b.iter_batched(
                    || {
                        let mut topos = Topos::new();
                        let a = topos.base_category.add_object(dim);
                        let b = topos.base_category.add_object(dim);
                        (topos, a, b)
                    },
                    |(mut topos, a, b)| black_box(topos.exponential(a, b)),
                    criterion::BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
/// Natural-transformation component application and naturality checking.
fn bench_natural_transformation(c: &mut Criterion) {
    let mut group = c.benchmark_group("category/natural_transformation");
    group.sample_size(50);
    for &dim in &[16, 32, 64, 128] {
        let mut nat_trans = NaturalTransformation::new();
        // Add components for multiple objects
        for i in 0..10 {
            let matrix = generate_random_matrix(dim, dim, i * 42);
            nat_trans.add_component(ObjectId(i), matrix);
        }
        let data: Vec<f64> = (0..dim).map(|i| (i as f64).sin()).collect();
        group.throughput(Throughput::Elements(dim as u64));
        group.bench_with_input(
            BenchmarkId::new("apply_component", dim),
            &(&nat_trans, ObjectId(0), &data),
            |b, (nat_trans, obj, data)| {
                b.iter(|| black_box(nat_trans.apply_at(*obj, black_box(data))))
            },
        );
        // Setup naturality check: f/f_prime play the two functor images of a
        // morphism 0 -> 1; both endpoints have registered components, so the
        // check runs its full matrix-multiply path.
        let f = Morphism {
            id: MorphismId(0),
            source: ObjectId(0),
            target: ObjectId(1),
            matrix: Some(generate_random_matrix(dim, dim, 100)),
        };
        let f_prime = Morphism {
            id: MorphismId(1),
            source: ObjectId(0),
            target: ObjectId(1),
            matrix: Some(generate_random_matrix(dim, dim, 101)),
        };
        group.bench_with_input(
            BenchmarkId::new("check_naturality", dim),
            &(&nat_trans, &f, &f_prime),
            |b, (nat_trans, f, f_prime)| {
                b.iter(|| black_box(nat_trans.check_naturality(black_box(f), black_box(f_prime))))
            },
        );
    }
    group.finish();
}
/// Raw matrix kernels used by the other benchmarks: dense multiply and
/// matrix-vector product.
fn bench_matrix_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("category/matrix");
    group.sample_size(50);
    for &dim in &[32, 64, 128, 256] {
        let a = generate_random_matrix(dim, dim, 42);
        let b = generate_random_matrix(dim, dim, 43);
        let v: Vec<f64> = (0..dim).map(|i| (i as f64).sin()).collect();
        // Throughput scales with the number of matrix entries.
        group.throughput(Throughput::Elements((dim * dim) as u64));
        group.bench_with_input(
            BenchmarkId::new("multiply", dim),
            &(&a, &b),
            |b, (a, b_mat)| {
                b.iter(|| black_box(matrix_multiply(black_box(a), black_box(b_mat))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("matvec", dim),
            &(&a, &v),
            |b, (a, v)| {
                b.iter(|| black_box(matvec(black_box(a), black_box(v))))
            },
        );
    }
    group.finish();
}
// Criterion entry points: register every benchmark group defined above.
criterion_group!(
    benches,
    bench_functor_application,
    bench_composition_chains,
    bench_topos_operations,
    bench_natural_transformation,
    bench_matrix_operations,
);
criterion_main!(benches);

View File

@@ -0,0 +1,853 @@
//! Causal Reasoning Benchmarks for Prime-Radiant
//!
//! Benchmarks for causal inference operations including:
//! - Intervention computation (do-calculus)
//! - Counterfactual queries
//! - Causal abstraction verification
//! - Structural causal model operations
//!
//! Target metrics:
//! - Intervention: < 1ms per intervention
//! - Counterfactual: < 5ms per query
//! - Abstraction verification: < 10ms for moderate models
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::collections::{HashMap, HashSet, VecDeque};
// ============================================================================
// CAUSAL MODEL TYPES
// ============================================================================
/// Variable identifier
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct VariableId(usize);
/// Variable value
///
/// All evaluators in this file ultimately collapse values to a scalar via
/// `as_f64`.
#[derive(Clone, Debug)]
enum Value {
    Continuous(f64),
    Discrete(i64),
    Vector(Vec<f64>),
}
impl Value {
    /// Collapse to a scalar: continuous values pass through, discrete values
    /// are cast, and vectors are reduced by summing their entries.
    fn as_f64(&self) -> f64 {
        match self {
            Value::Continuous(v) => *v,
            Value::Discrete(v) => *v as f64,
            Value::Vector(v) => v.iter().sum(),
        }
    }
}
/// Structural equation: V = f(Pa(V), U_V)
///
/// NOTE(review): not referenced in the visible portion of this file —
/// `CausalModel` stores its equations directly — TODO confirm it is used
/// elsewhere before removing.
struct StructuralEquation {
    variable: VariableId,
    parents: Vec<VariableId>,
    /// Function mapping parent values to variable value
    function: Box<dyn Fn(&[Value]) -> Value + Send + Sync>,
}
/// Structural Causal Model
///
/// Variables form a DAG; each may carry a structural equation over its
/// parents plus an additive exogenous (noise) value (see `forward`).
struct CausalModel {
    variables: HashMap<VariableId, String>,
    variable_ids: HashMap<String, VariableId>,
    parents: HashMap<VariableId, Vec<VariableId>>,
    children: HashMap<VariableId, Vec<VariableId>>,
    equations: HashMap<VariableId, Box<dyn Fn(&[Value]) -> Value + Send + Sync>>,
    exogenous: HashMap<VariableId, Value>,
    next_id: usize,
}
impl CausalModel {
    /// Empty model with no variables.
    fn new() -> Self {
        Self {
            variables: HashMap::new(),
            variable_ids: HashMap::new(),
            parents: HashMap::new(),
            children: HashMap::new(),
            equations: HashMap::new(),
            exogenous: HashMap::new(),
            next_id: 0,
        }
    }
    /// Register a named variable with no parents, no structural equation,
    /// and an exogenous value of 0.0; returns its id.
    fn add_variable(&mut self, name: &str) -> VariableId {
        let id = VariableId(self.next_id);
        self.next_id += 1;
        self.variables.insert(id, name.to_string());
        self.variable_ids.insert(name.to_string(), id);
        self.parents.insert(id, Vec::new());
        self.children.insert(id, Vec::new());
        // Default exogenous value
        self.exogenous.insert(id, Value::Continuous(0.0));
        id
    }
    /// Add a causal edge `from -> to`.
    ///
    /// Panics if either id was not created by `add_variable`.
    fn add_edge(&mut self, from: VariableId, to: VariableId) {
        self.parents.get_mut(&to).unwrap().push(from);
        self.children.get_mut(&from).unwrap().push(to);
    }
    /// Set the structural equation for `var`; the closure receives parent
    /// values in the order their edges were added.
    fn set_equation<F>(&mut self, var: VariableId, func: F)
    where
        F: Fn(&[Value]) -> Value + Send + Sync + 'static,
    {
        self.equations.insert(var, Box::new(func));
    }
    /// Set the exogenous (noise) value for `var`.
    fn set_exogenous(&mut self, var: VariableId, value: Value) {
        self.exogenous.insert(var, value);
    }
    /// Parents-before-children ordering of all variables (DFS topological
    /// sort). Cycles are not reported: back-edges are skipped silently, so
    /// on a cyclic graph the result is merely a best-effort order.
    fn topological_order(&self) -> Vec<VariableId> {
        let mut order = Vec::new();
        let mut visited = HashSet::new();
        let mut temp_mark = HashSet::new();
        fn visit(
            id: VariableId,
            parents: &HashMap<VariableId, Vec<VariableId>>,
            visited: &mut HashSet<VariableId>,
            temp_mark: &mut HashSet<VariableId>,
            order: &mut Vec<VariableId>,
        ) {
            if visited.contains(&id) {
                return;
            }
            if temp_mark.contains(&id) {
                return; // Cycle detected
            }
            temp_mark.insert(id);
            // Previously `parents.get(&id).unwrap_or(&vec![])`, which
            // allocated a fresh Vec on every call even when the entry
            // existed; `if let` avoids the allocation entirely.
            if let Some(ps) = parents.get(&id) {
                for &parent in ps {
                    visit(parent, parents, visited, temp_mark, order);
                }
            }
            temp_mark.remove(&id);
            visited.insert(id);
            order.push(id);
        }
        for &id in self.variables.keys() {
            visit(id, &self.parents, &mut visited, &mut temp_mark, &mut order);
        }
        order
    }
    /// Compute values given current exogenous variables
    ///
    /// Evaluates every variable in topological order: a variable with a
    /// structural equation yields `f(parents) + U_V` (noise is additive);
    /// one without yields its exogenous value directly.
    fn forward(&self) -> HashMap<VariableId, Value> {
        let mut values = HashMap::new();
        let order = self.topological_order();
        for id in order {
            let parent_ids = self.parents.get(&id).unwrap();
            let parent_values: Vec<Value> = parent_ids
                .iter()
                .map(|&pid| values.get(&pid).cloned().unwrap_or(Value::Continuous(0.0)))
                .collect();
            let value = if let Some(func) = self.equations.get(&id) {
                // Combine exogenous with structural equation
                let exo = self.exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0));
                let base = func(&parent_values);
                Value::Continuous(base.as_f64() + exo.as_f64())
            } else {
                self.exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0))
            };
            values.insert(id, value);
        }
        values
    }
}
// ============================================================================
// INTERVENTION
// ============================================================================
/// Intervention: do(X = x)
#[derive(Clone)]
struct Intervention {
    variable: VariableId,
    value: Value,
}
impl Intervention {
    /// Clamp `variable` to `value`; evaluators treat this as severing the
    /// variable's incoming mechanisms.
    fn new(variable: VariableId, value: Value) -> Self {
        Self { variable, value }
    }
}
/// Apply intervention and compute resulting distribution
///
/// Implements do(X = x): the intervened variable is clamped to the given
/// value (its structural equation and exogenous noise are ignored); every
/// other variable is evaluated in topological order exactly as in
/// `CausalModel::forward`.
/// NOTE(review): duplicates the evaluation loop of
/// `apply_multi_intervention`; could delegate to it with a 1-element slice.
fn apply_intervention(
    model: &CausalModel,
    intervention: &Intervention,
) -> HashMap<VariableId, Value> {
    let mut values = HashMap::new();
    let order = model.topological_order();
    for id in order {
        if id == intervention.variable {
            // Override with intervention value
            values.insert(id, intervention.value.clone());
        } else {
            let parent_ids = model.parents.get(&id).unwrap();
            let parent_values: Vec<Value> = parent_ids
                .iter()
                .map(|&pid| values.get(&pid).cloned().unwrap_or(Value::Continuous(0.0)))
                .collect();
            // Structural equation plus additive exogenous noise, or the bare
            // exogenous value when no equation is set.
            let value = if let Some(func) = model.equations.get(&id) {
                let exo = model.exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0));
                let base = func(&parent_values);
                Value::Continuous(base.as_f64() + exo.as_f64())
            } else {
                model.exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0))
            };
            values.insert(id, value);
        }
    }
    values
}
/// Apply multiple interventions
///
/// Generalisation of `apply_intervention`: every variable appearing in
/// `interventions` is clamped. If the same variable appears more than once,
/// the last occurrence wins (later map inserts overwrite earlier ones).
fn apply_multi_intervention(
    model: &CausalModel,
    interventions: &[Intervention],
) -> HashMap<VariableId, Value> {
    let intervention_map: HashMap<VariableId, Value> = interventions
        .iter()
        .map(|i| (i.variable, i.value.clone()))
        .collect();
    let mut values = HashMap::new();
    let order = model.topological_order();
    for id in order {
        if let Some(value) = intervention_map.get(&id) {
            values.insert(id, value.clone());
        } else {
            let parent_ids = model.parents.get(&id).unwrap();
            let parent_values: Vec<Value> = parent_ids
                .iter()
                .map(|&pid| values.get(&pid).cloned().unwrap_or(Value::Continuous(0.0)))
                .collect();
            // Same evaluation rule as `CausalModel::forward`.
            let value = if let Some(func) = model.equations.get(&id) {
                let exo = model.exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0));
                let base = func(&parent_values);
                Value::Continuous(base.as_f64() + exo.as_f64())
            } else {
                model.exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0))
            };
            values.insert(id, value);
        }
    }
    values
}
// ============================================================================
// COUNTERFACTUAL REASONING
// ============================================================================
/// Counterfactual query: Y_x(u) where we observed Y = y
struct CounterfactualQuery {
    /// The variable we're asking about
    target: VariableId,
    /// The intervention
    intervention: Intervention,
    /// Observed facts
    /// (used by abduction to recover the exogenous noise terms).
    observations: HashMap<VariableId, Value>,
}
/// Compute counterfactual using abduction-action-prediction
///
/// Pearl's three-step recipe: (1) abduction infers the exogenous noise
/// consistent with the observations, (2) action clamps the intervened
/// variable, (3) prediction re-evaluates the model under the inferred
/// noise. Returns the counterfactual value of `query.target`, or `None`
/// when abduction fails or the target id is unknown.
fn compute_counterfactual(
    model: &CausalModel,
    query: &CounterfactualQuery,
) -> Option<Value> {
    // Step 1: Abduction - infer exogenous variables from observations
    let inferred_exogenous = abduct_exogenous(model, &query.observations)?;
    // Step 2: Action - create modified model with intervention
    // (We don't actually modify the model, we use the intervention directly)
    // Step 3: Prediction - compute outcome under intervention with inferred exogenous
    let mut values = HashMap::new();
    let order = model.topological_order();
    for id in order {
        if id == query.intervention.variable {
            values.insert(id, query.intervention.value.clone());
        } else {
            let parent_ids = model.parents.get(&id).unwrap();
            let parent_values: Vec<Value> = parent_ids
                .iter()
                .map(|&pid| values.get(&pid).cloned().unwrap_or(Value::Continuous(0.0)))
                .collect();
            // Same evaluation rule as `forward`, but with the abducted noise
            // in place of the model's stored exogenous values.
            let value = if let Some(func) = model.equations.get(&id) {
                let exo = inferred_exogenous
                    .get(&id)
                    .cloned()
                    .unwrap_or(Value::Continuous(0.0));
                let base = func(&parent_values);
                Value::Continuous(base.as_f64() + exo.as_f64())
            } else {
                inferred_exogenous
                    .get(&id)
                    .cloned()
                    .unwrap_or(Value::Continuous(0.0))
            };
            values.insert(id, value);
        }
    }
    values.get(&query.target).cloned()
}
/// Abduct exogenous variables from observations
///
/// For each observed variable with a structural equation, solves the
/// additive-noise model for the noise term: U = observed - f(Pa).
/// Unobserved variables are evaluated forward so their children see
/// consistent parent values. Currently always returns `Some`; the `Option`
/// appears reserved for future failure modes.
fn abduct_exogenous(
    model: &CausalModel,
    observations: &HashMap<VariableId, Value>,
) -> Option<HashMap<VariableId, Value>> {
    // Start from the model's stored noise and overwrite where observations
    // pin it down.
    let mut exogenous = model.exogenous.clone();
    let order = model.topological_order();
    // For each observed variable, infer the exogenous noise
    let mut computed_values = HashMap::new();
    for id in order {
        let parent_ids = model.parents.get(&id).unwrap();
        let parent_values: Vec<Value> = parent_ids
            .iter()
            .map(|&pid| {
                computed_values
                    .get(&pid)
                    .cloned()
                    .unwrap_or(Value::Continuous(0.0))
            })
            .collect();
        if let Some(observed) = observations.get(&id) {
            // Infer exogenous: U = Y - f(Pa)
            if let Some(func) = model.equations.get(&id) {
                let structural_part = func(&parent_values).as_f64();
                let inferred_exo = observed.as_f64() - structural_part;
                exogenous.insert(id, Value::Continuous(inferred_exo));
            }
            // Observed value takes precedence for downstream evaluation.
            computed_values.insert(id, observed.clone());
        } else {
            // Compute from parents (same rule as `forward`).
            let value = if let Some(func) = model.equations.get(&id) {
                let exo = exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0));
                let base = func(&parent_values);
                Value::Continuous(base.as_f64() + exo.as_f64())
            } else {
                exogenous.get(&id).cloned().unwrap_or(Value::Continuous(0.0))
            };
            computed_values.insert(id, value);
        }
    }
    Some(exogenous)
}
// ============================================================================
// CAUSAL ABSTRACTION
// ============================================================================
/// Map between low-level and high-level causal models
struct CausalAbstraction {
    /// Low-level model
    low_level: CausalModel,
    /// High-level model
    high_level: CausalModel,
    /// Variable mapping: high-level -> set of low-level variables
    variable_map: HashMap<VariableId, Vec<VariableId>>,
    /// Value mapping: how to aggregate low-level values
    /// (defaults to the arithmetic mean of the collapsed scalar values).
    value_aggregator: Box<dyn Fn(&[Value]) -> Value + Send + Sync>,
}
impl CausalAbstraction {
    /// Pair the two models with an empty variable map; the default
    /// aggregator is the mean of the scalar values (`max(1)` makes the
    /// empty slice average to 0 instead of dividing by zero).
    fn new(low_level: CausalModel, high_level: CausalModel) -> Self {
        Self {
            low_level,
            high_level,
            variable_map: HashMap::new(),
            value_aggregator: Box::new(|vals: &[Value]| {
                let sum: f64 = vals.iter().map(|v| v.as_f64()).sum();
                Value::Continuous(sum / vals.len().max(1) as f64)
            }),
        }
    }
    /// Declare that `high_var` abstracts the given low-level variables.
    fn add_mapping(&mut self, high_var: VariableId, low_vars: Vec<VariableId>) {
        self.variable_map.insert(high_var, low_vars);
    }
    /// Verify abstraction consistency: interventions commute
    ///
    /// Intervenes on the high-level model and on every mapped low-level
    /// counterpart with the same value, then requires each mapped
    /// high-level value to match the aggregate of its low-level values
    /// within 1e-6. Returns false when the intervened variable has no
    /// mapping.
    fn verify_consistency(&self, intervention: &Intervention) -> bool {
        // High-level: intervene and compute
        let high_values = apply_intervention(&self.high_level, intervention);
        // Low-level: intervene on corresponding variables and aggregate
        let low_vars = self.variable_map.get(&intervention.variable);
        if low_vars.is_none() {
            return false;
        }
        let low_interventions: Vec<Intervention> = low_vars
            .unwrap()
            .iter()
            .map(|&v| Intervention::new(v, intervention.value.clone()))
            .collect();
        let low_values = apply_multi_intervention(&self.low_level, &low_interventions);
        // Compare aggregated low-level values with high-level values
        for (&high_var, low_vars) in &self.variable_map {
            let high_val = high_values.get(&high_var).map(|v| v.as_f64()).unwrap_or(0.0);
            let low_vals: Vec<Value> = low_vars
                .iter()
                .filter_map(|&lv| low_values.get(&lv).cloned())
                .collect();
            let aggregated = (self.value_aggregator)(&low_vals).as_f64();
            if (high_val - aggregated).abs() > 1e-6 {
                return false;
            }
        }
        true
    }
    /// Compute abstraction error
    ///
    /// RMS mismatch between high-level and aggregated low-level values over
    /// `num_samples` deterministic pseudo-random interventions (value
    /// `sin(i * 0.1) * 10`, variable picked by `i % |vars|` over the map's
    /// arbitrary key order). Samples whose variable has no mapping are
    /// skipped but still counted in the divisor.
    fn compute_abstraction_error(&self, num_samples: usize) -> f64 {
        let mut total_error = 0.0;
        for i in 0..num_samples {
            // Random intervention value
            let value = Value::Continuous((i as f64 * 0.1).sin() * 10.0);
            // Pick a random variable to intervene on
            let high_vars: Vec<_> = self.high_level.variables.keys().copied().collect();
            if high_vars.is_empty() {
                continue;
            }
            let var_idx = i % high_vars.len();
            let intervention = Intervention::new(high_vars[var_idx], value);
            // Compute values
            let high_values = apply_intervention(&self.high_level, &intervention);
            let low_vars = self.variable_map.get(&intervention.variable);
            if low_vars.is_none() {
                continue;
            }
            let low_interventions: Vec<Intervention> = low_vars
                .unwrap()
                .iter()
                .map(|&v| Intervention::new(v, intervention.value.clone()))
                .collect();
            let low_values = apply_multi_intervention(&self.low_level, &low_interventions);
            // Compute error
            for (&high_var, low_vars) in &self.variable_map {
                let high_val = high_values.get(&high_var).map(|v| v.as_f64()).unwrap_or(0.0);
                let low_vals: Vec<Value> = low_vars
                    .iter()
                    .filter_map(|&lv| low_values.get(&lv).cloned())
                    .collect();
                let aggregated = (self.value_aggregator)(&low_vals).as_f64();
                total_error += (high_val - aggregated).powi(2);
            }
        }
        (total_error / num_samples.max(1) as f64).sqrt()
    }
}
// ============================================================================
// CAUSAL EFFECT ESTIMATION
// ============================================================================
/// Average Treatment Effect
///
/// Computes E[Y | do(X = treated)] - E[Y | do(X = control)] where X is
/// `treatment` and Y is `outcome`. An unknown outcome id contributes 0.0.
fn compute_ate(
    model: &CausalModel,
    treatment: VariableId,
    outcome: VariableId,
    treatment_values: (f64, f64), // (control, treated)
) -> f64 {
    let (control, treated) = treatment_values;
    // Evaluate the outcome under do(treatment = dose).
    let outcome_under = |dose: f64| {
        let intervention = Intervention::new(treatment, Value::Continuous(dose));
        apply_intervention(model, &intervention)
            .get(&outcome)
            .map(|v| v.as_f64())
            .unwrap_or(0.0)
    };
    outcome_under(treated) - outcome_under(control)
}
// ============================================================================
// BENCHMARK DATA GENERATORS
// ============================================================================
/// Build a linear chain model `V0 -> V1 -> ... -> V{length-1}` where every
/// non-root variable computes `0.8 * parent + 0.5`; the root keeps the model
/// default.
///
/// Fix: the original bound the parent id into a `move` closure but never used
/// it (the equation reads its parent from the `parents` slice), leaving a dead
/// capture and an unused-variable warning; the binding and the now-unnecessary
/// `move` are removed, matching the non-capturing equations used by the other
/// generators in this file.
fn create_chain_model(length: usize) -> CausalModel {
    let mut model = CausalModel::new();
    let mut vars = Vec::with_capacity(length);
    for i in 0..length {
        let var = model.add_variable(&format!("V{}", i));
        vars.push(var);
        if i > 0 {
            model.add_edge(vars[i - 1], var);
            model.set_equation(var, |parents| {
                if parents.is_empty() {
                    Value::Continuous(0.0)
                } else {
                    // Linear structural equation: child = 0.8 * parent + 0.5
                    Value::Continuous(parents[0].as_f64() * 0.8 + 0.5)
                }
            });
        }
    }
    model
}
/// Build a diamond-shaped layered DAG: a single source, `num_layers - 2`
/// hidden layers of `width` variables each, and a single sink. Each variable
/// is fully connected to the previous layer and averages its parents plus 0.1.
fn create_diamond_model(num_layers: usize, width: usize) -> CausalModel {
    let mut model = CausalModel::new();
    let mut layers: Vec<Vec<VariableId>> = Vec::with_capacity(num_layers);
    for layer in 0..num_layers {
        // First and last layers are the diamond's single source/sink.
        let layer_width = if layer == 0 || layer + 1 == num_layers { 1 } else { width };
        let mut current = Vec::with_capacity(layer_width);
        for i in 0..layer_width {
            let var = model.add_variable(&format!("L{}_{}", layer, i));
            current.push(var);
            if layer > 0 {
                // Fully connect to every variable of the previous layer.
                for &parent in &layers[layer - 1] {
                    model.add_edge(parent, var);
                }
                // Structural equation: mean of parents, shifted by 0.1.
                model.set_equation(var, |parents| {
                    let sum: f64 = parents.iter().map(|p| p.as_f64()).sum();
                    Value::Continuous(sum / parents.len().max(1) as f64 + 0.1)
                });
            }
        }
        layers.push(current);
    }
    model
}
fn create_dense_model(num_vars: usize, density: f64, seed: u64) -> CausalModel {
let mut model = CausalModel::new();
let mut vars = Vec::new();
// Create variables
for i in 0..num_vars {
let var = model.add_variable(&format!("V{}", i));
vars.push(var);
}
// Add edges (respecting DAG structure: only forward edges)
let mut rng_state = seed;
for i in 0..num_vars {
for j in (i + 1)..num_vars {
rng_state = rng_state.wrapping_mul(6364136223846793005).wrapping_add(1);
let random = (rng_state >> 33) as f64 / (u32::MAX as f64);
if random < density {
model.add_edge(vars[i], vars[j]);
}
}
}
// Set equations
for i in 1..num_vars {
model.set_equation(vars[i], |parents| {
let sum: f64 = parents.iter().map(|p| p.as_f64()).sum();
Value::Continuous(sum * 0.5 + 0.1)
});
}
model
}
// ============================================================================
// BENCHMARKS
// ============================================================================
/// Benchmark `apply_intervention` on chain and diamond topologies.
///
/// Chains (10..200 variables) intervene at the middle variable; 4-layer
/// diamonds of varying width intervene at the source. Throughput is reported
/// in model variables per iteration.
fn bench_intervention(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/intervention");
    group.sample_size(100);
    for &size in &[10, 50, 100, 200] {
        let model = create_chain_model(size);
        let var = VariableId(size / 2); // Intervene in middle
        let intervention = Intervention::new(var, Value::Continuous(1.0));
        group.throughput(Throughput::Elements(size as u64));
        group.bench_with_input(
            BenchmarkId::new("chain", size),
            &(&model, &intervention),
            |b, (model, intervention)| {
                b.iter(|| black_box(apply_intervention(black_box(model), black_box(intervention))))
            },
        );
    }
    for &size in &[10, 25, 50] {
        let model = create_diamond_model(4, size);
        let var = VariableId(0);
        let intervention = Intervention::new(var, Value::Continuous(1.0));
        // 4-layer diamond: source + 2 hidden layers of `size` + sink
        let total_vars = 2 + 2 * size; // 1 + size + size + 1
        group.throughput(Throughput::Elements(total_vars as u64));
        group.bench_with_input(
            BenchmarkId::new("diamond", size),
            &(&model, &intervention),
            |b, (model, intervention)| {
                b.iter(|| black_box(apply_intervention(black_box(model), black_box(intervention))))
            },
        );
    }
    group.finish();
}
/// Benchmark `apply_multi_intervention` with 1..20 simultaneous interventions
/// on a fixed 100-variable dense model.
fn bench_multi_intervention(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/multi_intervention");
    group.sample_size(50);
    for &count in &[1usize, 5, 10, 20] {
        let model = create_dense_model(100, 0.1, 42);
        // Intervene on every fifth variable: V0, V5, V10, ...
        let interventions: Vec<Intervention> = (0..count)
            .map(|i| Intervention::new(VariableId(i * 5), Value::Continuous(1.0)))
            .collect();
        group.throughput(Throughput::Elements(count as u64));
        group.bench_with_input(
            BenchmarkId::new("dense_100", count),
            &(&model, &interventions),
            |b, (model, interventions)| {
                b.iter(|| black_box(apply_multi_intervention(black_box(model), black_box(interventions))))
            },
        );
    }
    group.finish();
}
/// Benchmark counterfactual inference on chains of growing length: "what would
/// the last variable be had V0 been 2.0, given we observed it at 5.0".
fn bench_counterfactual(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/counterfactual");
    group.sample_size(50);
    for &size in &[10, 25, 50, 100] {
        let model = create_chain_model(size);
        // Observe last variable
        let mut observations = HashMap::new();
        observations.insert(VariableId(size - 1), Value::Continuous(5.0));
        // Query the observed variable itself under a do(V0 = 2.0) intervention
        let query = CounterfactualQuery {
            target: VariableId(size - 1),
            intervention: Intervention::new(VariableId(0), Value::Continuous(2.0)),
            observations,
        };
        group.throughput(Throughput::Elements(size as u64));
        group.bench_with_input(
            BenchmarkId::new("chain", size),
            &(&model, &query),
            |b, (model, query)| {
                b.iter(|| black_box(compute_counterfactual(black_box(model), black_box(query))))
            },
        );
    }
    group.finish();
}
/// Benchmark causal-abstraction checks: a low-level chain is abstracted 5:1
/// into a high-level chain, then single-intervention consistency and the
/// 10-sample RMS abstraction error are measured.
fn bench_abstraction_verification(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/abstraction");
    group.sample_size(30);
    for &low_size in &[20, 50, 100] {
        // 5 low-level variables per high-level variable
        let high_size = low_size / 5;
        let low_model = create_chain_model(low_size);
        let high_model = create_chain_model(high_size);
        let mut abstraction = CausalAbstraction::new(low_model, high_model);
        // Map high-level vars to groups of low-level vars
        for i in 0..high_size {
            let low_vars: Vec<VariableId> = (0..5)
                .map(|j| VariableId(i * 5 + j))
                .collect();
            abstraction.add_mapping(VariableId(i), low_vars);
        }
        let intervention = Intervention::new(VariableId(0), Value::Continuous(1.0));
        group.throughput(Throughput::Elements(low_size as u64));
        group.bench_with_input(
            BenchmarkId::new("verify_single", low_size),
            &(&abstraction, &intervention),
            |b, (abstraction, intervention)| {
                b.iter(|| black_box(abstraction.verify_consistency(black_box(intervention))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("compute_error", low_size),
            &abstraction,
            |b, abstraction| {
                b.iter(|| black_box(abstraction.compute_abstraction_error(10)))
            },
        );
    }
    group.finish();
}
/// Benchmark average-treatment-effect estimation (two full interventional
/// evaluations per call) on dense random DAGs: treat the first variable,
/// observe the last.
fn bench_ate(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/ate");
    group.sample_size(100);
    for &size in &[10, 50, 100] {
        let model = create_dense_model(size, 0.15, 42);
        let treatment = VariableId(0);
        let outcome = VariableId(size - 1);
        group.throughput(Throughput::Elements(size as u64));
        group.bench_with_input(
            BenchmarkId::new("dense", size),
            &(&model, treatment, outcome),
            |b, (model, treatment, outcome)| {
                b.iter(|| {
                    black_box(compute_ate(
                        black_box(model),
                        *treatment,
                        *outcome,
                        (0.0, 1.0),
                    ))
                })
            },
        );
    }
    group.finish();
}
/// Benchmark topological ordering of dense random DAGs of up to 500 variables.
fn bench_topological_sort(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/topological_sort");
    group.sample_size(100);
    for &n in &[50, 100, 200, 500] {
        let model = create_dense_model(n, 0.1, 42);
        group.throughput(Throughput::Elements(n as u64));
        group.bench_with_input(BenchmarkId::new("dense", n), &model, |b, m| {
            b.iter(|| black_box(m.topological_order()))
        });
    }
    group.finish();
}
/// Benchmark plain forward evaluation (no interventions) on dense random DAGs
/// and on layered diamond graphs.
fn bench_forward_propagation(c: &mut Criterion) {
    let mut group = c.benchmark_group("causal/forward");
    group.sample_size(50);
    for &size in &[50, 100, 200] {
        let model = create_dense_model(size, 0.1, 42);
        group.throughput(Throughput::Elements(size as u64));
        group.bench_with_input(
            BenchmarkId::new("dense", size),
            &model,
            |b, model| {
                b.iter(|| black_box(model.forward()))
            },
        );
    }
    for &(layers, width) in &[(3, 10), (5, 10), (5, 20)] {
        let model = create_diamond_model(layers, width);
        // Source + sink + (layers - 2) hidden layers of `width` each
        let total_vars = 2 + (layers - 2) * width;
        group.throughput(Throughput::Elements(total_vars as u64));
        group.bench_with_input(
            BenchmarkId::new(format!("diamond_{}x{}", layers, width), total_vars),
            &model,
            |b, model| {
                b.iter(|| black_box(model.forward()))
            },
        );
    }
    group.finish();
}
// Register all causal-inference benchmarks and generate the Criterion main().
criterion_group!(
    benches,
    bench_intervention,
    bench_multi_intervention,
    bench_counterfactual,
    bench_abstraction_verification,
    bench_ate,
    bench_topological_sort,
    bench_forward_propagation,
);
criterion_main!(benches);

View File

@@ -0,0 +1,634 @@
//! Cohomology Benchmarks for Prime-Radiant
//!
//! Benchmarks for sheaf cohomology computations including:
//! - Coboundary operators at various graph sizes
//! - Cohomology group computation
//! - Sheaf neural network layer operations
//!
//! Target metrics:
//! - Coboundary: < 1ms for 100 nodes, < 10ms for 1K nodes
//! - Cohomology groups: < 5ms for 1K nodes
//! - Sheaf neural layer: < 2ms per forward pass
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::collections::HashMap;
// ============================================================================
// MOCK TYPES FOR COHOMOLOGY BENCHMARKING
// ============================================================================
/// Sparse matrix in coordinate (COO) form, used for boundary/coboundary
/// operators. Entries with magnitude <= 1e-10 are dropped on insert; duplicate
/// coordinates are allowed and accumulate during multiplication.
#[derive(Clone)]
struct SparseMatrix {
    rows: usize,
    cols: usize,
    data: Vec<(usize, usize, f64)>, // (row, col, value)
}
impl SparseMatrix {
    /// Empty `rows` x `cols` matrix.
    fn new(rows: usize, cols: usize) -> Self {
        Self { rows, cols, data: Vec::new() }
    }
    /// Record a non-negligible entry; near-zero values are silently skipped.
    fn insert(&mut self, row: usize, col: usize, value: f64) {
        if value.abs() > 1e-10 {
            self.data.push((row, col, value));
        }
    }
    /// Matrix-vector product; columns beyond `v`'s length are ignored.
    fn multiply_vector(&self, v: &[f64]) -> Vec<f64> {
        let mut result = vec![0.0; self.rows];
        for &(r, c, x) in &self.data {
            if let Some(&vc) = v.get(c) {
                result[r] += x * vc;
            }
        }
        result
    }
    /// Transposed copy (rows and columns swapped).
    fn transpose(&self) -> Self {
        let mut flipped = SparseMatrix::new(self.cols, self.rows);
        for &(r, c, x) in &self.data {
            flipped.insert(c, r, x);
        }
        flipped
    }
}
/// Simplicial complex (vertices, edges, filled triangles) derived from a graph.
struct SimplicialComplex {
    vertices: Vec<usize>,
    edges: Vec<(usize, usize)>,
    triangles: Vec<(usize, usize, usize)>,
}
impl SimplicialComplex {
    /// Build the complex from an edge list, filling every 3-clique as a
    /// triangle. For each edge (u, v), a third vertex w forms a triangle when
    /// w > v (deduplication) and w is adjacent to both endpoints.
    fn from_graph(num_nodes: usize, edges: Vec<(usize, usize)>) -> Self {
        // Undirected adjacency lists.
        let mut adjacency: HashMap<usize, Vec<usize>> = HashMap::new();
        for &(u, v) in &edges {
            adjacency.entry(u).or_default().push(v);
            adjacency.entry(v).or_default().push(u);
        }
        // Enumerate 3-cliques once per base edge via the shared-neighbor test.
        let mut triangles = Vec::new();
        for &(u, v) in &edges {
            if let (Some(nu), Some(nv)) = (adjacency.get(&u), adjacency.get(&v)) {
                triangles.extend(
                    nu.iter()
                        .filter(|&&w| w > v && nv.contains(&w))
                        .map(|&w| (u, v, w)),
                );
            }
        }
        Self {
            vertices: (0..num_nodes).collect(),
            edges,
            triangles,
        }
    }
    fn num_vertices(&self) -> usize {
        self.vertices.len()
    }
    fn num_edges(&self) -> usize {
        self.edges.len()
    }
    fn num_triangles(&self) -> usize {
        self.triangles.len()
    }
}
/// Coboundary operator computation
///
/// Packages the simplicial coboundary maps d0 (0-cochains on vertices ->
/// 1-cochains on edges) and d1 (1-cochains -> 2-cochains on triangles) as
/// sparse matrices. The alternating signs are chosen so that d1 . d0 = 0.
struct CoboundaryOperator {
    /// Coboundary from 0-cochains to 1-cochains (d0)
    d0: SparseMatrix,
    /// Coboundary from 1-cochains to 2-cochains (d1)
    d1: SparseMatrix,
}
impl CoboundaryOperator {
    /// Build both operators from a simplicial complex.
    fn from_complex(complex: &SimplicialComplex) -> Self {
        let num_v = complex.num_vertices();
        let num_e = complex.num_edges();
        let num_t = complex.num_triangles();
        // Build d0: C^0 -> C^1 (vertices to edges)
        // Each edge row evaluates to (value at head) - (value at tail).
        let mut d0 = SparseMatrix::new(num_e, num_v);
        for (i, &(u, v)) in complex.edges.iter().enumerate() {
            d0.insert(i, u, -1.0);
            d0.insert(i, v, 1.0);
        }
        // Build d1: C^1 -> C^2 (edges to triangles)
        let mut d1 = SparseMatrix::new(num_t, num_e);
        // Create edge index map keyed on the sorted endpoint pair so lookups
        // are independent of edge orientation.
        let edge_map: HashMap<(usize, usize), usize> = complex
            .edges
            .iter()
            .enumerate()
            .map(|(i, &(u, v))| ((u.min(v), u.max(v)), i))
            .collect();
        for (i, &(a, b, c)) in complex.triangles.iter().enumerate() {
            // Triangle boundary: ab - ac + bc (alternating signs; missing
            // edges are simply skipped)
            if let Some(&e_ab) = edge_map.get(&(a.min(b), a.max(b))) {
                d1.insert(i, e_ab, 1.0);
            }
            if let Some(&e_ac) = edge_map.get(&(a.min(c), a.max(c))) {
                d1.insert(i, e_ac, -1.0);
            }
            if let Some(&e_bc) = edge_map.get(&(b.min(c), b.max(c))) {
                d1.insert(i, e_bc, 1.0);
            }
        }
        Self { d0, d1 }
    }
    /// Apply d0 to a vertex cochain, producing per-edge differences.
    fn apply_d0(&self, cochain: &[f64]) -> Vec<f64> {
        self.d0.multiply_vector(cochain)
    }
    /// Apply d1 to an edge cochain, producing per-triangle curl values.
    fn apply_d1(&self, cochain: &[f64]) -> Vec<f64> {
        self.d1.multiply_vector(cochain)
    }
}
/// Cohomology group computation via Hodge decomposition.
///
/// Holds the coboundary operators together with (simplified) Hodge Laplacians
/// for 0-forms and 1-forms; Betti numbers are estimated from the Laplacian
/// kernels.
struct CohomologyComputer {
    coboundary: CoboundaryOperator,
    laplacian_0: SparseMatrix,
    laplacian_1: SparseMatrix,
}
impl CohomologyComputer {
    /// Build all operators for the given complex.
    ///
    /// Fix: the original also computed `d0.transpose()` and `d1.transpose()`
    /// into locals that were never read (the Laplacians are assembled directly
    /// from the complex below), allocating two full sparse copies on every
    /// construction; that dead work is removed.
    fn new(complex: &SimplicialComplex) -> Self {
        let coboundary = CoboundaryOperator::from_complex(complex);
        // Hodge Laplacian L_k = d_k^* d_k + d_{k-1} d_{k-1}^*
        // For 0-forms: L_0 = d_0^* d_0
        // For 1-forms: L_1 = d_1^* d_1 + d_0 d_0^*
        // Simplified Laplacian computation (degree matrix - adjacency)
        let laplacian_0 = Self::compute_graph_laplacian(complex);
        let laplacian_1 = Self::compute_edge_laplacian(complex);
        Self {
            coboundary,
            laplacian_0,
            laplacian_1,
        }
    }
    /// Standard graph Laplacian L = D - A over the vertex set.
    fn compute_graph_laplacian(complex: &SimplicialComplex) -> SparseMatrix {
        let n = complex.num_vertices();
        let mut laplacian = SparseMatrix::new(n, n);
        let mut degrees = vec![0.0; n];
        for &(u, v) in &complex.edges {
            degrees[u] += 1.0;
            degrees[v] += 1.0;
            laplacian.insert(u, v, -1.0);
            laplacian.insert(v, u, -1.0);
        }
        for (i, &d) in degrees.iter().enumerate() {
            laplacian.insert(i, i, d);
        }
        laplacian
    }
    /// Line-graph style Laplacian: edges sharing a vertex are adjacent.
    /// O(E^2) pairwise scan — acceptable for benchmark-sized inputs.
    fn compute_edge_laplacian(complex: &SimplicialComplex) -> SparseMatrix {
        let m = complex.num_edges();
        let mut laplacian = SparseMatrix::new(m, m);
        for (i, &(u1, v1)) in complex.edges.iter().enumerate() {
            let mut degree = 0.0;
            for (j, &(u2, v2)) in complex.edges.iter().enumerate() {
                if i != j && (u1 == u2 || u1 == v2 || v1 == u2 || v1 == v2) {
                    laplacian.insert(i, j, -1.0);
                    degree += 1.0;
                }
            }
            laplacian.insert(i, i, degree);
        }
        laplacian
    }
    /// Betti_0 = dim(ker(L_0)): connected components (estimated).
    fn compute_betti_0(&self) -> usize {
        self.estimate_kernel_dimension(&self.laplacian_0, 1e-6)
    }
    /// Betti_1 = dim(ker(L_1)): independent cycles (estimated).
    fn compute_betti_1(&self) -> usize {
        self.estimate_kernel_dimension(&self.laplacian_1, 1e-6)
    }
    /// Crude kernel-dimension estimate from the Laplacian trace: a near-zero
    /// average diagonal means everything is in the kernel; otherwise assume a
    /// single component. NOTE(review): this is a placeholder heuristic, not a
    /// real spectral computation.
    fn estimate_kernel_dimension(&self, laplacian: &SparseMatrix, tolerance: f64) -> usize {
        let n = laplacian.rows;
        if n == 0 {
            return 0;
        }
        let mut trace = 0.0;
        for &(row, col, val) in &laplacian.data {
            if row == col {
                trace += val;
            }
        }
        let avg_degree = trace / n as f64;
        if avg_degree < tolerance {
            n
        } else {
            1 // At least one connected component
        }
    }
    /// Approximate the harmonic representative of a 0-cochain by damping it in
    /// proportion to the energy of its coboundary — a stand-in for a full
    /// Hodge decomposition.
    fn compute_cohomology_class(&self, cochain: &[f64]) -> Vec<f64> {
        let d_cochain = self.coboundary.apply_d0(cochain);
        let mut harmonic = cochain.to_vec();
        let exact_energy: f64 = d_cochain.iter().map(|x| x * x).sum();
        if exact_energy > 1e-10 {
            // Simple projection (full implementation would use Hodge decomposition)
            let scale = 1.0 / (1.0 + exact_energy.sqrt());
            for h in &mut harmonic {
                *h *= scale;
            }
        }
        harmonic
    }
}
/// Sheaf neural network layer: per-edge restriction maps followed by sum
/// aggregation at the destination node and a ReLU non-linearity.
struct SheafNeuralLayer {
    /// Node feature dimension
    node_dim: usize,
    /// Edge feature dimension (stalk dimension)
    edge_dim: usize,
    /// Restriction map weights (per edge type)
    restriction_weights: Vec<Vec<f64>>,
    /// Aggregation weights
    aggregation_weights: Vec<f64>,
}
impl SheafNeuralLayer {
    /// Deterministically initialized layer (sin/cos weight patterns).
    fn new(node_dim: usize, edge_dim: usize, num_edges: usize) -> Self {
        let per_map = node_dim * edge_dim;
        let restriction_weights: Vec<Vec<f64>> = (0..num_edges)
            .map(|_| (0..per_map).map(|i| (i as f64 * 0.1).sin() * 0.1).collect())
            .collect();
        let aggregation_weights: Vec<f64> = (0..edge_dim * node_dim)
            .map(|i| (i as f64 * 0.2).cos() * 0.1)
            .collect();
        Self {
            node_dim,
            edge_dim,
            restriction_weights,
            aggregation_weights,
        }
    }
    /// One message-passing pass: restrict each source's features along its
    /// edge, accumulate at the destination, then apply ReLU.
    fn forward(&self, node_features: &[Vec<f64>], edges: &[(usize, usize)]) -> Vec<Vec<f64>> {
        let num_nodes = node_features.len();
        let mut output = vec![vec![0.0; self.node_dim]; num_nodes];
        for (edge_idx, &(src, dst)) in edges.iter().enumerate() {
            // Skip dangling edges rather than panicking.
            if src >= num_nodes || dst >= num_nodes {
                continue;
            }
            // Restriction maps are reused cyclically across edges.
            let message = self.apply_restriction(
                &node_features[src],
                edge_idx % self.restriction_weights.len(),
            );
            // Add the first min(node_dim, edge_dim) components into the target.
            for (slot, &m) in output[dst].iter_mut().zip(message.iter()) {
                *slot += m;
            }
        }
        // ReLU non-linearity.
        for row in &mut output {
            for val in row.iter_mut() {
                *val = val.max(0.0);
            }
        }
        output
    }
    /// Apply restriction map `edge_idx` (a node_dim -> edge_dim linear map
    /// stored row-major) to a feature vector.
    fn apply_restriction(&self, features: &[f64], edge_idx: usize) -> Vec<f64> {
        let weights = &self.restriction_weights[edge_idx];
        (0..self.edge_dim)
            .map(|i| {
                let mut acc = 0.0;
                for (j, &f) in features.iter().enumerate().take(self.node_dim) {
                    let w_idx = i * self.node_dim + j;
                    if w_idx < weights.len() {
                        acc += weights[w_idx] * f;
                    }
                }
                acc
            })
            .collect()
    }
    /// Sheaf-Laplacian consistency loss: squared difference of the restricted
    /// sections at the two endpoints of every (valid) edge.
    fn compute_cohomology_loss(&self, node_features: &[Vec<f64>], edges: &[(usize, usize)]) -> f64 {
        let mut loss = 0.0;
        for (edge_idx, &(src, dst)) in edges.iter().enumerate() {
            if src >= node_features.len() || dst >= node_features.len() {
                continue;
            }
            let map_idx = edge_idx % self.restriction_weights.len();
            let from_src = self.apply_restriction(&node_features[src], map_idx);
            let from_dst = self.apply_restriction(&node_features[dst], map_idx);
            for (a, b) in from_src.iter().zip(from_dst.iter()) {
                let diff = a - b;
                loss += diff * diff;
            }
        }
        loss
    }
}
// ============================================================================
// GRAPH GENERATORS
// ============================================================================
/// Deterministic Erdos-Renyi style edge list: each pair (i, j) with i < j is
/// included with probability `edge_probability`, driven by a seeded LCG so the
/// same seed always yields the same graph.
fn generate_random_graph(num_nodes: usize, edge_probability: f64, seed: u64) -> Vec<(usize, usize)> {
    let mut edges = Vec::new();
    let mut state = seed;
    for u in 0..num_nodes {
        for v in (u + 1)..num_nodes {
            // Knuth LCG step; the high 31 bits become a uniform-ish [0, 1) value.
            state = state.wrapping_mul(6364136223846793005).wrapping_add(1);
            let sample = (state >> 33) as f64 / (u32::MAX as f64);
            if sample < edge_probability {
                edges.push((u, v));
            }
        }
    }
    edges
}
/// 4-connected grid graph edges for a `width` x `height` lattice with nodes
/// numbered row-major; each node links to its right and bottom neighbors.
fn generate_grid_graph(width: usize, height: usize) -> Vec<(usize, usize)> {
    let mut edges = Vec::new();
    for row in 0..height {
        for col in 0..width {
            let node = row * width + col;
            if col + 1 < width {
                edges.push((node, node + 1)); // right neighbor
            }
            if row + 1 < height {
                edges.push((node, node + width)); // neighbor below
            }
        }
    }
    edges
}
// ============================================================================
// BENCHMARKS
// ============================================================================
/// Benchmark applying the vertex-to-edge coboundary (d0) on random graphs of
/// increasing size (average degree ~3).
fn bench_coboundary_computation(c: &mut Criterion) {
    let mut group = c.benchmark_group("cohomology/coboundary");
    group.sample_size(50);
    for &n in &[100usize, 500, 1000, 5000, 10000] {
        let complex =
            SimplicialComplex::from_graph(n, generate_random_graph(n, 3.0 / n as f64, 42));
        let operator = CoboundaryOperator::from_complex(&complex);
        let cochain: Vec<f64> = (0..n).map(|i| (i as f64).sin()).collect();
        group.throughput(Throughput::Elements(n as u64));
        group.bench_with_input(
            BenchmarkId::new("d0_apply", n),
            &(&operator, &cochain),
            |b, (op, x)| b.iter(|| black_box(op.apply_d0(black_box(x)))),
        );
    }
    group.finish();
}
/// Benchmark Betti-number estimation on random graphs. Note the
/// `CohomologyComputer` construction (Laplacian assembly, O(E^2) for the edge
/// Laplacian) is inside the timed closure, so this measures build + estimate
/// together.
fn bench_cohomology_groups(c: &mut Criterion) {
    let mut group = c.benchmark_group("cohomology/groups");
    group.sample_size(30);
    for &num_nodes in &[100, 500, 1000, 2000] {
        let edges = generate_random_graph(num_nodes, 4.0 / num_nodes as f64, 42);
        let complex = SimplicialComplex::from_graph(num_nodes, edges);
        group.throughput(Throughput::Elements(num_nodes as u64));
        group.bench_with_input(
            BenchmarkId::new("betti_0", num_nodes),
            &complex,
            |b, complex| {
                b.iter(|| {
                    let computer = CohomologyComputer::new(black_box(complex));
                    black_box(computer.compute_betti_0())
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("betti_1", num_nodes),
            &complex,
            |b, complex| {
                b.iter(|| {
                    let computer = CohomologyComputer::new(black_box(complex));
                    black_box(computer.compute_betti_1())
                })
            },
        );
    }
    group.finish();
}
/// Benchmark projecting a 0-cochain toward its harmonic representative
/// (operator construction is done once, outside the timed loop).
fn bench_cohomology_class(c: &mut Criterion) {
    let mut group = c.benchmark_group("cohomology/class_computation");
    group.sample_size(50);
    for &n in &[100usize, 500, 1000] {
        let complex =
            SimplicialComplex::from_graph(n, generate_random_graph(n, 4.0 / n as f64, 42));
        let computer = CohomologyComputer::new(&complex);
        let cochain: Vec<f64> = (0..n).map(|i| (i as f64 * 0.1).sin()).collect();
        group.throughput(Throughput::Elements(n as u64));
        group.bench_with_input(
            BenchmarkId::new("project_harmonic", n),
            &(&computer, &cochain),
            |b, (comp, x)| b.iter(|| black_box(comp.compute_cohomology_class(black_box(x)))),
        );
    }
    group.finish();
}
/// Benchmark the sheaf neural layer's forward pass and cohomology loss on
/// random graphs (64-d node features, 32-d stalks, average degree ~5).
fn bench_sheaf_neural_layer(c: &mut Criterion) {
    let mut group = c.benchmark_group("cohomology/sheaf_neural");
    group.sample_size(50);
    let feature_dim = 64;
    let edge_dim = 32;
    for &num_nodes in &[100, 500, 1000, 2000] {
        let edges = generate_random_graph(num_nodes, 5.0 / num_nodes as f64, 42);
        let num_edges = edges.len();
        // `.max(1)` keeps the layer's internal modulo from dividing by zero
        // when the random graph happens to have no edges.
        let layer = SheafNeuralLayer::new(feature_dim, edge_dim, num_edges.max(1));
        let node_features: Vec<Vec<f64>> = (0..num_nodes)
            .map(|i| (0..feature_dim).map(|j| ((i + j) as f64 * 0.1).sin()).collect())
            .collect();
        group.throughput(Throughput::Elements(num_nodes as u64));
        group.bench_with_input(
            BenchmarkId::new("forward", num_nodes),
            &(&layer, &node_features, &edges),
            |b, (layer, features, edges)| {
                b.iter(|| {
                    black_box(layer.forward(black_box(features), black_box(edges)))
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("cohomology_loss", num_nodes),
            &(&layer, &node_features, &edges),
            |b, (layer, features, edges)| {
                b.iter(|| {
                    black_box(layer.compute_cohomology_loss(black_box(features), black_box(edges)))
                })
            },
        );
    }
    group.finish();
}
/// Benchmark coboundary construction and a sheaf layer forward pass on square
/// grid graphs (grids have many 4-cycles but no triangles).
fn bench_grid_topology(c: &mut Criterion) {
    let mut group = c.benchmark_group("cohomology/grid_topology");
    group.sample_size(30);
    for &size in &[10, 20, 32, 50] {
        let num_nodes = size * size;
        let edges = generate_grid_graph(size, size);
        let complex = SimplicialComplex::from_graph(num_nodes, edges.clone());
        group.throughput(Throughput::Elements(num_nodes as u64));
        group.bench_with_input(
            BenchmarkId::new("build_coboundary", format!("{}x{}", size, size)),
            &complex,
            |b, complex| {
                b.iter(|| {
                    black_box(CoboundaryOperator::from_complex(black_box(complex)))
                })
            },
        );
        // Smaller layer (32-d features, 16-d stalks) for the grid pass.
        let layer = SheafNeuralLayer::new(32, 16, edges.len().max(1));
        let features: Vec<Vec<f64>> = (0..num_nodes)
            .map(|i| (0..32).map(|j| ((i + j) as f64 * 0.1).cos()).collect())
            .collect();
        group.bench_with_input(
            BenchmarkId::new("sheaf_layer", format!("{}x{}", size, size)),
            &(&layer, &features, &edges),
            |b, (layer, features, edges)| {
                b.iter(|| {
                    black_box(layer.forward(black_box(features), black_box(edges)))
                })
            },
        );
    }
    group.finish();
}
// Register all cohomology benchmarks and generate the Criterion main().
criterion_group!(
    benches,
    bench_coboundary_computation,
    bench_cohomology_groups,
    bench_cohomology_class,
    bench_sheaf_neural_layer,
    bench_grid_topology,
);
criterion_main!(benches);

View File

@@ -0,0 +1,825 @@
//! Integrated Coherence Benchmarks for Prime-Radiant
//!
//! End-to-end benchmarks combining all modules:
//! - Full coherence pipeline (topology -> spectral -> causal -> decision)
//! - Memory usage profiling
//! - Throughput measurements
//! - Scalability analysis
//!
//! Target metrics:
//! - End-to-end coherence: < 50ms for 1K entities
//! - Memory overhead: < 100MB for 10K entities
//! - Throughput: > 100 decisions/second
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::collections::HashMap;
use std::time::Instant;
// ============================================================================
// INTEGRATED COHERENCE ENGINE
// ============================================================================
/// Entity in the coherence graph
#[derive(Clone, Debug)]
struct Entity {
    /// Index of this entity in `CoherenceEngine::entities`.
    id: usize,
    /// State vector; dimension is fixed when the entity is added.
    state: Vec<f64>,
    /// Associated beliefs (not populated anywhere in this file).
    beliefs: Vec<Belief>,
}
/// A belief with confidence
#[derive(Clone, Debug)]
struct Belief {
    /// Free-form statement of the belief.
    content: String,
    /// Confidence weight — presumably in [0, 1], not enforced here.
    confidence: f64,
    /// Id of the entity the belief originated from.
    source_id: usize,
}
/// Constraint between entities
///
/// A directed, weighted edge carrying a linear restriction map that transports
/// the source state into the target's comparison frame.
#[derive(Clone, Debug)]
struct Constraint {
    /// Source entity id.
    source: usize,
    /// Target entity id.
    target: usize,
    /// Constraint strength; scales the squared residual in the energy.
    weight: f64,
    /// Row-major linear map applied to the source state.
    restriction_map: Vec<Vec<f64>>,
}
/// Coherence decision
///
/// Tri-state outcome of a coherence evaluation: accept, reject, or defer
/// pending further evidence.
#[derive(Clone, Debug)]
pub enum CoherenceDecision {
    /// Energy below the accept threshold; confidence is 1 - energy/threshold.
    Accept { confidence: f64 },
    /// Energy above the reject threshold.
    Reject { reason: String, energy: f64 },
    /// Energy between the two thresholds; more evidence is needed.
    Defer { required_evidence: Vec<String> },
}
/// Full coherence computation result
///
/// Aggregates the three energy components, topology summaries, and the final
/// decision produced by `CoherenceEngine::compute_coherence`.
#[derive(Clone, Debug)]
pub struct CoherenceResult {
    /// Total coherence energy (lower is better)
    pub total_energy: f64,
    /// Topological coherence (from cohomology)
    pub topological_energy: f64,
    /// Spectral coherence (from eigenvalues)
    pub spectral_energy: f64,
    /// Causal coherence (from intervention consistency)
    pub causal_energy: f64,
    /// Betti numbers
    pub betti: Vec<usize>,
    /// Spectral gap
    pub spectral_gap: f64,
    /// Final decision
    pub decision: CoherenceDecision,
}
/// Integrated coherence engine
///
/// Owns the entity/constraint graph and the energy thresholds used by
/// `compute_coherence` to map total energy to a decision.
struct CoherenceEngine {
    entities: Vec<Entity>,
    constraints: Vec<Constraint>,
    /// Thresholds for decision making
    /// Below `accept_threshold` -> Accept; above `reject_threshold` -> Reject;
    /// anything in between -> Defer.
    accept_threshold: f64,
    reject_threshold: f64,
}
impl CoherenceEngine {
    /// Empty engine with the default accept/reject energy thresholds.
    fn new() -> Self {
        Self {
            entities: Vec::new(),
            constraints: Vec::new(),
            accept_threshold: 0.1,
            reject_threshold: 1.0,
        }
    }
    /// Add a zero-initialized entity of dimension `state_dim`; returns its id
    /// (its index in `entities`).
    fn add_entity(&mut self, state_dim: usize) -> usize {
        let id = self.entities.len();
        let entity = Entity {
            id,
            state: vec![0.0; state_dim],
            beliefs: Vec::new(),
        };
        self.entities.push(entity);
        id
    }
    /// Replace an entity's state; out-of-range ids are silently ignored.
    fn set_state(&mut self, id: usize, state: Vec<f64>) {
        if id < self.entities.len() {
            self.entities[id].state = state;
        }
    }
    /// Add a weighted constraint with an identity restriction map sized to the
    /// source entity's state (falls back to 16 for unknown source ids).
    fn add_constraint(&mut self, source: usize, target: usize, weight: f64) {
        let dim = if source < self.entities.len() {
            self.entities[source].state.len()
        } else {
            16
        };
        // Identity restriction map
        let restriction_map: Vec<Vec<f64>> = (0..dim)
            .map(|i| {
                let mut row = vec![0.0; dim];
                row[i] = 1.0;
                row
            })
            .collect();
        self.constraints.push(Constraint {
            source,
            target,
            weight,
            restriction_map,
        });
    }
    /// Compute full coherence
    ///
    /// Runs the whole pipeline — topological, spectral, and causal energies
    /// plus the Betti approximation — and maps the combined energy to an
    /// accept/defer/reject decision against the configured thresholds.
    fn compute_coherence(&self) -> CoherenceResult {
        // 1. Topological coherence via coboundary computation
        let topological_energy = self.compute_topological_energy();
        // 2. Spectral coherence via Laplacian eigenvalues
        let (spectral_energy, spectral_gap) = self.compute_spectral_coherence();
        // 3. Causal coherence via intervention consistency
        let causal_energy = self.compute_causal_energy();
        // 4. Combined energy
        let total_energy = topological_energy + spectral_energy + causal_energy;
        // 5. Betti numbers approximation
        let betti = self.compute_betti_numbers();
        // 6. Decision
        let decision = if total_energy < self.accept_threshold {
            CoherenceDecision::Accept {
                confidence: 1.0 - total_energy / self.accept_threshold,
            }
        } else if total_energy > self.reject_threshold {
            CoherenceDecision::Reject {
                reason: "Energy exceeds rejection threshold".to_string(),
                energy: total_energy,
            }
        } else {
            CoherenceDecision::Defer {
                required_evidence: vec!["Additional context needed".to_string()],
            }
        };
        CoherenceResult {
            total_energy,
            topological_energy,
            spectral_energy,
            causal_energy,
            betti,
            spectral_gap,
            decision,
        }
    }
    /// Weighted sum of squared constraint residuals ||rho(source) - target||^2
    /// (the sheaf coboundary energy). Dangling constraints are skipped.
    fn compute_topological_energy(&self) -> f64 {
        let mut energy = 0.0;
        // Compute residuals at each constraint (coboundary)
        for constraint in &self.constraints {
            if constraint.source >= self.entities.len()
                || constraint.target >= self.entities.len()
            {
                continue;
            }
            let source_state = &self.entities[constraint.source].state;
            let target_state = &self.entities[constraint.target].state;
            // Apply restriction map
            let restricted_source = self.apply_restriction(&constraint.restriction_map, source_state);
            // Residual = rho(source) - target
            let mut residual_sq = 0.0;
            for (rs, ts) in restricted_source.iter().zip(target_state.iter()) {
                let diff = rs - ts;
                residual_sq += diff * diff;
            }
            energy += constraint.weight * residual_sq;
        }
        energy
    }
    /// Dense matrix-vector product: apply a restriction map to a state vector.
    fn apply_restriction(&self, map: &[Vec<f64>], state: &[f64]) -> Vec<f64> {
        map.iter()
            .map(|row| {
                row.iter()
                    .zip(state.iter())
                    .map(|(a, b)| a * b)
                    .sum()
            })
            .collect()
    }
    /// Estimate (spectral_energy, spectral_gap) from the weighted constraint
    /// Laplacian. The dominant eigenvalue comes from 50 rounds of power
    /// iteration; the gap is a fixed placeholder (see inline note).
    fn compute_spectral_coherence(&self) -> (f64, f64) {
        let n = self.entities.len();
        if n == 0 {
            return (0.0, 0.0);
        }
        // Build Laplacian
        let mut laplacian = vec![vec![0.0; n]; n];
        let mut degrees = vec![0.0; n];
        for constraint in &self.constraints {
            if constraint.source < n && constraint.target < n {
                let w = constraint.weight;
                laplacian[constraint.source][constraint.target] -= w;
                laplacian[constraint.target][constraint.source] -= w;
                degrees[constraint.source] += w;
                degrees[constraint.target] += w;
            }
        }
        for i in 0..n {
            laplacian[i][i] = degrees[i];
        }
        // Power iteration for largest eigenvalue
        let mut v: Vec<f64> = (0..n).map(|i| ((i + 1) as f64).sqrt().sin()).collect();
        let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
        for x in &mut v {
            *x /= norm;
        }
        let mut lambda_max = 0.0;
        for _ in 0..50 {
            let mut y = vec![0.0; n];
            for i in 0..n {
                for j in 0..n {
                    y[i] += laplacian[i][j] * v[j];
                }
            }
            // Rayleigh quotient v . (L v) against the previous unit vector
            lambda_max = v.iter().zip(y.iter()).map(|(a, b)| a * b).sum();
            let norm: f64 = y.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm > 1e-10 {
                v = y.iter().map(|x| x / norm).collect();
            }
        }
        // Estimate spectral gap (lambda_2 / lambda_max)
        let spectral_gap = if n > 1 { 0.1 } else { 1.0 }; // Simplified
        // Spectral energy based on eigenvalue distribution
        let spectral_energy = if lambda_max > 0.0 {
            (lambda_max - degrees.iter().sum::<f64>() / n as f64).abs()
        } else {
            0.0
        };
        (spectral_energy * 0.01, spectral_gap)
    }
    /// Penalize constraints whose target state carries more energy (squared
    /// norm) than 1.5x the source's — a crude proxy for "unexplained" variance
    /// downstream. Returns the mean penalty per valid constraint.
    fn compute_causal_energy(&self) -> f64 {
        // Check if state updates are consistent with causal ordering
        // Simplified: measure variance in state transitions
        let mut energy = 0.0;
        let mut count = 0;
        for constraint in &self.constraints {
            if constraint.source >= self.entities.len()
                || constraint.target >= self.entities.len()
            {
                continue;
            }
            let source_state = &self.entities[constraint.source].state;
            let target_state = &self.entities[constraint.target].state;
            // Causal consistency: target should be "downstream" of source
            let source_norm: f64 = source_state.iter().map(|x| x * x).sum();
            let target_norm: f64 = target_state.iter().map(|x| x * x).sum();
            // Penalize if target has unexplained variance
            if target_norm > source_norm * 1.5 {
                energy += (target_norm - source_norm * 1.5) * 0.1;
            }
            count += 1;
        }
        if count > 0 {
            energy / count as f64
        } else {
            0.0
        }
    }
    /// Euler-characteristic style approximation of [Betti_0, Betti_1] from the
    /// entity/constraint counts alone — no real homology computation.
    fn compute_betti_numbers(&self) -> Vec<usize> {
        let n = self.entities.len();
        let m = self.constraints.len();
        // Very rough approximation
        // Betti_0 = connected components
        // Betti_1 = independent cycles
        let betti_0 = if n > m { n - m } else { 1 };
        let betti_1 = if m > n { m - n } else { 0 };
        vec![betti_0.max(1), betti_1]
    }
}
// ============================================================================
// STREAMING COHERENCE PROCESSOR
// ============================================================================
/// Incremental coherence updates
///
/// Wraps a `CoherenceEngine` and caches per-constraint residuals so a single
/// entity update only recomputes the residuals of the edges it touches.
struct StreamingCoherence {
    engine: CoherenceEngine,
    /// Cache for incremental updates
    /// Keyed by (source, target); stores the last computed weighted residual.
    residual_cache: HashMap<(usize, usize), f64>,
    /// Rolling energy window
    energy_history: Vec<f64>,
    /// Maximum length of `energy_history`.
    history_window: usize,
}
impl StreamingCoherence {
    /// Empty processor keeping the last `history_window` energy deltas.
    fn new(history_window: usize) -> Self {
        Self {
            engine: CoherenceEngine::new(),
            residual_cache: HashMap::new(),
            energy_history: Vec::new(),
            history_window,
        }
    }
    /// Apply a state update and return the incremental energy delta: the sum
    /// of |new - old| residuals over every constraint touching `id`. Residuals
    /// are cached so the next update only pays for the edges it touches.
    fn update_entity(&mut self, id: usize, state: Vec<f64>) -> f64 {
        self.engine.set_state(id, state);
        // Compute incremental energy delta
        let mut delta = 0.0;
        for constraint in &self.engine.constraints {
            if constraint.source == id || constraint.target == id {
                // Unseen constraints default to a zero previous residual
                let old_residual = self.residual_cache
                    .get(&(constraint.source, constraint.target))
                    .copied()
                    .unwrap_or(0.0);
                let new_residual = self.compute_residual(constraint);
                delta += (new_residual - old_residual).abs();
                self.residual_cache
                    .insert((constraint.source, constraint.target), new_residual);
            }
        }
        // Update history
        self.energy_history.push(delta);
        if self.energy_history.len() > self.history_window {
            // NOTE(review): remove(0) is O(window); a VecDeque would make this
            // O(1), but that would change the struct's field type.
            self.energy_history.remove(0);
        }
        delta
    }
    /// Weighted squared residual of one constraint against the current states;
    /// 0.0 when either endpoint id is out of range.
    fn compute_residual(&self, constraint: &Constraint) -> f64 {
        if constraint.source >= self.engine.entities.len()
            || constraint.target >= self.engine.entities.len()
        {
            return 0.0;
        }
        let source = &self.engine.entities[constraint.source].state;
        let target = &self.engine.entities[constraint.target].state;
        let restricted = self.engine.apply_restriction(&constraint.restriction_map, source);
        let mut residual_sq = 0.0;
        for (r, t) in restricted.iter().zip(target.iter()) {
            let diff = r - t;
            residual_sq += diff * diff;
        }
        constraint.weight * residual_sq
    }
    /// Trend of recent energy deltas: mean(second half) - mean(first half).
    /// Positive means incoherence is growing; 0.0 with fewer than 2 samples.
    fn get_trend(&self) -> f64 {
        if self.energy_history.len() < 2 {
            return 0.0;
        }
        let n = self.energy_history.len();
        let recent = &self.energy_history[(n / 2)..];
        let older = &self.energy_history[..(n / 2)];
        let recent_avg: f64 = recent.iter().sum::<f64>() / recent.len() as f64;
        let older_avg: f64 = older.iter().sum::<f64>() / older.len().max(1) as f64;
        recent_avg - older_avg
    }
}
// ============================================================================
// BATCH COHERENCE PROCESSOR
// ============================================================================
/// Batch processing for high throughput
///
/// Buffers `(entity id, state)` updates and applies them to the engine
/// in groups of `batch_size`.
struct BatchCoherence {
    // Flush threshold for `pending`.
    batch_size: usize,
    // Updates queued since the last flush.
    pending: Vec<(usize, Vec<f64>)>,
    engine: CoherenceEngine,
}
impl BatchCoherence {
    /// Empty batcher that flushes once `batch_size` updates are queued.
    fn new(batch_size: usize) -> Self {
        Self {
            batch_size,
            pending: Vec::new(),
            engine: CoherenceEngine::new(),
        }
    }

    /// Queue one update; returns the batch results when the queue has
    /// reached `batch_size`, otherwise `None`.
    fn add_update(&mut self, id: usize, state: Vec<f64>) -> Option<Vec<CoherenceResult>> {
        self.pending.push((id, state));
        if self.pending.len() < self.batch_size {
            None
        } else {
            Some(self.process_batch())
        }
    }

    /// Apply every pending update, computing coherence after each one.
    /// Leaves the pending queue empty.
    fn process_batch(&mut self) -> Vec<CoherenceResult> {
        let pending = std::mem::take(&mut self.pending);
        let mut results = Vec::with_capacity(pending.len());
        for (id, state) in pending {
            self.engine.set_state(id, state);
            results.push(self.engine.compute_coherence());
        }
        results
    }

    /// Process whatever is pending, regardless of batch size.
    fn flush(&mut self) -> Vec<CoherenceResult> {
        self.process_batch()
    }
}
// ============================================================================
// MEMORY PROFILING
// ============================================================================
/// Byte-level breakdown produced by `estimate_memory`.
struct MemoryProfile {
    // Entities plus their state vectors and belief lists.
    entity_bytes: usize,
    // Constraints plus their restriction-map matrices.
    constraint_bytes: usize,
    // Reserved for residual caches (currently always 0).
    cache_bytes: usize,
    // Sum of the above plus the engine struct itself.
    total_bytes: usize,
}
/// Rough per-component heap footprint of a `CoherenceEngine`.
///
/// Entity cost counts the struct plus its state vector and belief list.
/// Constraint cost assumes every row of the restriction map is as wide
/// as the first row (true for the generators in this file — TODO confirm
/// for other callers). Cache bytes are currently always zero.
fn estimate_memory(engine: &CoherenceEngine) -> MemoryProfile {
    let entity_bytes: usize = engine.entities.iter()
        .map(|e| {
            std::mem::size_of::<Entity>()
                + e.state.len() * std::mem::size_of::<f64>()
                + e.beliefs.len() * std::mem::size_of::<Belief>()
        })
        .sum();
    let constraint_bytes: usize = engine.constraints.iter()
        .map(|c| {
            // `first()` handles an empty restriction map without indexing.
            let row_len = c.restriction_map.first().map_or(0, |r| r.len());
            std::mem::size_of::<Constraint>()
                + c.restriction_map.len() * row_len * std::mem::size_of::<f64>()
        })
        .sum();
    let cache_bytes = 0; // Would include residual cache if implemented
    let total_bytes = entity_bytes
        + constraint_bytes
        + cache_bytes
        + std::mem::size_of::<CoherenceEngine>();
    MemoryProfile {
        entity_bytes,
        constraint_bytes,
        cache_bytes,
        total_bytes,
    }
}
// ============================================================================
// DATA GENERATORS
// ============================================================================
/// Deterministic test graph: `num_entities` entities with sinusoidal
/// states and roughly `avg_degree` LCG-chosen outgoing constraints each.
fn generate_coherence_graph(num_entities: usize, avg_degree: usize, state_dim: usize) -> CoherenceEngine {
    const LCG_MUL: u64 = 6364136223846793005;
    let mut engine = CoherenceEngine::new();
    // Seed every entity with a reproducible sinusoidal state vector.
    for i in 0..num_entities {
        let id = engine.add_entity(state_dim);
        let state: Vec<f64> = (0..state_dim)
            .map(|j| ((i * state_dim + j) as f64 * 0.1).sin())
            .collect();
        engine.set_state(id, state);
    }
    // Wire pseudo-random constraints with a fixed-seed LCG so every run
    // builds the same graph.
    let mut rng = 42u64;
    for source in 0..num_entities {
        for _ in 0..avg_degree {
            rng = rng.wrapping_mul(LCG_MUL).wrapping_add(1);
            let target = rng as usize % num_entities;
            if source == target {
                continue; // no self-loops
            }
            // Weight in (0.1, 1.0], derived from the high LCG bits.
            let weight = ((rng >> 32) as f64 / (u32::MAX as f64)) * 0.9 + 0.1;
            engine.add_constraint(source, target, weight);
        }
    }
    engine
}
/// Deterministic tree-shaped graph: level `l` holds `branching^l`
/// entities, each constrained to its parent on the previous level.
fn generate_hierarchical_graph(
    num_levels: usize,
    branching: usize,
    state_dim: usize,
) -> CoherenceEngine {
    let mut engine = CoherenceEngine::new();
    // level_nodes[l] holds the entity ids created at depth l.
    let mut level_nodes: Vec<Vec<usize>> = Vec::with_capacity(num_levels);
    for level in 0..num_levels {
        let width = branching.pow(level as u32);
        let mut current = Vec::with_capacity(width);
        for i in 0..width {
            let id = engine.add_entity(state_dim);
            let state: Vec<f64> = (0..state_dim)
                .map(|j| ((level * 1000 + i * state_dim + j) as f64 * 0.1).sin())
                .collect();
            engine.set_state(id, state);
            current.push(id);
        }
        // Past the root level, tie each node to its parent with unit weight.
        if let Some(parents) = level_nodes.last() {
            for (i, &child) in current.iter().enumerate() {
                if let Some(&parent) = parents.get(i / branching) {
                    engine.add_constraint(parent, child, 1.0);
                }
            }
        }
        level_nodes.push(current);
    }
    engine
}
// ============================================================================
// BENCHMARKS
// ============================================================================
/// Full coherence computation over random graphs of increasing size.
fn bench_end_to_end_coherence(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/end_to_end");
    group.sample_size(20);
    for num_entities in [100usize, 500, 1000, 2000].iter().copied() {
        let engine = generate_coherence_graph(num_entities, 5, 32);
        group.throughput(Throughput::Elements(num_entities as u64));
        group.bench_with_input(
            BenchmarkId::new("full_coherence", num_entities),
            &engine,
            |b, engine| b.iter(|| black_box(engine.compute_coherence())),
        );
    }
    group.finish();
}
/// Times the three coherence sub-analyses (topological / spectral /
/// causal) separately so their relative cost can be compared.
fn bench_component_breakdown(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/components");
    group.sample_size(30);
    for num_entities in [500usize, 1000, 2000].iter().copied() {
        let engine = generate_coherence_graph(num_entities, 5, 32);
        group.throughput(Throughput::Elements(num_entities as u64));
        group.bench_with_input(
            BenchmarkId::new("topological", num_entities),
            &engine,
            |b, e| b.iter(|| black_box(e.compute_topological_energy())),
        );
        group.bench_with_input(
            BenchmarkId::new("spectral", num_entities),
            &engine,
            |b, e| b.iter(|| black_box(e.compute_spectral_coherence())),
        );
        group.bench_with_input(
            BenchmarkId::new("causal", num_entities),
            &engine,
            |b, e| b.iter(|| black_box(e.compute_causal_energy())),
        );
    }
    group.finish();
}
/// 100 incremental `update_entity` calls against graphs of increasing
/// size. The graph is (re)built inside the `iter_batched` setup closure
/// so only the updates are timed.
///
/// FIX: the previous version also built an unused `base_engine` per
/// size (dead work plus an unused-variable warning); removed.
fn bench_streaming_updates(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/streaming");
    group.sample_size(50);
    for &num_entities in &[500, 1000, 2000] {
        // 100 updates are applied per measured iteration.
        group.throughput(Throughput::Elements(100));
        group.bench_with_input(
            BenchmarkId::new("incremental_updates", num_entities),
            &num_entities,
            |b, &n| {
                b.iter_batched(
                    || {
                        let mut streaming = StreamingCoherence::new(100);
                        streaming.engine = generate_coherence_graph(n, 5, 32);
                        streaming
                    },
                    |mut streaming| {
                        for i in 0..100 {
                            let state: Vec<f64> = (0..32)
                                .map(|j| ((i * 32 + j) as f64 * 0.01).sin())
                                .collect();
                            black_box(streaming.update_entity(i % n, state));
                        }
                    },
                    criterion::BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
/// Amortized cost of the update that triggers a batch flush; the queue
/// is pre-filled to `batch_size - 1` in the setup closure so the timed
/// call is always the flushing one.
fn bench_batch_throughput(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/batch_throughput");
    group.sample_size(20);
    let num_entities = 1000;
    for batch_size in [10usize, 50, 100, 200].iter().copied() {
        group.throughput(Throughput::Elements(batch_size as u64));
        group.bench_with_input(
            BenchmarkId::new("process_batch", batch_size),
            &batch_size,
            |b, &batch_size| {
                b.iter_batched(
                    || {
                        let mut batch = BatchCoherence::new(batch_size);
                        batch.engine = generate_coherence_graph(num_entities, 5, 32);
                        // One short of full: the measured update flushes.
                        for i in 0..(batch_size - 1) {
                            let state: Vec<f64> = (0..32)
                                .map(|j| ((i * 32 + j) as f64 * 0.01).cos())
                                .collect();
                            batch.pending.push((i % num_entities, state));
                        }
                        batch
                    },
                    |mut batch| {
                        let state: Vec<f64> =
                            (0..32).map(|j| (j as f64 * 0.02).sin()).collect();
                        black_box(batch.add_update(0, state))
                    },
                    criterion::BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
/// End-to-end coherence over tree-shaped constraint graphs of varying
/// depth and branching factor.
fn bench_hierarchical_coherence(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/hierarchical");
    group.sample_size(20);
    for (levels, branching) in [(3usize, 4usize), (4, 3), (5, 2), (4, 4)].iter().copied() {
        let engine = generate_hierarchical_graph(levels, branching, 32);
        // Node count is the geometric series over all levels.
        let total_nodes: usize = (0..levels).map(|l| branching.pow(l as u32)).sum();
        group.throughput(Throughput::Elements(total_nodes as u64));
        group.bench_with_input(
            BenchmarkId::new(format!("{}L_{}B", levels, branching), total_nodes),
            &engine,
            |b, engine| b.iter(|| black_box(engine.compute_coherence())),
        );
    }
    group.finish();
}
/// Cost of walking the engine to estimate its heap footprint; graph
/// construction happens in the setup closure and is not timed.
fn bench_memory_scaling(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/memory");
    group.sample_size(10);
    for num_entities in [1000usize, 5000, 10000].iter().copied() {
        group.bench_with_input(
            BenchmarkId::new("estimate_memory", num_entities),
            &num_entities,
            |b, &n| {
                b.iter_batched(
                    || generate_coherence_graph(n, 5, 32),
                    |engine| black_box(estimate_memory(&engine)),
                    criterion::BatchSize::LargeInput,
                )
            },
        );
    }
    group.finish();
}
/// Raw decisions-per-second on a fixed 1000-entity graph; the
/// exhaustive match forces evaluation of every decision variant.
fn bench_decision_throughput(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/decision_throughput");
    group.sample_size(50);
    let engine = generate_coherence_graph(1000, 5, 32);
    group.throughput(Throughput::Elements(1000));
    group.bench_function("decisions_per_second", |b| {
        b.iter(|| {
            let mut decided = 0;
            for _ in 0..1000 {
                let result = engine.compute_coherence();
                match result.decision {
                    CoherenceDecision::Accept { .. }
                    | CoherenceDecision::Reject { .. }
                    | CoherenceDecision::Defer { .. } => decided += 1,
                }
            }
            black_box(decided)
        })
    });
    group.finish();
}
/// Scaling across combined entity/constraint counts; throughput is
/// reported per (entity + constraint) element.
fn bench_scalability(c: &mut Criterion) {
    let mut group = c.benchmark_group("integrated/scalability");
    group.sample_size(10);
    let configs = [(500usize, 3usize), (500, 10), (1000, 3), (1000, 10), (2000, 5)];
    for (entities, avg_degree) in configs.iter().copied() {
        let engine = generate_coherence_graph(entities, avg_degree, 32);
        let total_constraints = engine.constraints.len();
        group.throughput(Throughput::Elements((entities + total_constraints) as u64));
        group.bench_with_input(
            BenchmarkId::new(format!("{}e_{}d", entities, avg_degree), entities),
            &engine,
            |b, engine| b.iter(|| black_box(engine.compute_coherence())),
        );
    }
    group.finish();
}
// Register all integrated-coherence benchmarks with Criterion and emit
// the bench-harness `main`.
criterion_group!(
    benches,
    bench_end_to_end_coherence,
    bench_component_breakdown,
    bench_streaming_updates,
    bench_batch_throughput,
    bench_hierarchical_coherence,
    bench_memory_scaling,
    bench_decision_throughput,
    bench_scalability,
);
criterion_main!(benches);

View File

@@ -0,0 +1,900 @@
//! Quantum and Algebraic Topology Benchmarks for Prime-Radiant
//!
//! Benchmarks for quantum-topological operations including:
//! - Persistent homology computation at various dimensions
//! - Topological invariant computation (Betti numbers, Euler characteristic)
//! - Quantum state operations (density matrices, fidelity)
//! - Simplicial complex construction and manipulation
//!
//! Target metrics:
//! - Persistent homology (1K points): < 100ms
//! - Betti numbers (dim 2): < 10ms
//! - Quantum fidelity: < 1ms per pair
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::collections::{BinaryHeap, HashMap, HashSet};
use std::cmp::Ordering;
// ============================================================================
// SIMPLICIAL COMPLEX TYPES
// ============================================================================
/// A simplex is an ordered set of vertices
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Simplex {
    vertices: Vec<usize>,
}

impl Simplex {
    /// Build a simplex; vertices are kept sorted so equal vertex sets
    /// compare and hash identically regardless of input order.
    fn new(mut vertices: Vec<usize>) -> Self {
        vertices.sort_unstable();
        Self { vertices }
    }

    /// Geometric dimension: one less than the vertex count. An empty
    /// vertex list also reports dimension 0.
    fn dimension(&self) -> usize {
        self.vertices.len().saturating_sub(1)
    }

    /// All codimension-1 faces (drop one vertex at a time); faces that
    /// would be empty are omitted.
    fn faces(&self) -> Vec<Simplex> {
        (0..self.vertices.len())
            .filter_map(|dropped| {
                let kept: Vec<usize> = self
                    .vertices
                    .iter()
                    .enumerate()
                    .filter(|&(idx, _)| idx != dropped)
                    .map(|(_, &v)| v)
                    .collect();
                if kept.is_empty() {
                    None
                } else {
                    Some(Simplex::new(kept))
                }
            })
            .collect()
    }
}
/// Filtered simplicial complex for persistent homology
struct FilteredComplex {
    /// `(filtration value, simplex)` pairs, in insertion order until
    /// `sort_by_filtration` is called.
    simplices: Vec<(f64, Simplex)>,
}

impl FilteredComplex {
    /// Empty complex.
    fn new() -> Self {
        FilteredComplex { simplices: Vec::new() }
    }

    /// Record `simplex` as appearing at filtration value `filtration`.
    fn add(&mut self, filtration: f64, simplex: Simplex) {
        self.simplices.push((filtration, simplex));
    }

    /// Stable sort by filtration value, breaking ties by dimension so
    /// faces precede cofaces appearing at the same value. NaN
    /// filtration values compare as equal.
    fn sort_by_filtration(&mut self) {
        self.simplices.sort_by(|lhs, rhs| {
            let by_value = lhs.0.partial_cmp(&rhs.0).unwrap_or(Ordering::Equal);
            by_value.then_with(|| lhs.1.dimension().cmp(&rhs.1.dimension()))
        });
    }
}
// ============================================================================
// PERSISTENT HOMOLOGY
// ============================================================================
/// Birth-death pair representing a topological feature
#[derive(Clone, Debug)]
struct PersistencePair {
    // Homology dimension of the feature (0 = component, 1 = loop, ...).
    dimension: usize,
    // Filtration value at which the feature appears.
    birth: f64,
    // Filtration value at which it is filled in (INFINITY if never).
    death: f64,
}
impl PersistencePair {
    /// Lifetime of the feature in filtration units (death minus birth);
    /// infinite for features that never die.
    fn persistence(&self) -> f64 {
        self.death - self.birth
    }
}
/// Union-Find data structure for 0-dimensional homology
struct UnionFind {
    /// parent[x] == x marks a root.
    parent: Vec<usize>,
    /// Upper bound on tree height, maintained for union-by-rank.
    rank: Vec<usize>,
    /// Filtration value at which each vertex's component was born.
    birth: Vec<f64>,
}
impl UnionFind {
    /// `n` singleton components, each with an (as yet) infinite birth.
    fn new(n: usize) -> Self {
        Self {
            parent: (0..n).collect(),
            rank: vec![0; n],
            birth: vec![f64::INFINITY; n],
        }
    }
    /// Root of `x`'s component, with path compression.
    fn find(&mut self, x: usize) -> usize {
        if self.parent[x] != x {
            self.parent[x] = self.find(self.parent[x]);
        }
        self.parent[x]
    }
    /// Merge the components of `x` and `y`.
    ///
    /// The elder component (smaller birth time, ties favor `x`'s root)
    /// survives and keeps its root, so `birth[survivor]` remains the
    /// merged component's birth. Returns `Some((dying_root,
    /// surviving_root))`, or `None` when already connected.
    ///
    /// BUGFIX: the previous version linked the roots by rank and then
    /// unconditionally set `parent[dying] = survivor`. Whenever the
    /// survivor had the strictly lower rank, the two assignments made
    /// the roots point at each other, producing a parent cycle and
    /// infinite recursion in `find`. The dying root is now always
    /// linked under the survivor, and rank is kept as a height bound.
    fn union(&mut self, x: usize, y: usize) -> Option<(usize, usize)> {
        let px = self.find(x);
        let py = self.find(y);
        if px == py {
            return None;
        }
        // Elder rule: the earlier-born root survives (larger birth dies).
        let (survivor, dying) = if self.birth[px] <= self.birth[py] {
            (px, py)
        } else {
            (py, px)
        };
        self.parent[dying] = survivor;
        // Attaching `dying` can deepen the survivor's tree by one level.
        self.rank[survivor] = self.rank[survivor].max(self.rank[dying] + 1);
        Some((dying, survivor))
    }
    /// Record the filtration value at which vertex `x` appears.
    fn set_birth(&mut self, x: usize, birth: f64) {
        self.birth[x] = birth;
    }
}
/// Compute persistent homology using standard algorithm
///
/// H_0 intervals come from union-find over vertices and edges; higher
/// intervals from a simplified Z/2 column reduction of the boundary
/// matrix (columns stored as sparse `HashSet`s of face indices).
/// Simplices are consumed in `complex.simplices` order, so the caller
/// must have sorted by filtration first.
///
/// NOTE(review): `pivot_to_col` stores positions in `complex.simplices`
/// and indexes `boundary_matrix`, which only grows for simplices of
/// dimension <= max_dim. The two stay aligned only when no simplex
/// exceeds max_dim (true for the generators in this file) — confirm
/// before reusing with deeper complexes.
fn compute_persistent_homology(complex: &FilteredComplex, max_dim: usize) -> Vec<PersistencePair> {
    let mut pairs = Vec::new();
    // Vertices are exactly the dimension-0 simplices.
    let num_vertices = complex.simplices.iter()
        .filter(|(_, s)| s.dimension() == 0)
        .count();
    // Union-find for H_0
    let mut uf = UnionFind::new(num_vertices);
    // Track active simplices for higher dimensions
    let mut simplex_index: HashMap<Vec<usize>, usize> = HashMap::new();
    let mut boundary_matrix: Vec<HashSet<usize>> = Vec::new();
    let mut pivot_to_col: HashMap<usize, usize> = HashMap::new();
    for (idx, (filtration, simplex)) in complex.simplices.iter().enumerate() {
        let dim = simplex.dimension();
        if dim == 0 {
            // Vertex: creates a new H_0 class
            let v = simplex.vertices[0];
            uf.set_birth(v, *filtration);
            simplex_index.insert(simplex.vertices.clone(), idx);
            boundary_matrix.push(HashSet::new());
        } else if dim == 1 {
            // Edge: may kill H_0 class
            let u = simplex.vertices[0];
            let v = simplex.vertices[1];
            if let Some((dying, _survivor)) = uf.union(u, v) {
                // The younger component dies here; zero-length
                // intervals (birth == death) are dropped.
                let birth = uf.birth[dying];
                if *filtration > birth {
                    pairs.push(PersistencePair {
                        dimension: 0,
                        birth,
                        death: *filtration,
                    });
                }
            }
            // Add to boundary matrix for H_1
            let mut boundary = HashSet::new();
            for &vertex in &simplex.vertices {
                if let Some(&face_idx) = simplex_index.get(&vec![vertex]) {
                    boundary.insert(face_idx);
                }
            }
            simplex_index.insert(simplex.vertices.clone(), idx);
            boundary_matrix.push(boundary);
        } else if dim <= max_dim {
            // Higher dimensional simplex
            let faces = simplex.faces();
            let mut boundary: HashSet<usize> = faces.iter()
                .filter_map(|f| simplex_index.get(&f.vertices).copied())
                .collect();
            // Reduce boundary: repeatedly cancel the current pivot
            // (largest index) against the reduced column owning it;
            // symmetric difference is addition over Z/2.
            while !boundary.is_empty() {
                let pivot = *boundary.iter().max().unwrap();
                if let Some(&other_col) = pivot_to_col.get(&pivot) {
                    // XOR with the column that has this pivot
                    let other_boundary = &boundary_matrix[other_col];
                    let symmetric_diff: HashSet<usize> = boundary
                        .symmetric_difference(other_boundary)
                        .copied()
                        .collect();
                    boundary = symmetric_diff;
                } else {
                    // This column has a new pivot
                    pivot_to_col.insert(pivot, idx);
                    break;
                }
            }
            if boundary.is_empty() {
                // This simplex creates a new cycle (potential H_{dim-1} class)
                // For simplicity, we just record it was created
            } else {
                // This simplex kills a cycle
                // The killed cycle is considered born when its pivot
                // face entered the filtration.
                let pivot = *boundary.iter().max().unwrap();
                let birth_filtration = complex.simplices[pivot].0;
                pairs.push(PersistencePair {
                    dimension: dim - 1,
                    birth: birth_filtration,
                    death: *filtration,
                });
            }
            simplex_index.insert(simplex.vertices.clone(), idx);
            boundary_matrix.push(boundary);
        }
    }
    // Add infinite persistence pairs for surviving components
    // (every born root is an H_0 class that never dies).
    for i in 0..num_vertices {
        if uf.find(i) == i && uf.birth[i] < f64::INFINITY {
            pairs.push(PersistencePair {
                dimension: 0,
                birth: uf.birth[i],
                death: f64::INFINITY,
            });
        }
    }
    pairs
}
/// Persistence diagram statistics
struct PersistenceStats {
    // All pairs, finite and infinite.
    total_features: usize,
    // Longest finite lifetime (0.0 when there are none).
    max_persistence: f64,
    // Mean finite lifetime (0.0 when there are none).
    mean_persistence: f64,
    // betti_at_threshold[k] = features of dimension k alive at the
    // queried threshold (infinite-death features always count).
    betti_at_threshold: Vec<usize>,
}
/// Summarize a persistence diagram.
///
/// * `threshold` — filtration value at which the Betti numbers are read.
/// * `max_dim` — highest homology dimension reported.
///
/// Infinite-death features are excluded from the lifetime statistics
/// but still count toward Betti numbers (they are alive at every
/// threshold). FIX: the intermediate `Vec` of pair references in the
/// original was unnecessary; lifetimes are now computed in one pass.
fn compute_persistence_stats(pairs: &[PersistencePair], threshold: f64, max_dim: usize) -> PersistenceStats {
    let persistences: Vec<f64> = pairs.iter()
        .filter(|p| p.death.is_finite())
        .map(|p| p.persistence())
        .collect();
    let max_persistence = persistences.iter().cloned().fold(0.0f64, f64::max);
    let mean_persistence = if persistences.is_empty() {
        0.0
    } else {
        persistences.iter().sum::<f64>() / persistences.len() as f64
    };
    // A feature contributes to Betti_k at `threshold` if it is born by
    // then and has not yet died.
    let mut betti = vec![0; max_dim + 1];
    for pair in pairs {
        let alive = pair.birth <= threshold
            && (pair.death.is_infinite() || pair.death > threshold);
        if alive && pair.dimension <= max_dim {
            betti[pair.dimension] += 1;
        }
    }
    PersistenceStats {
        total_features: pairs.len(),
        max_persistence,
        mean_persistence,
        betti_at_threshold: betti,
    }
}
// ============================================================================
// QUANTUM STATE OPERATIONS
// ============================================================================
/// Complex number (simplified for benchmarking)
#[derive(Clone, Copy, Debug)]
struct Complex {
    re: f64,
    im: f64,
}

impl Complex {
    /// Construct from real and imaginary parts.
    fn new(re: f64, im: f64) -> Self {
        Complex { re, im }
    }

    /// Squared modulus |z|^2 = re^2 + im^2.
    fn norm_squared(&self) -> f64 {
        self.re * self.re + self.im * self.im
    }

    /// Complex conjugate (imaginary part negated).
    fn conjugate(&self) -> Self {
        Complex::new(self.re, -self.im)
    }

    /// Complex product (a + bi)(c + di) = (ac - bd) + (ad + bc)i.
    fn mul(&self, other: &Self) -> Self {
        let (a, b) = (self.re, self.im);
        let (c, d) = (other.re, other.im);
        Complex::new(a * c - b * d, a * d + b * c)
    }

    /// Component-wise sum.
    fn add(&self, other: &Self) -> Self {
        Complex::new(self.re + other.re, self.im + other.im)
    }

    /// Real scalar multiple.
    fn scale(&self, s: f64) -> Self {
        Complex::new(self.re * s, self.im * s)
    }
}
/// Density matrix for mixed quantum states
struct DensityMatrix {
    // Hilbert-space dimension; the matrix is `dimension` x `dimension`.
    dimension: usize,
    // Row-major entries: rho[i][j] = data[i][j].
    data: Vec<Vec<Complex>>,
}
impl DensityMatrix {
fn new(dimension: usize) -> Self {
Self {
dimension,
data: vec![vec![Complex::new(0.0, 0.0); dimension]; dimension],
}
}
fn from_pure_state(state: &[Complex]) -> Self {
let n = state.len();
let mut dm = DensityMatrix::new(n);
for i in 0..n {
for j in 0..n {
dm.data[i][j] = state[i].mul(&state[j].conjugate());
}
}
dm
}
fn trace(&self) -> Complex {
let mut sum = Complex::new(0.0, 0.0);
for i in 0..self.dimension {
sum = sum.add(&self.data[i][i]);
}
sum
}
fn multiply(&self, other: &DensityMatrix) -> DensityMatrix {
let n = self.dimension;
let mut result = DensityMatrix::new(n);
for i in 0..n {
for j in 0..n {
let mut sum = Complex::new(0.0, 0.0);
for k in 0..n {
sum = sum.add(&self.data[i][k].mul(&other.data[k][j]));
}
result.data[i][j] = sum;
}
}
result
}
/// Compute sqrt(rho) approximately using Newton's method
fn sqrt_approx(&self, iterations: usize) -> DensityMatrix {
let n = self.dimension;
// Start with identity matrix
let mut y = DensityMatrix::new(n);
for i in 0..n {
y.data[i][i] = Complex::new(1.0, 0.0);
}
// Denman-Beavers iteration: Y_{k+1} = (Y_k + Y_k^{-1} * A) / 2
// Simplified: just use Newton iteration Y = (Y + A/Y) / 2
for _ in 0..iterations {
let y_inv = self.clone(); // Simplified: use original matrix
let sum = y.add(&y_inv);
y = sum.scale_all(0.5);
}
y
}
fn add(&self, other: &DensityMatrix) -> DensityMatrix {
let n = self.dimension;
let mut result = DensityMatrix::new(n);
for i in 0..n {
for j in 0..n {
result.data[i][j] = self.data[i][j].add(&other.data[i][j]);
}
}
result
}
fn scale_all(&self, s: f64) -> DensityMatrix {
let n = self.dimension;
let mut result = DensityMatrix::new(n);
for i in 0..n {
for j in 0..n {
result.data[i][j] = self.data[i][j].scale(s);
}
}
result
}
}
// Deep copy of the entry grid; behaviorally equivalent to
// `#[derive(Clone)]`, kept as a manual impl as in the original source.
impl Clone for DensityMatrix {
    fn clone(&self) -> Self {
        Self {
            dimension: self.dimension,
            data: self.data.clone(),
        }
    }
}
/// Quantum fidelity between two density matrices
/// F(rho, sigma) = (Tr sqrt(sqrt(rho) sigma sqrt(rho)))^2
fn quantum_fidelity(rho: &DensityMatrix, sigma: &DensityMatrix) -> f64 {
    // Simplified computation for benchmarking; an exact evaluation
    // would require an eigendecomposition.
    let root = rho.sqrt_approx(5);
    let sandwiched = root.multiply(sigma).multiply(&root);
    let tr = sandwiched.sqrt_approx(5).trace();
    // |Tr(.)|^2
    tr.norm_squared()
}
/// Trace distance between density matrices
/// D(rho, sigma) = (1/2) Tr |rho - sigma|
fn trace_distance(rho: &DensityMatrix, sigma: &DensityMatrix) -> f64 {
let n = rho.dimension;
let mut sum = 0.0;
// Simplified: use Frobenius norm as approximation
for i in 0..n {
for j in 0..n {
let diff = Complex {
re: rho.data[i][j].re - sigma.data[i][j].re,
im: rho.data[i][j].im - sigma.data[i][j].im,
};
sum += diff.norm_squared();
}
}
0.5 * sum.sqrt()
}
/// Von Neumann entropy: S(rho) = -Tr(rho log rho)
/// Simplified to the Shannon entropy of the diagonal (real parts);
/// entries at or below 1e-10 are skipped to avoid ln(0).
fn von_neumann_entropy(rho: &DensityMatrix) -> f64 {
    -(0..rho.dimension)
        .map(|i| rho.data[i][i].re)
        .filter(|&p| p > 1e-10)
        .map(|p| p * p.ln())
        .sum::<f64>()
}
// ============================================================================
// TOPOLOGICAL INVARIANTS
// ============================================================================
/// Compute Euler characteristic: chi = V - E + F - ...
/// (alternating sum of simplex counts by dimension).
fn euler_characteristic(complex: &FilteredComplex) -> i64 {
    complex
        .simplices
        .iter()
        .map(|(_, simplex)| if simplex.dimension() % 2 == 0 { 1i64 } else { -1 })
        .sum()
}
/// Betti numbers via boundary matrix rank
///
/// Deliberately rough estimate: beta_k is approximated by the surplus
/// of k-simplices over (k+1)-simplices, floored at 1, rather than by
/// exact kernel/image dimensions.
fn betti_numbers(complex: &FilteredComplex, max_dim: usize) -> Vec<usize> {
    // Histogram of simplex counts per dimension; one extra slot so the
    // k+1 lookup below never goes out of bounds.
    let mut counts = vec![0usize; max_dim + 2];
    for (_, simplex) in &complex.simplices {
        if let Some(slot) = counts.get_mut(simplex.dimension()) {
            *slot += 1;
        }
    }
    // beta_k = dim(ker d_k) - dim(im d_{k+1}) in the exact theory,
    // approximated by C_k - C_{k+1}.
    let mut betti: Vec<usize> = (0..=max_dim)
        .map(|k| {
            let c_k = counts[k];
            let c_next = counts[k + 1];
            if c_k > c_next { c_k - c_next } else { 1 }
        })
        .collect();
    // Guarantee at least one connected component.
    if betti[0] == 0 {
        betti[0] = 1;
    }
    betti
}
// ============================================================================
// DATA GENERATORS
// ============================================================================
/// Build a Vietoris-Rips filtration over 2-D points: vertices enter at
/// filtration 0, edges at their Euclidean length (if within
/// `max_radius`), and — when `max_dim >= 2` — triangles at the length
/// of their longest edge. The result is sorted by filtration value.
fn generate_rips_complex(points: &[(f64, f64)], max_radius: f64, max_dim: usize) -> FilteredComplex {
    let n = points.len();
    let mut complex = FilteredComplex::new();
    // Add vertices (0-simplices)
    for i in 0..n {
        complex.add(0.0, Simplex::new(vec![i]));
    }
    // Compute pairwise distances
    let mut edges: Vec<(f64, usize, usize)> = Vec::new();
    for i in 0..n {
        for j in (i + 1)..n {
            let dist = ((points[i].0 - points[j].0).powi(2)
                + (points[i].1 - points[j].1).powi(2))
                .sqrt();
            if dist <= max_radius {
                edges.push((dist, i, j));
            }
        }
    }
    // Add edges (1-simplices)
    for (dist, i, j) in &edges {
        complex.add(*dist, Simplex::new(vec![*i, *j]));
    }
    // Add triangles (2-simplices) if max_dim >= 2
    if max_dim >= 2 {
        // Build adjacency
        // (plus an edge-length lookup keyed by the ordered vertex pair)
        let mut adj: HashMap<usize, HashSet<usize>> = HashMap::new();
        let mut edge_dist: HashMap<(usize, usize), f64> = HashMap::new();
        for (dist, i, j) in &edges {
            adj.entry(*i).or_default().insert(*j);
            adj.entry(*j).or_default().insert(*i);
            edge_dist.insert(((*i).min(*j), (*i).max(*j)), *dist);
        }
        // Enumerate each triangle once by requiring i < j < k.
        for i in 0..n {
            if let Some(neighbors_i) = adj.get(&i) {
                for &j in neighbors_i {
                    if j > i {
                        if let Some(neighbors_j) = adj.get(&j) {
                            for &k in neighbors_j {
                                if k > j && neighbors_i.contains(&k) {
                                    // Found triangle (i, j, k)
                                    // Filtration value is the longest edge.
                                    let d_ij = edge_dist.get(&(i, j)).unwrap_or(&0.0);
                                    let d_jk = edge_dist.get(&(j, k)).unwrap_or(&0.0);
                                    let d_ik = edge_dist.get(&(i, k)).unwrap_or(&0.0);
                                    let max_dist = d_ij.max(*d_jk).max(*d_ik);
                                    complex.add(max_dist, Simplex::new(vec![i, j, k]));
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    complex.sort_by_filtration();
    complex
}
/// Deterministic pseudo-random 2-D points from a 64-bit LCG; the same
/// seed always yields the same cloud (coordinates lie in [0, ~0.5]).
fn generate_random_points(num_points: usize, seed: u64) -> Vec<(f64, f64)> {
    const LCG_MUL: u64 = 6364136223846793005;
    let mut state = seed;
    let mut next_unit = move || {
        state = state.wrapping_mul(LCG_MUL).wrapping_add(1);
        (state >> 33) as f64 / (u32::MAX as f64)
    };
    // Tuple fields evaluate left to right: x is drawn before y.
    (0..num_points).map(|_| (next_unit(), next_unit())).collect()
}
/// Deterministic pseudo-random pure state: components drawn uniformly
/// from [-1, 1] x [-1, 1] via an LCG, then normalized to unit length.
fn generate_random_quantum_state(dimension: usize, seed: u64) -> Vec<Complex> {
    const LCG_MUL: u64 = 6364136223846793005;
    let mut bits = seed;
    let mut draw = move || {
        bits = bits.wrapping_mul(LCG_MUL).wrapping_add(1);
        ((bits >> 33) as f64 / (u32::MAX as f64)) * 2.0 - 1.0
    };
    let mut amplitudes = Vec::with_capacity(dimension);
    let mut norm_sq = 0.0;
    for _ in 0..dimension {
        // Real part first, then imaginary, matching the LCG order.
        let re = draw();
        let im = draw();
        let amp = Complex::new(re, im);
        norm_sq += amp.norm_squared();
        amplitudes.push(amp);
    }
    let inv_norm = 1.0 / norm_sq.sqrt();
    for amp in &mut amplitudes {
        *amp = amp.scale(inv_norm);
    }
    amplitudes
}
// ============================================================================
// BENCHMARKS
// ============================================================================
/// Full persistence computation (through dimension 2) on Rips complexes
/// built from random planar point clouds.
fn bench_persistent_homology(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/persistent_homology");
    group.sample_size(20);
    for num_points in [100usize, 250, 500, 1000].iter().copied() {
        let cloud = generate_random_points(num_points, 42);
        let complex = generate_rips_complex(&cloud, 0.2, 2);
        group.throughput(Throughput::Elements(num_points as u64));
        group.bench_with_input(
            BenchmarkId::new("dim2", num_points),
            &complex,
            |b, complex| {
                b.iter(|| black_box(compute_persistent_homology(black_box(complex), 2)))
            },
        );
    }
    group.finish();
}
/// Summary-statistics pass over a precomputed persistence diagram.
fn bench_persistence_stats(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/persistence_stats");
    group.sample_size(50);
    for num_points in [100usize, 500, 1000].iter().copied() {
        let cloud = generate_random_points(num_points, 42);
        let diagram = compute_persistent_homology(&generate_rips_complex(&cloud, 0.2, 2), 2);
        group.throughput(Throughput::Elements(diagram.len() as u64));
        group.bench_with_input(
            BenchmarkId::new("compute", num_points),
            &diagram,
            |b, pairs| {
                b.iter(|| black_box(compute_persistence_stats(black_box(pairs), 0.1, 2)))
            },
        );
    }
    group.finish();
}
/// Euler characteristic and approximate Betti numbers on shared Rips
/// complexes of growing size.
fn bench_topological_invariants(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/invariants");
    group.sample_size(50);
    for num_points in [100usize, 500, 1000].iter().copied() {
        let cloud = generate_random_points(num_points, 42);
        let complex = generate_rips_complex(&cloud, 0.2, 2);
        group.throughput(Throughput::Elements(complex.simplices.len() as u64));
        group.bench_with_input(
            BenchmarkId::new("euler", num_points),
            &complex,
            |b, complex| b.iter(|| black_box(euler_characteristic(black_box(complex)))),
        );
        group.bench_with_input(
            BenchmarkId::new("betti", num_points),
            &complex,
            |b, complex| b.iter(|| black_box(betti_numbers(black_box(complex), 2))),
        );
    }
    group.finish();
}
/// Rips-complex construction itself, at dimension 2 then dimension 1.
fn bench_rips_construction(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/rips_construction");
    group.sample_size(20);
    for num_points in [100usize, 250, 500, 1000].iter().copied() {
        let cloud = generate_random_points(num_points, 42);
        group.throughput(Throughput::Elements(num_points as u64));
        for &(label, dim) in [("dim2", 2usize), ("dim1", 1)].iter() {
            group.bench_with_input(
                BenchmarkId::new(label, num_points),
                &cloud,
                |b, points| {
                    b.iter(|| black_box(generate_rips_complex(black_box(points), 0.15, dim)))
                },
            );
        }
    }
    group.finish();
}
/// Fidelity and trace-distance between random pure-state density
/// matrices of growing dimension.
fn bench_quantum_fidelity(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/fidelity");
    group.sample_size(50);
    for dim in [4usize, 8, 16, 32].iter().copied() {
        let rho = DensityMatrix::from_pure_state(&generate_random_quantum_state(dim, 42));
        let sigma = DensityMatrix::from_pure_state(&generate_random_quantum_state(dim, 43));
        group.throughput(Throughput::Elements((dim * dim) as u64));
        group.bench_with_input(
            BenchmarkId::new("pure_states", dim),
            &(&rho, &sigma),
            |b, (rho, sigma)| {
                b.iter(|| black_box(quantum_fidelity(black_box(rho), black_box(sigma))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("trace_distance", dim),
            &(&rho, &sigma),
            |b, (rho, sigma)| {
                b.iter(|| black_box(trace_distance(black_box(rho), black_box(sigma))))
            },
        );
    }
    group.finish();
}
/// Core density-matrix primitives: construction from a pure state,
/// multiplication, trace, and diagonal entropy.
fn bench_density_matrix_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/density_matrix");
    group.sample_size(50);
    for dim in [4usize, 8, 16, 32, 64].iter().copied() {
        let state = generate_random_quantum_state(dim, 42);
        let rho = DensityMatrix::from_pure_state(&state);
        group.throughput(Throughput::Elements((dim * dim) as u64));
        group.bench_with_input(
            BenchmarkId::new("from_pure_state", dim),
            &state,
            |b, state| {
                b.iter(|| black_box(DensityMatrix::from_pure_state(black_box(state))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("multiply", dim),
            &rho,
            |b, rho| b.iter(|| black_box(rho.multiply(black_box(rho)))),
        );
        group.bench_with_input(
            BenchmarkId::new("trace", dim),
            &rho,
            |b, rho| b.iter(|| black_box(rho.trace())),
        );
        group.bench_with_input(
            BenchmarkId::new("von_neumann_entropy", dim),
            &rho,
            |b, rho| b.iter(|| black_box(von_neumann_entropy(black_box(rho)))),
        );
    }
    group.finish();
}
/// Simplex construction (dominated by the vertex sort) and face
/// enumeration at several dimensions.
fn bench_simplex_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum/simplex");
    group.sample_size(100);
    for dim in [3usize, 5, 7, 10].iter().copied() {
        let vertex_ids: Vec<usize> = (0..dim).collect();
        let simplex = Simplex::new(vertex_ids.clone());
        group.throughput(Throughput::Elements(dim as u64));
        group.bench_with_input(
            BenchmarkId::new("create", dim),
            &vertex_ids,
            |b, vertices| {
                b.iter(|| black_box(Simplex::new(black_box(vertices.clone()))))
            },
        );
        group.bench_with_input(
            BenchmarkId::new("faces", dim),
            &simplex,
            |b, simplex| b.iter(|| black_box(simplex.faces())),
        );
    }
    group.finish();
}
// Register all quantum/topology benchmarks with Criterion and emit the
// bench-harness `main`.
criterion_group!(
    benches,
    bench_persistent_homology,
    bench_persistence_stats,
    bench_topological_invariants,
    bench_rips_construction,
    bench_quantum_fidelity,
    bench_density_matrix_operations,
    bench_simplex_operations,
);
criterion_main!(benches);

View File

@@ -0,0 +1,76 @@
//! Quantum Solver Benchmarks — effective qubit scaling with sparse operators
//!
//! Benchmarks the SolverBackedOperator for 10 to 20+ effective qubits,
//! measuring SpMV performance, eigenvalue convergence, and memory scaling.
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use prime_radiant_category::quantum::topological_code::SolverBackedOperator;
/// Sparse matrix-vector apply on a uniform superposition, scaling the
/// state from 2^10 up to 2^20 amplitudes.
fn spmv_scaling(c: &mut Criterion) {
    let mut group = c.benchmark_group("quantum_spmv");
    group.sample_size(20);
    for qubits in [10, 12, 14, 16, 18, 20] {
        // The band is capped by the state dimension (always 4 here).
        let bandwidth = 4.min(1 << qubits);
        let op = SolverBackedOperator::banded(qubits, bandwidth, 42);
        // Uniform superposition: every amplitude is 1/sqrt(2^n).
        let amplitude = 1.0 / ((1u64 << qubits) as f64).sqrt();
        let state = vec![amplitude; 1 << qubits];
        group.bench_with_input(BenchmarkId::new("apply", qubits), &qubits, |b, _| {
            b.iter(|| black_box(op.apply(&state)))
        });
    }
    group.finish();
}
/// Dominant-eigenvalue extraction latency on banded operators.
fn eigenvalue_convergence(c: &mut Criterion) {
    let mut g = c.benchmark_group("quantum_eigenvalue");
    g.sample_size(10);
    for qubits in [10, 12, 14, 16] {
        let op = SolverBackedOperator::banded(qubits, 4, 42);
        g.bench_with_input(BenchmarkId::new("power_iteration", qubits), &qubits, |b, _| {
            // At most 50 iterations, 1e-8 convergence tolerance.
            b.iter(|| black_box(op.dominant_eigenvalue(50, 1e-8)));
        });
    }
    g.finish();
}
/// Construction time for banded operators at growing qubit counts.
/// The memory accounting tuple is returned through `black_box` so the
/// optimizer cannot discard the build.
fn memory_scaling(c: &mut Criterion) {
    let mut g = c.benchmark_group("quantum_memory");
    for qubits in [10, 15, 20, 25, 30] {
        g.bench_with_input(
            BenchmarkId::new("construct_banded", qubits),
            &qubits,
            |b, &q| {
                b.iter(|| {
                    let op = SolverBackedOperator::banded(q, 4, 42);
                    black_box((op.nnz(), op.memory_bytes(), op.dense_memory_bytes()))
                });
            },
        );
    }
    g.finish();
}
// Register the quantum-solver benchmark groups with Criterion.
criterion_group!(
    benches,
    spmv_scaling,
    eigenvalue_convergence,
    memory_scaling,
);
// Expands to a `main` that runs the `benches` group.
criterion_main!(benches);

View File

@@ -0,0 +1,741 @@
//! Spectral Analysis Benchmarks for Prime-Radiant
//!
//! Benchmarks for spectral graph theory computations including:
//! - Eigenvalue computation (power iteration vs Lanczos)
//! - Cheeger constant computation
//! - Spectral clustering
//! - SIMD-accelerated operations
//!
//! Target metrics:
//! - Eigenvalue (power iteration): < 5ms for 1K nodes
//! - Eigenvalue (Lanczos): < 50ms for 10K nodes
//! - Cheeger constant: < 10ms for 1K nodes
//! - Spectral clustering: < 100ms for 5K nodes
use criterion::{
black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput,
};
use std::collections::HashSet;
// ============================================================================
// SPARSE MATRIX TYPES
// ============================================================================
/// CSR (Compressed Sparse Row) format for efficient matrix-vector multiplication.
#[derive(Clone)]
struct CsrMatrix {
    /// Number of rows (== number of graph nodes).
    rows: usize,
    /// Number of columns (square Laplacian; kept for completeness).
    cols: usize,
    /// `row_ptr[i]..row_ptr[i + 1]` indexes the non-zeros of row `i`.
    row_ptr: Vec<usize>,
    col_indices: Vec<usize>,
    values: Vec<f64>,
}
impl CsrMatrix {
    /// Builds the (unnormalized) graph Laplacian `L = D - A` in CSR form.
    ///
    /// Each row stores its diagonal degree entry first, followed by the
    /// off-diagonal `-1` entries sorted by column index.
    fn from_edges(num_nodes: usize, edges: &[(usize, usize)]) -> Self {
        let mut neighbors: Vec<Vec<(usize, f64)>> = vec![Vec::new(); num_nodes];
        let mut degree = vec![0.0; num_nodes];
        for &(u, v) in edges {
            neighbors[u].push((v, -1.0));
            neighbors[v].push((u, -1.0));
            degree[u] += 1.0;
            degree[v] += 1.0;
        }
        let mut row_ptr = Vec::with_capacity(num_nodes + 1);
        row_ptr.push(0);
        let mut col_indices = Vec::new();
        let mut values = Vec::new();
        for (i, row) in neighbors.iter_mut().enumerate() {
            // Diagonal (degree) entry leads the row.
            col_indices.push(i);
            values.push(degree[i]);
            row.sort_by_key(|&(j, _)| j);
            for &(j, w) in row.iter() {
                col_indices.push(j);
                values.push(w);
            }
            row_ptr.push(col_indices.len());
        }
        Self {
            rows: num_nodes,
            cols: num_nodes,
            row_ptr,
            col_indices,
            values,
        }
    }
    /// Dense vector product `y = L x` via a straightforward CSR row scan.
    fn matvec(&self, x: &[f64]) -> Vec<f64> {
        (0..self.rows)
            .map(|i| {
                let (lo, hi) = (self.row_ptr[i], self.row_ptr[i + 1]);
                self.col_indices[lo..hi]
                    .iter()
                    .zip(&self.values[lo..hi])
                    .map(|(&j, &a)| a * x[j])
                    .sum()
            })
            .collect()
    }
    /// Same product with the inner loop manually unrolled by four so the
    /// backend can vectorize; leftover elements are handled scalar-wise.
    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    fn matvec_simd(&self, x: &[f64]) -> Vec<f64> {
        let mut y = vec![0.0; self.rows];
        for (i, out) in y.iter_mut().enumerate() {
            let lo = self.row_ptr[i];
            let hi = self.row_ptr[i + 1];
            let cols = &self.col_indices[lo..hi];
            let vals = &self.values[lo..hi];
            let mut acc = 0.0;
            let mut col_chunks = cols.chunks_exact(4);
            let mut val_chunks = vals.chunks_exact(4);
            for (cs, vs) in (&mut col_chunks).zip(&mut val_chunks) {
                // Group the four products before accumulating — identical
                // floating-point summation order to the original.
                acc += vs[0] * x[cs[0]]
                    + vs[1] * x[cs[1]]
                    + vs[2] * x[cs[2]]
                    + vs[3] * x[cs[3]];
            }
            for (&j, &a) in col_chunks.remainder().iter().zip(val_chunks.remainder()) {
                acc += a * x[j];
            }
            *out = acc;
        }
        y
    }
}
// ============================================================================
// EIGENVALUE COMPUTATION
// ============================================================================
/// Power iteration for the largest-magnitude eigenvalue of `matrix`.
///
/// Returns the Rayleigh-quotient estimate and the final normalized iterate.
/// Stops after `max_iter` steps or once successive estimates differ by less
/// than `tol`.
fn power_iteration(matrix: &CsrMatrix, max_iter: usize, tol: f64) -> (f64, Vec<f64>) {
    let n = matrix.rows;
    if n == 0 {
        return (0.0, Vec::new());
    }
    // Deterministic pseudo-random start vector, then normalize it.
    let mut iterate: Vec<f64> = (0..n).map(|i| ((i as f64 + 1.0).sqrt()).sin()).collect();
    let mut lambda = 0.0;
    let start_norm: f64 = iterate.iter().map(|x| x * x).sum::<f64>().sqrt();
    if start_norm > 1e-10 {
        for x in iterate.iter_mut() {
            *x /= start_norm;
        }
    }
    for _ in 0..max_iter {
        let y = matrix.matvec(&iterate);
        // Rayleigh quotient v^T (A v); v is unit-norm at this point.
        let estimate: f64 = iterate.iter().zip(y.iter()).map(|(a, b)| a * b).sum();
        let y_norm: f64 = y.iter().map(|x| x * x).sum::<f64>().sqrt();
        if y_norm < 1e-10 {
            // Iterate collapsed (e.g. x in the kernel); stop with last estimate.
            break;
        }
        iterate = y.iter().map(|x| x / y_norm).collect();
        if (estimate - lambda).abs() < tol {
            lambda = estimate;
            break;
        }
        lambda = estimate;
    }
    (lambda, iterate)
}
/// Lanczos algorithm for multiple eigenvalues
struct LanczosComputation {
tridiag_alpha: Vec<f64>,
tridiag_beta: Vec<f64>,
basis_vectors: Vec<Vec<f64>>,
}
impl LanczosComputation {
fn compute(matrix: &CsrMatrix, num_eigenvalues: usize, max_iter: usize) -> Self {
let n = matrix.rows;
let k = num_eigenvalues.min(max_iter).min(n);
let mut alpha = Vec::with_capacity(k);
let mut beta = Vec::with_capacity(k);
let mut basis = Vec::with_capacity(k + 1);
// Start with random vector
let mut v: Vec<f64> = (0..n).map(|i| ((i as f64 + 1.0).sqrt()).sin()).collect();
let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
if norm > 1e-10 {
for x in &mut v {
*x /= norm;
}
}
basis.push(v.clone());
let mut w = matrix.matvec(&v);
for i in 0..k {
// alpha_i = v_i^T w
let a: f64 = basis[i].iter().zip(w.iter()).map(|(a, b)| a * b).sum();
alpha.push(a);
// w = w - alpha_i v_i
for (j, wj) in w.iter_mut().enumerate() {
*wj -= a * basis[i][j];
}
// w = w - beta_{i-1} v_{i-1}
if i > 0 && i - 1 < beta.len() {
let b = beta[i - 1];
for (j, wj) in w.iter_mut().enumerate() {
*wj -= b * basis[i - 1][j];
}
}
// beta_i = ||w||
let b: f64 = w.iter().map(|x| x * x).sum::<f64>().sqrt();
if b < 1e-10 || i + 1 >= k {
break;
}
beta.push(b);
// v_{i+1} = w / beta_i
let new_v: Vec<f64> = w.iter().map(|x| x / b).collect();
basis.push(new_v.clone());
// w = A v_{i+1}
w = matrix.matvec(&new_v);
}
Self {
tridiag_alpha: alpha,
tridiag_beta: beta,
basis_vectors: basis,
}
}
fn eigenvalues(&self) -> Vec<f64> {
// Compute eigenvalues of tridiagonal matrix using QR iteration
let n = self.tridiag_alpha.len();
if n == 0 {
return Vec::new();
}
let mut d = self.tridiag_alpha.clone();
let mut e = self.tridiag_beta.clone();
// Simple eigenvalue estimation using Gershgorin circles
let mut eigenvalues = Vec::with_capacity(n);
for i in 0..n {
let off_diag = if i > 0 && i - 1 < e.len() { e[i - 1].abs() } else { 0.0 }
+ if i < e.len() { e[i].abs() } else { 0.0 };
eigenvalues.push(d[i] + off_diag * 0.5); // Center of Gershgorin disk
}
eigenvalues.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
eigenvalues
}
}
// ============================================================================
// CHEEGER CONSTANT
// ============================================================================
/// Compute Cheeger constant (isoperimetric number) approximation.
struct CheegerComputation {
    /// Undirected edge list (u, v) with u, v < num_nodes.
    graph_edges: Vec<(usize, usize)>,
    num_nodes: usize,
}
impl CheegerComputation {
    /// Wraps an edge list for repeated Cheeger-constant estimation.
    fn new(num_nodes: usize, edges: Vec<(usize, usize)>) -> Self {
        Self {
            graph_edges: edges,
            num_nodes,
        }
    }
    /// Approximate Cheeger constant using spectral methods:
    /// h(G) >= lambda_2 / 2 (Cheeger inequality).
    ///
    /// NOTE(review): power iteration on the Laplacian converges to the
    /// LARGEST eigenpair, so deflating against it approximates the second
    /// largest eigenvalue rather than the true algebraic connectivity.
    /// Adequate as a benchmark workload; confirm before reusing as a bound.
    fn compute_spectral_lower_bound(&self) -> f64 {
        let laplacian = CsrMatrix::from_edges(self.num_nodes, &self.graph_edges);
        // Dominant eigenpair, used as the deflation direction below.
        // (Underscore-prefixed: the eigenvalue itself is not needed here.)
        let (_lambda_1, v1) = power_iteration(&laplacian, 100, 1e-8);
        let fiedler = self.compute_fiedler_vector(&laplacian, &v1);
        let lambda_2 = self.rayleigh_quotient(&laplacian, &fiedler);
        lambda_2 / 2.0
    }
    /// Power iteration on `laplacian` repeatedly re-orthogonalized against
    /// `ground_state`, yielding an approximate second eigenvector.
    fn compute_fiedler_vector(&self, laplacian: &CsrMatrix, ground_state: &[f64]) -> Vec<f64> {
        let n = laplacian.rows;
        // Start with a deterministic vector, then project out the ground state.
        let mut v: Vec<f64> = (0..n).map(|i| ((i as f64 * 2.0 + 1.0).sqrt()).cos()).collect();
        // Gram-Schmidt orthogonalization against ground state.
        let dot: f64 = v.iter().zip(ground_state.iter()).map(|(a, b)| a * b).sum();
        for (i, vi) in v.iter_mut().enumerate() {
            *vi -= dot * ground_state[i];
        }
        // Normalize.
        let norm: f64 = v.iter().map(|x| x * x).sum::<f64>().sqrt();
        if norm > 1e-10 {
            for vi in &mut v {
                *vi /= norm;
            }
        }
        // A few power iterations, re-orthogonalizing each step so rounding
        // does not drift back toward the ground state.
        for _ in 0..50 {
            let mut y = laplacian.matvec(&v);
            let dot: f64 = y.iter().zip(ground_state.iter()).map(|(a, b)| a * b).sum();
            for (i, yi) in y.iter_mut().enumerate() {
                *yi -= dot * ground_state[i];
            }
            let norm: f64 = y.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm < 1e-10 {
                break;
            }
            v = y.iter().map(|x| x / norm).collect();
        }
        v
    }
    /// Rayleigh quotient v^T L v / v^T v (0.0 for a near-zero vector).
    fn rayleigh_quotient(&self, laplacian: &CsrMatrix, v: &[f64]) -> f64 {
        let lv = laplacian.matvec(v);
        let numerator: f64 = v.iter().zip(lv.iter()).map(|(a, b)| a * b).sum();
        let denominator: f64 = v.iter().map(|x| x * x).sum();
        if denominator > 1e-10 {
            numerator / denominator
        } else {
            0.0
        }
    }
    /// Direct Cheeger constant computation via sweep cut on the Fiedler vector.
    ///
    /// Vertices are sorted by Fiedler value and moved one at a time into the
    /// left set; the minimum ratio cut(S) / min(|S|, |V\S|) over all prefixes
    /// is returned.
    fn compute_sweep_cut(&self) -> f64 {
        let laplacian = CsrMatrix::from_edges(self.num_nodes, &self.graph_edges);
        let (_, v1) = power_iteration(&laplacian, 100, 1e-8);
        let fiedler = self.compute_fiedler_vector(&laplacian, &v1);
        // Sort vertices by Fiedler vector values.
        let mut indices: Vec<usize> = (0..self.num_nodes).collect();
        indices.sort_by(|&a, &b| {
            fiedler[a].partial_cmp(&fiedler[b]).unwrap_or(std::cmp::Ordering::Equal)
        });
        // Sweep through cuts, maintaining the cut size incrementally.
        let mut min_cheeger = f64::MAX;
        let mut cut_edges: i64 = 0;
        let mut left_set: HashSet<usize> = HashSet::new();
        // saturating_sub guards the empty-graph case (previously underflowed).
        for &idx in indices.iter().take(self.num_nodes.saturating_sub(1)) {
            left_set.insert(idx);
            // BUGFIX: moving `idx` into the left set cuts its edges to the
            // right side but UN-cuts its edges to nodes already on the left.
            // The original only ever incremented, so `cut_edges` drifted
            // upward and the reported ratio was wrong.
            for &(u, v) in &self.graph_edges {
                let other = if u == idx {
                    v
                } else if v == idx {
                    u
                } else {
                    continue;
                };
                if left_set.contains(&other) {
                    cut_edges -= 1;
                } else {
                    cut_edges += 1;
                }
            }
            // Compute the Cheeger ratio for this prefix cut.
            let left_size = left_set.len();
            let right_size = self.num_nodes - left_size;
            let min_size = left_size.min(right_size);
            if min_size > 0 {
                let ratio = cut_edges as f64 / min_size as f64;
                min_cheeger = min_cheeger.min(ratio);
            }
        }
        min_cheeger
    }
}
// ============================================================================
// SPECTRAL CLUSTERING
// ============================================================================
struct SpectralClustering {
    // Number of clusters k requested for the embedding and k-means.
    num_clusters: usize,
    // Spectral embedding: one length-n vector per retained dimension.
    eigenvectors: Vec<Vec<f64>>,
}
impl SpectralClustering {
    /// Builds a k-dimensional spectral embedding from the first `num_clusters`
    /// Lanczos basis vectors of `matrix`.
    ///
    /// NOTE(review): these are Krylov basis vectors, not converged Laplacian
    /// eigenvectors; the code uses them as a cheap stand-in for a benchmark
    /// workload — confirm before reusing for real clustering.
    fn compute(matrix: &CsrMatrix, num_clusters: usize) -> Self {
        let lanczos = LanczosComputation::compute(matrix, num_clusters + 1, 100);
        // Get first k eigenvectors (corresponding to smallest eigenvalues)
        let eigenvectors = lanczos.basis_vectors.into_iter().take(num_clusters).collect();
        Self {
            num_clusters,
            eigenvectors,
        }
    }
    /// Assigns each of the n points to one of k clusters via a fixed 20-round
    /// Lloyd's k-means on the spectral embedding; returns an empty vector for
    /// an empty embedding. Deterministic: centroids are seeded at evenly
    /// spaced embedding points, not at random.
    fn cluster_assignments(&self) -> Vec<usize> {
        let n = if self.eigenvectors.is_empty() {
            0
        } else {
            self.eigenvectors[0].len()
        };
        if n == 0 || self.eigenvectors.is_empty() {
            return Vec::new();
        }
        // Simple k-means on spectral embedding
        let k = self.num_clusters;
        let dim = self.eigenvectors.len();
        // Extract embedding matrix (n x dim): transpose of the stored
        // dimension-major eigenvector layout.
        let embedding: Vec<Vec<f64>> = (0..n)
            .map(|i| self.eigenvectors.iter().map(|v| v[i]).collect())
            .collect();
        // Initialize centroids at evenly spaced points (indices i*n/k).
        let mut centroids: Vec<Vec<f64>> = (0..k)
            .map(|i| embedding[i * n / k].clone())
            .collect();
        let mut assignments = vec![0; n];
        // K-means iterations (fixed count, no convergence test).
        for _ in 0..20 {
            // Assign points to nearest centroid (squared Euclidean distance).
            for (i, point) in embedding.iter().enumerate() {
                let mut min_dist = f64::MAX;
                for (j, centroid) in centroids.iter().enumerate() {
                    let dist: f64 = point
                        .iter()
                        .zip(centroid.iter())
                        .map(|(a, b)| (a - b).powi(2))
                        .sum();
                    if dist < min_dist {
                        min_dist = dist;
                        assignments[i] = j;
                    }
                }
            }
            // Update centroids as the mean of their assigned points.
            let mut counts = vec![0usize; k];
            let mut new_centroids = vec![vec![0.0; dim]; k];
            for (i, point) in embedding.iter().enumerate() {
                let cluster = assignments[i];
                counts[cluster] += 1;
                for (j, &val) in point.iter().enumerate() {
                    new_centroids[cluster][j] += val;
                }
            }
            // An empty cluster keeps an all-zero centroid (count == 0 skips
            // the division), which differs from "keep previous centroid".
            for (j, centroid) in new_centroids.iter_mut().enumerate() {
                if counts[j] > 0 {
                    for val in centroid.iter_mut() {
                        *val /= counts[j] as f64;
                    }
                }
            }
            centroids = new_centroids;
        }
        assignments
    }
}
// ============================================================================
// GRAPH GENERATORS
// ============================================================================
/// Generates a G(n, p) Erdős–Rényi random graph with a deterministic LCG.
///
/// Returns undirected edges (i, j) with i < j; the same seed always yields
/// the same graph.
fn generate_random_graph(num_nodes: usize, edge_probability: f64, seed: u64) -> Vec<(usize, usize)> {
    let mut edges = Vec::new();
    let mut rng_state = seed;
    for i in 0..num_nodes {
        for j in (i + 1)..num_nodes {
            // Knuth/MMIX LCG step.
            rng_state = rng_state.wrapping_mul(6364136223846793005).wrapping_add(1);
            // Top 31 bits of the state mapped to [0, 1). BUGFIX: the original
            // divided the 31-bit value by u32::MAX (~2^32), so `random` never
            // exceeded ~0.5 and every requested probability was silently halved.
            let random = (rng_state >> 33) as f64 / (1u64 << 31) as f64;
            if random < edge_probability {
                edges.push((i, j));
            }
        }
    }
    edges
}
/// Generates a planted-partition (stochastic block model) graph:
/// `num_clusters` blocks of `cluster_size` nodes each, with intra-block edge
/// probability `p_in` and inter-block probability `p_out`. Deterministic in
/// `seed`; edges are returned as (i, j) with i < j.
fn generate_planted_partition(
    num_clusters: usize,
    cluster_size: usize,
    p_in: f64,
    p_out: f64,
    seed: u64,
) -> Vec<(usize, usize)> {
    let num_nodes = num_clusters * cluster_size;
    let mut edges = Vec::new();
    let mut rng_state = seed;
    for i in 0..num_nodes {
        for j in (i + 1)..num_nodes {
            // Block membership is determined by integer division.
            let cluster_i = i / cluster_size;
            let cluster_j = j / cluster_size;
            let prob = if cluster_i == cluster_j { p_in } else { p_out };
            // Knuth/MMIX LCG step.
            rng_state = rng_state.wrapping_mul(6364136223846793005).wrapping_add(1);
            // BUGFIX: map the 31-bit sample to [0, 1) by dividing by 2^31;
            // the original divided by u32::MAX, silently halving both p_in
            // and p_out.
            let random = (rng_state >> 33) as f64 / (1u64 << 31) as f64;
            if random < prob {
                edges.push((i, j));
            }
        }
    }
    edges
}
// ============================================================================
// BENCHMARKS
// ============================================================================
/// Power-iteration latency across graph sizes (target: < 5 ms at 1K nodes).
fn bench_power_iteration(c: &mut Criterion) {
    let mut g = c.benchmark_group("spectral/power_iteration");
    g.sample_size(30);
    for num_nodes in [100usize, 500, 1000, 2000, 5000] {
        // ~5 expected neighbors per node regardless of graph size.
        let edges = generate_random_graph(num_nodes, 5.0 / num_nodes as f64, 42);
        let matrix = CsrMatrix::from_edges(num_nodes, &edges);
        g.throughput(Throughput::Elements(num_nodes as u64));
        g.bench_with_input(BenchmarkId::new("standard", num_nodes), &matrix, |b, m| {
            b.iter(|| black_box(power_iteration(black_box(m), 100, 1e-8)))
        });
    }
    g.finish();
}
/// Lanczos latency for several eigenvalue counts (target: < 50 ms at 10K nodes).
fn bench_lanczos(c: &mut Criterion) {
    let mut g = c.benchmark_group("spectral/lanczos");
    g.sample_size(20);
    for num_nodes in [500usize, 1000, 2000, 5000, 10000] {
        let edges = generate_random_graph(num_nodes, 5.0 / num_nodes as f64, 42);
        let matrix = CsrMatrix::from_edges(num_nodes, &edges);
        g.throughput(Throughput::Elements(num_nodes as u64));
        // Measure factorization + eigenvalue extraction together, per k.
        for num_eig in [5usize, 10, 20] {
            g.bench_with_input(
                BenchmarkId::new(format!("{}_eigenvalues", num_eig), num_nodes),
                &(&matrix, num_eig),
                |b, (m, k)| {
                    b.iter(|| {
                        black_box(LanczosComputation::compute(black_box(m), *k, 100).eigenvalues())
                    })
                },
            );
        }
    }
    g.finish();
}
/// Cheeger-constant estimation latency (target: < 10 ms at 1K nodes).
fn bench_cheeger_constant(c: &mut Criterion) {
    let mut g = c.benchmark_group("spectral/cheeger");
    g.sample_size(20);
    for num_nodes in [100usize, 500, 1000, 2000] {
        let edges = generate_random_graph(num_nodes, 5.0 / num_nodes as f64, 42);
        let cheeger = CheegerComputation::new(num_nodes, edges);
        g.throughput(Throughput::Elements(num_nodes as u64));
        // Spectral lower bound via the Cheeger inequality.
        g.bench_with_input(
            BenchmarkId::new("spectral_bound", num_nodes),
            &cheeger,
            |b, ch| b.iter(|| black_box(ch.compute_spectral_lower_bound())),
        );
        // Direct sweep cut over the Fiedler ordering.
        g.bench_with_input(
            BenchmarkId::new("sweep_cut", num_nodes),
            &cheeger,
            |b, ch| b.iter(|| black_box(ch.compute_sweep_cut())),
        );
    }
    g.finish();
}
/// Spectral-clustering pipeline latency on planted-partition graphs
/// (target: < 100 ms at 5K nodes).
fn bench_spectral_clustering(c: &mut Criterion) {
    let mut g = c.benchmark_group("spectral/clustering");
    g.sample_size(20);
    for cluster_size in [50usize, 100, 200, 500] {
        let num_clusters = 5;
        let num_nodes = num_clusters * cluster_size;
        let edges = generate_planted_partition(num_clusters, cluster_size, 0.3, 0.01, 42);
        let matrix = CsrMatrix::from_edges(num_nodes, &edges);
        g.throughput(Throughput::Elements(num_nodes as u64));
        // Embedding construction (Lanczos) in isolation.
        g.bench_with_input(
            BenchmarkId::new("compute_embedding", num_nodes),
            &(&matrix, num_clusters),
            |b, (m, k)| b.iter(|| black_box(SpectralClustering::compute(black_box(m), *k))),
        );
        // K-means assignment on a precomputed embedding.
        let clustering = SpectralClustering::compute(&matrix, num_clusters);
        g.bench_with_input(
            BenchmarkId::new("assign_clusters", num_nodes),
            &clustering,
            |b, cl| b.iter(|| black_box(cl.cluster_assignments())),
        );
    }
    g.finish();
}
/// CSR matvec throughput: scalar row scan vs. the 4-way unrolled variant.
fn bench_matvec_simd(c: &mut Criterion) {
    let mut g = c.benchmark_group("spectral/matvec");
    g.sample_size(50);
    for num_nodes in [1000usize, 5000, 10000] {
        let edges = generate_random_graph(num_nodes, 10.0 / num_nodes as f64, 42);
        let matrix = CsrMatrix::from_edges(num_nodes, &edges);
        let x: Vec<f64> = (0..num_nodes).map(|i| (i as f64).sin()).collect();
        g.throughput(Throughput::Elements(num_nodes as u64));
        g.bench_with_input(
            BenchmarkId::new("standard", num_nodes),
            &(&matrix, &x),
            |b, (m, xv)| b.iter(|| black_box(m.matvec(black_box(xv)))),
        );
        // The unrolled variant only exists on x86_64 / aarch64.
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        g.bench_with_input(
            BenchmarkId::new("simd", num_nodes),
            &(&matrix, &x),
            |b, (m, xv)| b.iter(|| black_box(m.matvec_simd(black_box(xv)))),
        );
    }
    g.finish();
}
/// CSR Laplacian assembly time from a raw edge list.
fn bench_graph_laplacian_construction(c: &mut Criterion) {
    let mut g = c.benchmark_group("spectral/laplacian_construction");
    g.sample_size(30);
    for num_nodes in [500usize, 1000, 5000, 10000] {
        let edges = generate_random_graph(num_nodes, 5.0 / num_nodes as f64, 42);
        g.throughput(Throughput::Elements(num_nodes as u64));
        g.bench_with_input(
            BenchmarkId::new("csr_format", num_nodes),
            &(num_nodes, &edges),
            |b, (n, es)| b.iter(|| black_box(CsrMatrix::from_edges(*n, black_box(es)))),
        );
    }
    g.finish();
}
// Register the spectral-analysis benchmark groups with Criterion.
criterion_group!(
    benches,
    bench_power_iteration,
    bench_lanczos,
    bench_cheeger_constant,
    bench_spectral_clustering,
    bench_matvec_simd,
    bench_graph_laplacian_construction,
);
// Expands to a `main` that runs the `benches` group.
criterion_main!(benches);