Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,470 @@
//! Cocycle and Coboundary Operations
//!
//! Cocycles are the building blocks of cohomology. A cocycle is a cochain
//! that is in the kernel of the coboundary operator.
use super::sheaf::{Sheaf, SheafSection};
use super::simplex::{Cochain, SimplexId, SimplicialComplex};
use crate::substrate::NodeId;
use ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// A cocycle representing a cohomology class
///
/// A cocycle is a cochain f such that delta(f) = 0
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Cocycle {
    /// Degree (dimension) of the cocycle
    pub degree: usize,
    /// Values on simplices
    pub values: HashMap<SimplexId, f64>,
    /// Whether this is a coboundary (trivial cocycle)
    pub is_coboundary: bool,
    /// Cached L2 norm of `values`; every mutating method keeps it in sync
    norm: f64,
}

impl Cocycle {
    /// Create a new cocycle of the given degree from per-simplex values.
    pub fn new(degree: usize, values: HashMap<SimplexId, f64>) -> Self {
        let norm = values.values().map(|v| v * v).sum::<f64>().sqrt();
        Self {
            degree,
            values,
            is_coboundary: false,
            norm,
        }
    }

    /// Create a zero cocycle (no entries, norm 0).
    pub fn zero(degree: usize) -> Self {
        Self {
            degree,
            values: HashMap::new(),
            is_coboundary: false,
            norm: 0.0,
        }
    }

    /// Create a cocycle from a cochain (copies its values).
    pub fn from_cochain(cochain: &Cochain) -> Self {
        Self::new(cochain.dimension, cochain.values.clone())
    }

    /// Get the value on a simplex (0.0 for simplices with no stored entry).
    pub fn get(&self, id: SimplexId) -> f64 {
        self.values.get(&id).copied().unwrap_or(0.0)
    }

    /// Set the value on a simplex.
    ///
    /// Near-zero values (|v| <= 1e-10) remove the entry so the map stays
    /// sparse. The cached norm is recomputed.
    pub fn set(&mut self, id: SimplexId, value: f64) {
        if value.abs() > 1e-10 {
            self.values.insert(id, value);
        } else {
            self.values.remove(&id);
        }
        self.update_norm();
    }

    /// Update the cached norm from the current values.
    fn update_norm(&mut self) {
        self.norm = self.values.values().map(|v| v * v).sum::<f64>().sqrt();
    }

    /// Get the L2 norm.
    pub fn norm(&self) -> f64 {
        self.norm
    }

    /// Normalize the cocycle to unit norm (no-op for near-zero cocycles).
    pub fn normalize(&mut self) {
        if self.norm > 1e-10 {
            let scale = 1.0 / self.norm;
            for v in self.values.values_mut() {
                *v *= scale;
            }
            self.norm = 1.0;
        }
    }

    /// Add another cocycle component-wise.
    ///
    /// # Panics
    /// Panics if the degrees differ.
    pub fn add(&mut self, other: &Cocycle) {
        assert_eq!(self.degree, other.degree, "Cocycle degrees must match");
        for (&id, &value) in &other.values {
            let new_val = self.get(id) + value;
            // Same near-zero pruning as `set`, but the O(n) norm
            // recomputation is deferred until all entries are merged;
            // calling `set` per entry made this method O(n * m).
            if new_val.abs() > 1e-10 {
                self.values.insert(id, new_val);
            } else {
                self.values.remove(&id);
            }
        }
        self.update_norm();
    }

    /// Scale the cocycle; the norm scales by |factor|.
    pub fn scale(&mut self, factor: f64) {
        for v in self.values.values_mut() {
            *v *= factor;
        }
        self.norm *= factor.abs();
    }

    /// Check if this is a zero cocycle (norm below `tolerance`).
    pub fn is_zero(&self, tolerance: f64) -> bool {
        self.norm < tolerance
    }

    /// Inner product with another cocycle (sum over shared simplices).
    ///
    /// # Panics
    /// Panics if the degrees differ.
    pub fn inner_product(&self, other: &Cocycle) -> f64 {
        assert_eq!(self.degree, other.degree, "Cocycle degrees must match");
        let mut sum = 0.0;
        for (&id, &v) in &self.values {
            sum += v * other.get(id);
        }
        sum
    }

    /// Convert to a plain cochain (drops the `is_coboundary` flag).
    pub fn to_cochain(&self) -> Cochain {
        Cochain::from_values(self.degree, self.values.clone())
    }
}
/// Coboundary operator delta: C^n -> C^{n+1}
///
/// For a cochain f on n-simplices, delta(f) evaluated on an (n+1)-simplex sigma is:
/// delta(f)(sigma) = sum_{i=0}^{n+1} (-1)^i f(d_i sigma)
/// where d_i sigma is the i-th face of sigma
pub struct Coboundary {
    /// The simplicial complex
    complex: SimplicialComplex,
}

impl Coboundary {
    /// Create a coboundary operator for a simplicial complex
    pub fn new(complex: SimplicialComplex) -> Self {
        Self { complex }
    }

    /// Get reference to the complex
    pub fn complex(&self) -> &SimplicialComplex {
        &self.complex
    }

    /// Apply the coboundary operator to a cochain
    ///
    /// delta: C^n -> C^{n+1}
    ///
    /// The value on each (n+1)-simplex is the signed sum of the input
    /// cochain over that simplex's boundary faces; values below 1e-10 in
    /// magnitude are dropped to keep the result sparse.
    pub fn apply(&self, cochain: &Cochain) -> Cochain {
        let target_dim = cochain.dimension + 1;
        let mut result = Cochain::zero(target_dim);
        // For each (n+1)-simplex sigma
        if let Some(target_simplices) = self.complex.simplices.get(&target_dim) {
            for (sigma_id, sigma) in target_simplices {
                // Compute delta(f)(sigma) = sum(-1)^i f(d_i sigma)
                let boundary = sigma.boundary();
                let mut value = 0.0;
                for (face, sign) in &boundary {
                    // `sign` carries the (-1)^i orientation of the i-th face.
                    value += (*sign as f64) * cochain.get(face.id);
                }
                if value.abs() > 1e-10 {
                    result.set(*sigma_id, value);
                }
            }
        }
        result
    }

    /// Apply the adjoint coboundary (negative boundary transpose)
    ///
    /// delta^*: C^{n+1} -> C^n
    ///
    /// Degree-0 input has no lower degree to map into, so the zero
    /// 0-cochain is returned by convention.
    pub fn apply_adjoint(&self, cochain: &Cochain) -> Cochain {
        if cochain.dimension == 0 {
            return Cochain::zero(0);
        }
        let target_dim = cochain.dimension - 1;
        let mut result = Cochain::zero(target_dim);
        // For each n-simplex tau, compute sum over (n+1)-simplices containing tau
        if let Some(simplices) = self.complex.simplices.get(&cochain.dimension) {
            for (sigma_id, sigma) in simplices {
                let boundary = sigma.boundary();
                let sigma_value = cochain.get(*sigma_id);
                // Zero entries contribute nothing to any face; skip them.
                if sigma_value.abs() > 1e-10 {
                    for (face, sign) in &boundary {
                        let current = result.get(face.id);
                        result.set(face.id, current + (*sign as f64) * sigma_value);
                    }
                }
            }
        }
        result
    }

    /// Check if a cochain is a cocycle (in kernel of delta)
    pub fn is_cocycle(&self, cochain: &Cochain, tolerance: f64) -> bool {
        let delta_f = self.apply(cochain);
        delta_f.norm() < tolerance
    }

    /// Check if a cocycle is a coboundary (in image of delta)
    ///
    /// NOTE(review): only degree 0 is decided here, and by the
    /// "constant values" criterion — that matches a reduced-cohomology
    /// convention; in unreduced degree-0 cohomology only the zero cochain
    /// is a coboundary. Confirm which convention callers expect. For
    /// degree >= 1 this always returns `false` ("unknown"), since deciding
    /// it would require solving delta(g) = f.
    pub fn is_coboundary(&self, cocycle: &Cocycle, tolerance: f64) -> bool {
        // A cocycle is a coboundary if it's in the image of delta
        // This requires solving delta(g) = f, which is more complex
        // For now, we use a simple check based on dimension
        if cocycle.degree == 0 {
            // 0-cocycles are coboundaries iff they're constant
            let values: Vec<f64> = cocycle.values.values().copied().collect();
            if values.is_empty() {
                return true;
            }
            let first = values[0];
            values.iter().all(|&v| (v - first).abs() < tolerance)
        } else {
            // For higher degrees, we'd need to solve a linear system
            // Returning false as a conservative estimate
            false
        }
    }

    /// Compute the Laplacian L = delta^* delta + delta delta^*
    pub fn laplacian(&self, cochain: &Cochain) -> Cochain {
        // L = delta^* delta + delta delta^*
        let delta_f = self.apply(cochain);
        let delta_star_delta_f = self.apply_adjoint(&delta_f);
        let delta_star_f = self.apply_adjoint(cochain);
        let delta_delta_star_f = self.apply(&delta_star_f);
        let mut result = delta_star_delta_f;
        result.add(&delta_delta_star_f);
        result
    }
}
/// Builder for constructing cocycles
pub struct CocycleBuilder {
    degree: usize,
    values: HashMap<SimplexId, f64>,
}

impl CocycleBuilder {
    /// Start a builder for a cocycle of the given degree.
    pub fn new(degree: usize) -> Self {
        let values = HashMap::new();
        Self { degree, values }
    }

    /// Record the value on one simplex (later calls overwrite earlier ones).
    pub fn value(mut self, id: SimplexId, value: f64) -> Self {
        self.values.insert(id, value);
        self
    }

    /// Record many (simplex, value) pairs at once.
    pub fn values(mut self, values: impl IntoIterator<Item = (SimplexId, f64)>) -> Self {
        self.values.extend(values);
        self
    }

    /// Finish: construct the cocycle (its norm is computed at this point).
    pub fn build(self) -> Cocycle {
        Cocycle::new(self.degree, self.values)
    }
}
/// Sheaf-valued cocycle for sheaf cohomology
///
/// Instead of real-valued, this assigns vectors from stalks
#[derive(Debug, Clone)]
pub struct SheafCocycle {
    /// Degree of the cocycle
    pub degree: usize,
    /// Values on simplices (simplex -> vector value)
    pub values: HashMap<SimplexId, Array1<f64>>,
}

impl SheafCocycle {
    /// Create an empty sheaf-valued cocycle of the given degree.
    pub fn new(degree: usize) -> Self {
        let values = HashMap::new();
        Self { degree, values }
    }

    /// Assign the vector value on a simplex, replacing any previous one.
    pub fn set(&mut self, id: SimplexId, value: Array1<f64>) {
        self.values.insert(id, value);
    }

    /// Borrow the vector value on a simplex, if one was assigned.
    pub fn get(&self, id: SimplexId) -> Option<&Array1<f64>> {
        self.values.get(&id)
    }

    /// Sum of squared entries over all assigned vectors.
    pub fn norm_squared(&self) -> f64 {
        let mut total = 0.0;
        for vector in self.values.values() {
            total += vector.iter().map(|x| x * x).sum::<f64>();
        }
        total
    }

    /// L2 norm: square root of `norm_squared`.
    pub fn norm(&self) -> f64 {
        self.norm_squared().sqrt()
    }
}
/// Sheaf coboundary operator
///
/// For a sheaf F on a graph, the coboundary uses restriction maps:
/// (delta f)(e) = rho_t(f(t)) - rho_s(f(s))
pub struct SheafCoboundary<'a> {
    /// The sheaf
    sheaf: &'a Sheaf,
    /// Edge list as (source, target) pairs
    edges: Vec<(NodeId, NodeId)>,
}

impl<'a> SheafCoboundary<'a> {
    /// Build the operator from a sheaf and an explicit edge list.
    pub fn new(sheaf: &'a Sheaf, edges: Vec<(NodeId, NodeId)>) -> Self {
        Self { sheaf, edges }
    }

    /// Apply sheaf coboundary to a section.
    ///
    /// Produces a degree-1 cocycle holding the residual vector of each
    /// edge; edges without a residual are simply left out.
    pub fn apply(&self, section: &SheafSection) -> SheafCocycle {
        let mut cocycle = SheafCocycle::new(1);
        for (edge_index, &(source, target)) in self.edges.iter().enumerate() {
            // The edge's position in the list doubles as its simplex id.
            let residual = self.sheaf.edge_residual(source, target, section);
            if let Some(residual) = residual {
                cocycle.set(SimplexId::new(edge_index as u64), residual);
            }
        }
        cocycle
    }

    /// Sheaf Laplacian (Dirichlet) energy: squared norm of delta(s).
    pub fn laplacian_energy(&self, section: &SheafSection) -> f64 {
        self.apply(section).norm_squared()
    }

    /// A section is global exactly when its coboundary vanishes.
    pub fn is_global_section(&self, section: &SheafSection, tolerance: f64) -> bool {
        self.apply(section).norm() < tolerance
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cohomology::simplex::Simplex;
    use uuid::Uuid;

    /// Fresh random node id so test graphs never collide.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }

    #[test]
    fn test_cocycle_creation() {
        let mut values = HashMap::new();
        values.insert(SimplexId::new(0), 1.0);
        values.insert(SimplexId::new(1), 2.0);
        let cocycle = Cocycle::new(1, values);
        assert_eq!(cocycle.degree, 1);
        // ||(1, 2)|| = sqrt(5)
        assert!((cocycle.norm() - (5.0_f64).sqrt()).abs() < 1e-10);
    }

    #[test]
    fn test_cocycle_builder() {
        // 3-4-5 right triangle: norm of (3, 4) is exactly 5.
        let cocycle = CocycleBuilder::new(1)
            .value(SimplexId::new(0), 3.0)
            .value(SimplexId::new(1), 4.0)
            .build();
        assert!((cocycle.norm() - 5.0).abs() < 1e-10);
    }

    #[test]
    fn test_coboundary_on_path() {
        // Create a path graph: v0 -- v1 -- v2
        let v0 = make_node_id();
        let v1 = make_node_id();
        let v2 = make_node_id();
        let nodes = vec![v0, v1, v2];
        let edges = vec![(v0, v1), (v1, v2)];
        let complex = SimplicialComplex::from_graph_cliques(&nodes, &edges, 1);
        let coboundary = Coboundary::new(complex);
        // Create a 0-cochain that assigns different values to vertices
        let mut f = Cochain::zero(0);
        for (i, simplex) in coboundary.complex().simplices_of_dim(0).enumerate() {
            f.set(simplex.id, i as f64);
        }
        // Apply coboundary
        let delta_f = coboundary.apply(&f);
        assert_eq!(delta_f.dimension, 1);
        // delta(f) should be non-zero since f is not constant
        assert!(!delta_f.is_zero());
    }

    #[test]
    fn test_constant_cochain_is_cocycle() {
        // Create a triangle
        let v0 = make_node_id();
        let v1 = make_node_id();
        let v2 = make_node_id();
        let nodes = vec![v0, v1, v2];
        let edges = vec![(v0, v1), (v1, v2), (v0, v2)];
        let complex = SimplicialComplex::from_graph_cliques(&nodes, &edges, 2);
        let coboundary = Coboundary::new(complex);
        // Create a constant 0-cochain
        let mut f = Cochain::zero(0);
        for simplex in coboundary.complex().simplices_of_dim(0) {
            f.set(simplex.id, 1.0);
        }
        // Constant function should be a cocycle: the signed sums cancel on
        // every edge.
        assert!(coboundary.is_cocycle(&f, 1e-10));
    }

    #[test]
    fn test_cocycle_inner_product() {
        let c1 = CocycleBuilder::new(1)
            .value(SimplexId::new(0), 1.0)
            .value(SimplexId::new(1), 0.0)
            .build();
        let c2 = CocycleBuilder::new(1)
            .value(SimplexId::new(0), 0.0)
            .value(SimplexId::new(1), 1.0)
            .build();
        // Orthogonal cocycles
        assert!((c1.inner_product(&c2)).abs() < 1e-10);
        // Self inner product equals norm squared
        assert!((c1.inner_product(&c1) - c1.norm() * c1.norm()).abs() < 1e-10);
    }
}

View File

@@ -0,0 +1,600 @@
//! Cohomology Group Computation
//!
//! Computes the cohomology groups H^n(K, F) using linear algebra methods.
use super::cocycle::{Coboundary, Cocycle};
use super::simplex::{Cochain, SimplexId, SimplicialComplex};
use ndarray::{Array1, Array2, Axis};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Configuration for cohomology computation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CohomologyConfig {
    /// Maximum dimension to compute
    pub max_dimension: usize,
    /// Tolerance for numerical zero
    pub tolerance: f64,
    // NOTE(review): `compute_generators` and `use_sparse` are not read by
    // the computation code visible in this file — confirm they are
    // consumed elsewhere before relying on them.
    /// Whether to compute explicit generators
    pub compute_generators: bool,
    /// Whether to use sparse methods for large complexes
    pub use_sparse: bool,
}

impl Default for CohomologyConfig {
    /// Defaults: dimensions 0..=2, 1e-10 numerical-zero tolerance,
    /// generators enabled, dense linear algebra.
    fn default() -> Self {
        Self {
            max_dimension: 2,
            tolerance: 1e-10,
            compute_generators: true,
            use_sparse: false,
        }
    }
}
/// Betti numbers of a space
///
/// The n-th Betti number b_n = dim(H^n) counts "n-dimensional holes"
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BettiNumbers {
    /// Betti numbers indexed by dimension
    pub numbers: Vec<usize>,
    /// Euler characteristic (alternating sum b_0 - b_1 + b_2 - ...)
    pub euler_characteristic: i64,
}
impl BettiNumbers {
    /// Build from a vector of Betti numbers, deriving the Euler
    /// characteristic as the alternating sum b_0 - b_1 + b_2 - ...
    pub fn from_vec(numbers: Vec<usize>) -> Self {
        let mut euler_characteristic: i64 = 0;
        for (dim, &betti) in numbers.iter().enumerate() {
            if dim % 2 == 0 {
                euler_characteristic += betti as i64;
            } else {
                euler_characteristic -= betti as i64;
            }
        }
        Self {
            numbers,
            euler_characteristic,
        }
    }

    /// Betti number in dimension `n` (0 when beyond the computed range).
    pub fn b(&self, n: usize) -> usize {
        match self.numbers.get(n) {
            Some(&betti) => betti,
            None => 0,
        }
    }

    /// Total number of holes: the sum of all Betti numbers.
    pub fn total_rank(&self) -> usize {
        let mut rank = 0;
        for &betti in &self.numbers {
            rank += betti;
        }
        rank
    }
}
/// A cohomology group H^n(K, F)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CohomologyGroup {
    /// Dimension of the cohomology group
    pub dimension: usize,
    /// Generators (representatives of cohomology classes)
    pub generators: Vec<Cocycle>,
    /// Betti number (dimension of the group as vector space);
    /// kept equal to `generators.len()` by the constructors and `normalize`
    pub betti_number: usize,
    /// Whether generators are orthonormalized (see `normalize`)
    pub normalized: bool,
}
impl CohomologyGroup {
/// Create a trivial cohomology group
pub fn trivial(dimension: usize) -> Self {
Self {
dimension,
generators: Vec::new(),
betti_number: 0,
normalized: true,
}
}
/// Create a cohomology group from generators
pub fn from_generators(dimension: usize, generators: Vec<Cocycle>) -> Self {
let betti_number = generators.len();
Self {
dimension,
generators,
betti_number,
normalized: false,
}
}
/// Normalize the generators to be orthonormal
pub fn normalize(&mut self) {
if self.generators.is_empty() {
self.normalized = true;
return;
}
// Gram-Schmidt orthonormalization
let mut orthonormal = Vec::new();
for gen in &self.generators {
let mut v = gen.clone();
// Subtract projections onto previous vectors
for u in &orthonormal {
let proj_coeff = v.inner_product(u) / u.inner_product(u);
let mut proj = u.clone();
proj.scale(proj_coeff);
v.scale(1.0);
for (&id, &val) in &proj.values {
let current = v.get(id);
v.set(id, current - val);
}
}
// Normalize
v.normalize();
if v.norm() > 1e-10 {
orthonormal.push(v);
}
}
self.generators = orthonormal;
self.betti_number = self.generators.len();
self.normalized = true;
}
/// Check if a cocycle represents the zero class
pub fn is_trivial_class(&self, cocycle: &Cocycle) -> bool {
// A cocycle represents the zero class if it's a coboundary
// This is checked during computation
cocycle.is_coboundary
}
/// Project a cocycle onto this cohomology group
pub fn project(&self, cocycle: &Cocycle) -> Cocycle {
if self.generators.is_empty() {
return Cocycle::zero(self.dimension);
}
let mut result = Cocycle::zero(self.dimension);
for gen in &self.generators {
let coeff = cocycle.inner_product(gen);
let mut contrib = gen.clone();
contrib.scale(coeff);
for (&id, &val) in &contrib.values {
let current = result.get(id);
result.set(id, current + val);
}
}
result
}
}
/// Computes cohomology groups for a simplicial complex
pub struct CohomologyComputer {
    /// The simplicial complex
    complex: SimplicialComplex,
    /// Configuration (tolerance, max dimension, ...)
    config: CohomologyConfig,
    // NOTE(review): `coboundary` is stored but not used by the methods
    // visible in this file — confirm downstream use.
    /// Coboundary operator
    coboundary: Coboundary,
    /// Cached boundary matrices, keyed by dimension (see build_boundary_matrix)
    boundary_matrices: HashMap<usize, Array2<f64>>,
}
impl CohomologyComputer {
    /// Create a new cohomology computer
    pub fn new(complex: SimplicialComplex, config: CohomologyConfig) -> Self {
        // The coboundary operator keeps its own copy of the complex.
        let coboundary = Coboundary::new(complex.clone());
        Self {
            complex,
            config,
            coboundary,
            boundary_matrices: HashMap::new(),
        }
    }

    /// Create with default configuration
    pub fn with_default_config(complex: SimplicialComplex) -> Self {
        Self::new(complex, CohomologyConfig::default())
    }

    /// Build the boundary matrix for dimension n
    ///
    /// The boundary matrix d_n: C_n -> C_{n-1} has entry (i,j) equal to
    /// the coefficient of simplex i in the boundary of simplex j.
    /// Matrices are cached per dimension and returned by clone.
    fn build_boundary_matrix(&mut self, n: usize) -> Array2<f64> {
        if let Some(matrix) = self.boundary_matrices.get(&n) {
            return matrix.clone();
        }
        let n_simplices: Vec<_> = self.complex.simplices_of_dim(n).collect();
        let n_minus_1_simplices: Vec<_> = if n > 0 {
            self.complex.simplices_of_dim(n - 1).collect()
        } else {
            Vec::new()
        };
        // d_0 (or any dimension with no faces below) is the 0 x cols matrix.
        if n == 0 || n_minus_1_simplices.is_empty() {
            let matrix = Array2::zeros((0, n_simplices.len()));
            self.boundary_matrices.insert(n, matrix.clone());
            return matrix;
        }
        // Create index maps: row index per (n-1)-simplex id.
        let simplex_to_idx: HashMap<SimplexId, usize> = n_minus_1_simplices
            .iter()
            .enumerate()
            .map(|(i, s)| (s.id, i))
            .collect();
        let rows = n_minus_1_simplices.len();
        let cols = n_simplices.len();
        let mut matrix = Array2::zeros((rows, cols));
        for (j, simplex) in n_simplices.iter().enumerate() {
            let boundary = simplex.boundary();
            for (face, sign) in boundary {
                // `sign` is the (-1)^i orientation of the i-th face.
                if let Some(&i) = simplex_to_idx.get(&face.id) {
                    matrix[[i, j]] = sign as f64;
                }
            }
        }
        self.boundary_matrices.insert(n, matrix.clone());
        matrix
    }

    /// Compute the coboundary matrix (transpose of boundary matrix)
    ///
    /// delta_n is the transpose of d_{n+1}, so its columns are indexed by
    /// the n-simplices.
    fn build_coboundary_matrix(&mut self, n: usize) -> Array2<f64> {
        let boundary = self.build_boundary_matrix(n + 1);
        boundary.t().to_owned()
    }

    /// Compute the kernel of a matrix using SVD
    ///
    /// Despite the name, this uses Gaussian elimination (RREF): each free
    /// column yields one null-space basis vector via back-substitution.
    fn compute_kernel(&self, matrix: &Array2<f64>) -> Vec<Array1<f64>> {
        if matrix.is_empty() || matrix.ncols() == 0 {
            return Vec::new();
        }
        // Use simple Gaussian elimination for kernel computation
        // For production, should use proper SVD
        let mut kernel_basis = Vec::new();
        // Find null space using reduced row echelon form
        let (rref, pivot_cols) = self.row_reduce(matrix);
        let n_cols = matrix.ncols();
        // Columns without a pivot are the free variables.
        let free_vars: Vec<usize> = (0..n_cols).filter(|c| !pivot_cols.contains(c)).collect();
        for &free_var in &free_vars {
            let mut kernel_vec = Array1::zeros(n_cols);
            kernel_vec[free_var] = 1.0;
            // Back-substitute: in RREF, x[pivot] = -rref[row, free_var]
            // when the free variable is set to 1.
            for (row_idx, &pivot_col) in pivot_cols.iter().enumerate() {
                if row_idx < rref.nrows() {
                    kernel_vec[pivot_col] = -rref[[row_idx, free_var]];
                }
            }
            if kernel_vec.iter().map(|x| x * x).sum::<f64>().sqrt() > self.config.tolerance {
                kernel_basis.push(kernel_vec);
            }
        }
        kernel_basis
    }

    /// Compute the image of a matrix
    ///
    /// The pivot columns of the ORIGINAL matrix form a basis for its
    /// column space.
    fn compute_image(&self, matrix: &Array2<f64>) -> Vec<Array1<f64>> {
        if matrix.is_empty() || matrix.ncols() == 0 {
            return Vec::new();
        }
        let (_, pivot_cols) = self.row_reduce(matrix);
        pivot_cols
            .into_iter()
            .map(|col| matrix.column(col).to_owned())
            .collect()
    }

    /// Row reduce to RREF
    ///
    /// Uses partial pivoting (largest absolute value in the column) and
    /// treats entries below `config.tolerance` as zero. Returns the RREF
    /// matrix and the list of pivot columns, in row order.
    fn row_reduce(&self, matrix: &Array2<f64>) -> (Array2<f64>, Vec<usize>) {
        let mut a = matrix.clone();
        let m = a.nrows();
        let n = a.ncols();
        let mut pivot_cols = Vec::new();
        let mut pivot_row = 0;
        for col in 0..n {
            if pivot_row >= m {
                break;
            }
            // Find pivot: row with the largest magnitude in this column.
            let mut max_row = pivot_row;
            let mut max_val = a[[pivot_row, col]].abs();
            for row in (pivot_row + 1)..m {
                if a[[row, col]].abs() > max_val {
                    max_val = a[[row, col]].abs();
                    max_row = row;
                }
            }
            // Numerically zero column below the pivot row: no pivot here.
            if max_val < self.config.tolerance {
                continue;
            }
            // Swap rows
            for c in 0..n {
                let tmp = a[[pivot_row, c]];
                a[[pivot_row, c]] = a[[max_row, c]];
                a[[max_row, c]] = tmp;
            }
            // Scale pivot row so the pivot entry becomes 1.
            let pivot_val = a[[pivot_row, col]];
            for c in 0..n {
                a[[pivot_row, c]] /= pivot_val;
            }
            // Eliminate other rows (both above and below: full RREF).
            for row in 0..m {
                if row != pivot_row {
                    let factor = a[[row, col]];
                    for c in 0..n {
                        a[[row, c]] -= factor * a[[pivot_row, c]];
                    }
                }
            }
            pivot_cols.push(col);
            pivot_row += 1;
        }
        (a, pivot_cols)
    }

    /// Compute cohomology in dimension n: H^n = ker(delta_n) / im(delta_{n-1})
    ///
    /// NOTE(review): the coordinate indexing assumes `simplices_of_dim(n)`
    /// yields the same order here as inside `build_boundary_matrix` —
    /// confirm that the underlying storage iterates deterministically.
    pub fn compute_cohomology(&mut self, n: usize) -> CohomologyGroup {
        // Get simplices for this dimension
        let n_simplices: Vec<_> = self.complex.simplices_of_dim(n).collect();
        if n_simplices.is_empty() {
            return CohomologyGroup::trivial(n);
        }
        // Build simplex ID to index map
        let simplex_to_idx: HashMap<SimplexId, usize> = n_simplices
            .iter()
            .enumerate()
            .map(|(i, s)| (s.id, i))
            .collect();
        // Compute ker(delta_n): cochains f such that delta(f) = 0
        let delta_n = self.build_coboundary_matrix(n);
        let kernel_basis = self.compute_kernel(&delta_n);
        if kernel_basis.is_empty() {
            return CohomologyGroup::trivial(n);
        }
        // Compute im(delta_{n-1}): cochains that are coboundaries
        let image_basis = if n > 0 {
            let delta_n_minus_1 = self.build_coboundary_matrix(n - 1);
            self.compute_image(&delta_n_minus_1)
        } else {
            Vec::new()
        };
        // Quotient: find kernel vectors not in image
        // Use orthogonal projection to remove image component
        let generators = self.quotient_space(&kernel_basis, &image_basis, &simplex_to_idx, n);
        CohomologyGroup::from_generators(n, generators)
    }

    /// Compute quotient space ker/im
    ///
    /// Projects each kernel vector orthogonally away from the image span
    /// (and from previously accepted quotient vectors, Gram-Schmidt style);
    /// what survives with norm above tolerance represents a cohomology class.
    fn quotient_space(
        &self,
        kernel: &[Array1<f64>],
        image: &[Array1<f64>],
        simplex_to_idx: &HashMap<SimplexId, usize>,
        dimension: usize,
    ) -> Vec<Cocycle> {
        if kernel.is_empty() {
            return Vec::new();
        }
        // Build index to simplex ID map (inverse of simplex_to_idx).
        let idx_to_simplex: HashMap<usize, SimplexId> =
            simplex_to_idx.iter().map(|(&id, &idx)| (idx, id)).collect();
        // If no image, all kernel elements are generators
        if image.is_empty() {
            return kernel
                .iter()
                .map(|v| self.array_to_cocycle(v, &idx_to_simplex, dimension, false))
                .collect();
        }
        // Orthogonalize kernel against image
        let mut quotient_basis: Vec<Array1<f64>> = Vec::new();
        for kernel_vec in kernel {
            let mut v = kernel_vec.clone();
            // Project out image components
            for img_vec in image {
                let norm_sq = img_vec.iter().map(|x| x * x).sum::<f64>();
                if norm_sq > self.config.tolerance {
                    let dot: f64 = v.iter().zip(img_vec.iter()).map(|(a, b)| a * b).sum();
                    let proj_coeff = dot / norm_sq;
                    v = &v - &(img_vec * proj_coeff);
                }
            }
            // Project out previous quotient vectors
            for prev in &quotient_basis {
                let norm_sq: f64 = prev.iter().map(|x| x * x).sum();
                if norm_sq > self.config.tolerance {
                    let dot: f64 = v.iter().zip(prev.iter()).map(|(a, b)| a * b).sum();
                    let proj_coeff = dot / norm_sq;
                    v = &v - &(prev * proj_coeff);
                }
            }
            let norm = v.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm > self.config.tolerance {
                quotient_basis.push(v);
            }
        }
        quotient_basis
            .iter()
            .map(|v| self.array_to_cocycle(v, &idx_to_simplex, dimension, false))
            .collect()
    }

    /// Convert array to cocycle
    ///
    /// Entries at or below tolerance are dropped so the cocycle stays sparse.
    fn array_to_cocycle(
        &self,
        arr: &Array1<f64>,
        idx_to_simplex: &HashMap<usize, SimplexId>,
        dimension: usize,
        is_coboundary: bool,
    ) -> Cocycle {
        let mut values = HashMap::new();
        for (idx, &val) in arr.iter().enumerate() {
            if val.abs() > self.config.tolerance {
                if let Some(&simplex_id) = idx_to_simplex.get(&idx) {
                    values.insert(simplex_id, val);
                }
            }
        }
        let mut cocycle = Cocycle::new(dimension, values);
        cocycle.is_coboundary = is_coboundary;
        cocycle
    }

    /// Compute all cohomology groups up to max_dimension
    ///
    /// Capped by the complex's own maximum dimension.
    pub fn compute_all(&mut self) -> Vec<CohomologyGroup> {
        let max_dim = self.config.max_dimension.min(self.complex.max_dimension);
        (0..=max_dim).map(|n| self.compute_cohomology(n)).collect()
    }

    /// Compute Betti numbers
    pub fn compute_betti_numbers(&mut self) -> BettiNumbers {
        let groups = self.compute_all();
        let numbers: Vec<usize> = groups.iter().map(|g| g.betti_number).collect();
        BettiNumbers::from_vec(numbers)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::substrate::NodeId;
    use uuid::Uuid;

    /// Fresh random node id for building test complexes.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }

    #[test]
    fn test_point_cohomology() {
        // Single point: H^0 = R, H^n = 0 for n > 0
        let v0 = make_node_id();
        let complex = SimplicialComplex::from_graph_cliques(&[v0], &[], 0);
        let mut computer = CohomologyComputer::with_default_config(complex);
        let betti = computer.compute_betti_numbers();
        assert_eq!(betti.b(0), 1);
    }

    #[test]
    fn test_two_points_cohomology() {
        // Two disconnected points: H^0 = R^2
        let v0 = make_node_id();
        let v1 = make_node_id();
        let complex = SimplicialComplex::from_graph_cliques(&[v0, v1], &[], 0);
        let mut computer = CohomologyComputer::with_default_config(complex);
        let betti = computer.compute_betti_numbers();
        // b0 counts connected components.
        assert_eq!(betti.b(0), 2);
    }

    #[test]
    fn test_edge_cohomology() {
        // Single edge: H^0 = R (connected), H^n = 0 for n > 0
        let v0 = make_node_id();
        let v1 = make_node_id();
        let complex = SimplicialComplex::from_graph_cliques(&[v0, v1], &[(v0, v1)], 1);
        let mut computer = CohomologyComputer::with_default_config(complex);
        let betti = computer.compute_betti_numbers();
        assert_eq!(betti.b(0), 1);
    }

    #[test]
    fn test_circle_cohomology() {
        // Triangle boundary (circle): H^0 = R, H^1 = R
        let v0 = make_node_id();
        let v1 = make_node_id();
        let v2 = make_node_id();
        // Only edges, no filled triangle
        let nodes = vec![v0, v1, v2];
        let edges = vec![(v0, v1), (v1, v2), (v0, v2)];
        let complex = SimplicialComplex::from_graph_cliques(&nodes, &edges, 1);
        let mut computer = CohomologyComputer::with_default_config(complex);
        let betti = computer.compute_betti_numbers();
        assert_eq!(betti.b(0), 1); // Connected
        assert_eq!(betti.b(1), 1); // One hole
    }

    #[test]
    fn test_filled_triangle_cohomology() {
        // Filled triangle (disk): H^0 = R, H^n = 0 for n > 0
        let v0 = make_node_id();
        let v1 = make_node_id();
        let v2 = make_node_id();
        let nodes = vec![v0, v1, v2];
        let edges = vec![(v0, v1), (v1, v2), (v0, v2)];
        // max dimension 2 fills in the 2-simplex, killing the 1-cycle.
        let complex = SimplicialComplex::from_graph_cliques(&nodes, &edges, 2);
        let mut computer = CohomologyComputer::with_default_config(complex);
        let betti = computer.compute_betti_numbers();
        assert_eq!(betti.b(0), 1); // Connected
        assert_eq!(betti.b(1), 0); // No hole (filled)
    }

    #[test]
    fn test_euler_characteristic() {
        let v0 = make_node_id();
        let v1 = make_node_id();
        let v2 = make_node_id();
        let nodes = vec![v0, v1, v2];
        let edges = vec![(v0, v1), (v1, v2), (v0, v2)];
        let complex = SimplicialComplex::from_graph_cliques(&nodes, &edges, 2);
        // Euler characteristic from simplices: 3 - 3 + 1 = 1
        assert_eq!(complex.euler_characteristic(), 1);
        let mut computer = CohomologyComputer::with_default_config(complex);
        let betti = computer.compute_betti_numbers();
        // Euler characteristic from Betti: b0 - b1 + b2 = 1 - 0 + 0 = 1
        assert_eq!(betti.euler_characteristic, 1);
    }
}

View File

@@ -0,0 +1,485 @@
//! Sheaf Diffusion with Cohomology
//!
//! Combines heat diffusion on the sheaf with cohomological obstruction indicators.
//! The diffusion process smooths local inconsistencies while the obstruction
//! indicators show where global consistency cannot be achieved.
use super::laplacian::{LaplacianConfig, SheafLaplacian};
use super::obstruction::{ObstructionDetector, ObstructionSeverity};
use super::sheaf::SheafSection;
use crate::substrate::NodeId;
use crate::substrate::SheafGraph;
use ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Configuration for sheaf diffusion
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SheafDiffusionConfig {
    /// Time step for diffusion
    pub dt: f64,
    /// Number of diffusion steps
    pub num_steps: usize,
    /// Diffusion coefficient (multiplies dt in each update)
    pub diffusion_coefficient: f64,
    /// Whether to track obstruction indicators
    pub track_obstructions: bool,
    /// Convergence tolerance for early stopping
    pub convergence_tolerance: f64,
    // NOTE(review): `max_step_change` is not read by the diffusion code
    // visible in this file — confirm it is consumed elsewhere.
    /// Maximum residual change per step
    pub max_step_change: f64,
}
impl Default for SheafDiffusionConfig {
    /// Defaults: dt = 0.1, 100 steps, unit diffusion coefficient,
    /// obstruction tracking enabled, 1e-6 convergence tolerance.
    fn default() -> Self {
        Self {
            dt: 0.1,
            num_steps: 100,
            diffusion_coefficient: 1.0,
            track_obstructions: true,
            convergence_tolerance: 1e-6,
            max_step_change: 1.0,
        }
    }
}
/// Obstruction indicator during diffusion
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObstructionIndicator {
    /// Step number when detected
    pub step: usize,
    /// Total obstruction energy at this step
    pub energy: f64,
    /// Severity level (classified from `energy` at construction time)
    pub severity: ObstructionSeverity,
    /// Per-node obstruction energies (filled in by the diffusion loop)
    pub node_energies: HashMap<NodeId, f64>,
    /// Whether obstruction is persistent (not decreasing)
    pub is_persistent: bool,
}
impl ObstructionIndicator {
    /// Build an indicator for `step` with the given obstruction `energy`.
    ///
    /// Severity is classified against the fixed thresholds
    /// [0.01, 0.1, 0.5, 1.0]; node energies start empty and persistence
    /// starts false, to be filled in by the caller.
    pub fn new(step: usize, energy: f64) -> Self {
        let severity = ObstructionSeverity::from_energy(energy, &[0.01, 0.1, 0.5, 1.0]);
        Self {
            step,
            energy,
            severity,
            node_energies: HashMap::new(),
            is_persistent: false,
        }
    }

    /// True when the classified severity demands intervention.
    pub fn is_significant(&self) -> bool {
        self.severity.requires_action()
    }
}
/// Result of sheaf diffusion
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiffusionResult {
    /// Final section after diffusion (per-node value vectors)
    pub final_section: HashMap<NodeId, Vec<f64>>,
    /// Initial energy
    pub initial_energy: f64,
    /// Final energy
    pub final_energy: f64,
    /// Energy history (one entry per step, starting with the initial energy)
    pub energy_history: Vec<f64>,
    /// Obstruction indicators (empty unless tracking was enabled)
    pub obstruction_indicators: Vec<ObstructionIndicator>,
    /// Number of steps taken
    pub steps_taken: usize,
    /// Whether diffusion converged before the step limit
    pub converged: bool,
    /// Residual obstruction (cohomological component that cannot be diffused away)
    pub residual_obstruction: Option<f64>,
}
impl DiffusionResult {
    /// Fraction of the initial energy removed by diffusion
    /// (0.0 when the initial energy was not positive).
    pub fn energy_reduction(&self) -> f64 {
        if self.initial_energy > 0.0 {
            1.0 - self.final_energy / self.initial_energy
        } else {
            0.0
        }
    }

    /// Whether a non-negligible residual obstruction remained.
    pub fn has_obstruction(&self) -> bool {
        match self.residual_obstruction {
            Some(energy) => energy > 0.01,
            None => false,
        }
    }

    /// The indicators whose obstruction did not decay during diffusion.
    pub fn persistent_obstructions(&self) -> Vec<&ObstructionIndicator> {
        let mut persistent = Vec::new();
        for indicator in &self.obstruction_indicators {
            if indicator.is_persistent {
                persistent.push(indicator);
            }
        }
        persistent
    }
}
/// Sheaf diffusion with cohomological obstruction tracking
pub struct SheafDiffusion {
    /// Configuration
    config: SheafDiffusionConfig,
    /// Laplacian for diffusion
    laplacian: SheafLaplacian,
    // NOTE(review): `detector` is stored but not used by the methods
    // visible in this file — confirm downstream use.
    /// Obstruction detector
    detector: ObstructionDetector,
}
impl SheafDiffusion {
    /// Create a new diffusion process
    ///
    /// Builds the sheaf Laplacian from `graph` with default Laplacian
    /// settings and a fresh obstruction detector.
    pub fn new(graph: &SheafGraph, config: SheafDiffusionConfig) -> Self {
        let laplacian_config = LaplacianConfig::default();
        let laplacian = SheafLaplacian::from_graph(graph, laplacian_config);
        let detector = ObstructionDetector::new();
        Self {
            config,
            laplacian,
            detector,
        }
    }
    /// Run diffusion on a SheafGraph
    ///
    /// The diffusion equation is:
    /// dx/dt = -L * x
    ///
    /// where L is the sheaf Laplacian. This smooths inconsistencies but
    /// cannot eliminate cohomological obstructions.
    ///
    /// Uses explicit Euler steps of size `config.dt`, records the energy
    /// after every step, samples obstruction indicators every 10 steps
    /// (when tracking is enabled), and stops early once the per-step
    /// energy change drops below `config.convergence_tolerance`.
    pub fn diffuse(&self, graph: &SheafGraph) -> DiffusionResult {
        // Initialize section from graph state
        let mut section = self.graph_to_section(graph);
        // Compute initial energy
        let initial_energy = self.laplacian.energy(graph, &section);
        let mut energy_history = vec![initial_energy];
        let mut obstruction_indicators = Vec::new();
        let mut prev_energy = initial_energy;
        let mut converged = false;
        let mut steps_taken = 0;
        // Run diffusion steps
        for step in 0..self.config.num_steps {
            steps_taken = step + 1;
            // Compute Laplacian of current section
            let laplacian_x = self.laplacian.apply(graph, &section);
            // Update: x_{n+1} = x_n - dt * D * L * x_n
            let scale = self.config.dt * self.config.diffusion_coefficient;
            self.update_section(&mut section, &laplacian_x, -scale);
            // Compute new energy
            let new_energy = self.laplacian.energy(graph, &section);
            energy_history.push(new_energy);
            // Track obstruction indicators (sampled every 10th step)
            if self.config.track_obstructions && step % 10 == 0 {
                let mut indicator = ObstructionIndicator::new(step, new_energy);
                // Check if obstruction is persistent: after a warm-up of 20
                // steps, compare against the mean of the last 10 energies;
                // a relative change under 1% counts as a plateau.
                if step > 20 {
                    let recent_energies =
                        &energy_history[energy_history.len().saturating_sub(10)..];
                    let avg_recent: f64 =
                        recent_energies.iter().sum::<f64>() / recent_energies.len() as f64;
                    indicator.is_persistent = (new_energy - avg_recent).abs() < 0.01 * avg_recent;
                }
                // Compute per-node energies
                indicator.node_energies = self.compute_node_energies(graph, &section);
                obstruction_indicators.push(indicator);
            }
            // Check convergence
            let energy_change = (prev_energy - new_energy).abs();
            if energy_change < self.config.convergence_tolerance {
                converged = true;
                break;
            }
            prev_energy = new_energy;
        }
        let final_energy = energy_history.last().copied().unwrap_or(initial_energy);
        // Detect residual obstruction (energy that cannot be diffused away):
        // a converged run that still holds energy above 0.001 is treated as
        // carrying a cohomological component.
        let residual_obstruction = if converged && final_energy > 0.001 {
            Some(final_energy)
        } else {
            None
        };
        // Convert final section to result format
        let final_section: HashMap<NodeId, Vec<f64>> = section
            .sections
            .into_iter()
            .map(|(k, v)| (k, v.to_vec()))
            .collect();
        DiffusionResult {
            final_section,
            initial_energy,
            final_energy,
            energy_history,
            obstruction_indicators,
            steps_taken,
            converged,
            residual_obstruction,
        }
    }
    /// Diffuse with adaptive time stepping
    ///
    /// Like [`Self::diffuse`], but the step size `dt` is tuned on the fly:
    /// each outer step tries up to 5 trial updates, halving `dt` whenever a
    /// trial would increase the energy and growing it by 10% (capped at 1.0)
    /// once a trial succeeds. The lowest-energy trial is kept even if none
    /// improved on the previous energy. Runs for at most
    /// `2 * config.num_steps` outer steps.
    pub fn diffuse_adaptive(&self, graph: &SheafGraph) -> DiffusionResult {
        let mut section = self.graph_to_section(graph);
        let initial_energy = self.laplacian.energy(graph, &section);
        let mut energy_history = vec![initial_energy];
        let mut obstruction_indicators = Vec::new();
        let mut dt = self.config.dt;
        let mut prev_energy = initial_energy;
        let mut steps_taken = 0;
        let mut converged = false;
        for step in 0..self.config.num_steps * 2 {
            steps_taken = step + 1;
            // Compute update
            let laplacian_x = self.laplacian.apply(graph, &section);
            // Adaptive step: reduce dt if energy increases
            let mut best_energy = f64::MAX;
            let mut best_section = section.clone();
            for _ in 0..5 {
                let mut trial_section = section.clone();
                let scale = dt * self.config.diffusion_coefficient;
                self.update_section(&mut trial_section, &laplacian_x, -scale);
                let trial_energy = self.laplacian.energy(graph, &trial_section);
                // Remember the best trial seen so far.
                if trial_energy < best_energy {
                    best_energy = trial_energy;
                    best_section = trial_section;
                }
                if trial_energy <= prev_energy {
                    // Successful step: cautiously enlarge dt for next time.
                    dt = (dt * 1.1).min(1.0);
                    break;
                } else {
                    // Overshoot: retry with half the step size.
                    dt *= 0.5;
                }
            }
            section = best_section;
            energy_history.push(best_energy);
            // Track obstruction
            if self.config.track_obstructions && step % 10 == 0 {
                let indicator = ObstructionIndicator::new(step, best_energy);
                obstruction_indicators.push(indicator);
            }
            // Check convergence
            if (prev_energy - best_energy).abs() < self.config.convergence_tolerance {
                converged = true;
                break;
            }
            prev_energy = best_energy;
        }
        let final_energy = energy_history.last().copied().unwrap_or(initial_energy);
        // Converged-but-nonzero energy signals an obstruction that diffusion
        // alone cannot remove (heuristic 0.001 floor, same as `diffuse`).
        let residual_obstruction = if converged && final_energy > 0.001 {
            Some(final_energy)
        } else {
            None
        };
        let final_section: HashMap<NodeId, Vec<f64>> = section
            .sections
            .into_iter()
            .map(|(k, v)| (k, v.to_vec()))
            .collect();
        DiffusionResult {
            final_section,
            initial_energy,
            final_energy,
            energy_history,
            obstruction_indicators,
            steps_taken,
            converged,
            residual_obstruction,
        }
    }
/// Convert graph to section
fn graph_to_section(&self, graph: &SheafGraph) -> SheafSection {
let mut section = SheafSection::empty();
for node_id in graph.node_ids() {
if let Some(node) = graph.get_node(node_id) {
let values: Vec<f64> = node.state.as_slice().iter().map(|&x| x as f64).collect();
section.set(node_id, Array1::from_vec(values));
}
}
section
}
    /// Update section by adding scaled Laplacian
    ///
    /// Computes `section[v] += scale * laplacian[v]` for every node present
    /// in both sections; nodes missing from `section` are left untouched.
    /// Callers pass a negative `scale` to take a descent step.
    fn update_section(&self, section: &mut SheafSection, laplacian: &SheafSection, scale: f64) {
        for (node_id, laplacian_val) in &laplacian.sections {
            if let Some(current) = section.sections.get_mut(node_id) {
                *current = &*current + &(laplacian_val * scale);
                // Clamp values to prevent instability.
                // NOTE(review): this clamps each component's *value* to
                // [-max_step_change, max_step_change], not the per-step
                // change the field name suggests — confirm intent.
                for val in current.iter_mut() {
                    *val = val.clamp(-self.config.max_step_change, self.config.max_step_change);
                }
            }
        }
    }
/// Compute per-node energies
fn compute_node_energies(
&self,
graph: &SheafGraph,
section: &SheafSection,
) -> HashMap<NodeId, f64> {
let mut node_energies: HashMap<NodeId, f64> = HashMap::new();
for node_id in graph.node_ids() {
let mut energy = 0.0;
for edge_id in graph.edges_incident_to(node_id) {
if let Some(edge) = graph.get_edge(edge_id) {
let other = if edge.source == node_id {
edge.target
} else {
edge.source
};
if let (Some(this_val), Some(other_val)) =
(section.get(node_id), section.get(other))
{
let residual = this_val - other_val;
let residual_norm: f64 = residual.iter().map(|x| x * x).sum();
energy += (edge.weight as f64) * residual_norm;
}
}
}
node_energies.insert(node_id, energy);
}
node_energies
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::substrate::edge::SheafEdgeBuilder;
    use crate::substrate::node::SheafNodeBuilder;
    use uuid::Uuid;

    /// Fresh, unique node identifier for tests.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }

    /// Build a two-node graph joined by a single identity-restriction edge
    /// of weight 1.0, returning the graph and both node ids.
    fn pair_graph(state_a: &[f64], state_b: &[f64]) -> (SheafGraph, NodeId, NodeId) {
        let graph = SheafGraph::new();
        let a = graph.add_node(SheafNodeBuilder::new().state_from_slice(state_a).build());
        let b = graph.add_node(SheafNodeBuilder::new().state_from_slice(state_b).build());
        let edge = SheafEdgeBuilder::new(a, b)
            .identity_restrictions(state_a.len())
            .weight(1.0)
            .build();
        graph.add_edge(edge).unwrap();
        (graph, a, b)
    }

    #[test]
    fn test_diffusion_reduces_energy() {
        // Two nodes holding orthogonal states disagree along their edge.
        let (graph, _, _) = pair_graph(&[1.0, 0.0], &[0.0, 1.0]);
        let config = SheafDiffusionConfig {
            num_steps: 50,
            ..Default::default()
        };
        let diffusion = SheafDiffusion::new(&graph, config);
        let result = diffusion.diffuse(&graph);
        // Smoothing must strictly lower the coherence energy.
        assert!(result.final_energy < result.initial_energy);
        assert!(result.energy_reduction() > 0.0);
    }

    #[test]
    fn test_converged_diffusion() {
        // Identical states: the graph is already globally coherent.
        let (graph, _, _) = pair_graph(&[1.0, 1.0], &[1.0, 1.0]);
        let diffusion = SheafDiffusion::new(&graph, SheafDiffusionConfig::default());
        let result = diffusion.diffuse(&graph);
        // Should converge quickly to (numerically) zero energy.
        assert!(result.final_energy < 0.01);
    }

    #[test]
    fn test_adaptive_diffusion() {
        // Strongly opposed scalar states stress the step-size control.
        let (graph, _, _) = pair_graph(&[5.0], &[-5.0]);
        let diffusion = SheafDiffusion::new(&graph, SheafDiffusionConfig::default());
        let result = diffusion.diffuse_adaptive(&graph);
        // Adaptive stepping should still make net progress.
        assert!(result.final_energy < result.initial_energy);
    }
}

View File

@@ -0,0 +1,544 @@
//! Sheaf Laplacian
//!
//! The sheaf Laplacian L_F generalizes the graph Laplacian to sheaves.
//! It is defined as L_F = delta^* delta where delta is the coboundary.
//!
//! The spectrum of L_F reveals global structure:
//! - Zero eigenvalues correspond to cohomology classes
//! - The multiplicity of 0 equals the Betti number
//! - Small eigenvalues indicate near-obstructions
use super::sheaf::{Sheaf, SheafSection};
use crate::substrate::NodeId;
use crate::substrate::SheafGraph;
use ndarray::{Array1, Array2};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Configuration for Laplacian computation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LaplacianConfig {
    /// Tolerance for zero eigenvalues (also used as the power-iteration
    /// convergence threshold)
    pub zero_tolerance: f64,
    /// Maximum iterations for iterative eigensolvers
    pub max_iterations: usize,
    /// Number of eigenvalues to compute (capped at the matrix size)
    pub num_eigenvalues: usize,
    /// Whether to compute eigenvectors
    pub compute_eigenvectors: bool,
}
impl Default for LaplacianConfig {
fn default() -> Self {
Self {
zero_tolerance: 1e-8,
max_iterations: 1000,
num_eigenvalues: 10,
compute_eigenvectors: true,
}
}
}
/// Spectrum of the sheaf Laplacian
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LaplacianSpectrum {
    /// Eigenvalues in ascending order
    pub eigenvalues: Vec<f64>,
    /// Eigenvectors, paired index-for-index with `eigenvalues`
    /// (None when eigenvector computation was disabled)
    pub eigenvectors: Option<Vec<Array1<f64>>>,
    /// Number of zero eigenvalues (cohomology dimension)
    pub null_space_dim: usize,
    /// Spectral gap (smallest positive eigenvalue); None when no eigenvalue
    /// exceeds the zero tolerance
    pub spectral_gap: Option<f64>,
}
impl LaplacianSpectrum {
    /// Dimension of the kernel, i.e. the Betti number read off the spectrum.
    pub fn betti_number(&self) -> usize {
        self.null_space_dim
    }

    /// True when a smallest positive eigenvalue was identified.
    pub fn has_spectral_gap(&self) -> bool {
        self.spectral_gap.is_some()
    }

    /// Eigenvectors spanning the null space (harmonic representatives).
    ///
    /// Relies on `eigenvalues` being sorted ascending so the zero modes
    /// come first; returns an empty list when eigenvectors were not kept.
    pub fn harmonic_representatives(&self) -> Vec<&Array1<f64>> {
        match self.eigenvectors {
            Some(ref evecs) => evecs.iter().take(self.null_space_dim).collect(),
            None => Vec::new(),
        }
    }
}
/// A harmonic representative of a cohomology class
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HarmonicRepresentative {
    /// The harmonic cochain (Laplacian = 0), stored as one block per vertex
    pub cochain: HashMap<NodeId, Array1<f64>>,
    /// L2 norm over all vertex blocks combined
    pub norm: f64,
    /// Associated eigenvalue (should be near zero)
    pub eigenvalue: f64,
}
impl HarmonicRepresentative {
    /// Create from vertex values.
    ///
    /// The L2 norm is taken over the concatenation of every per-vertex
    /// block.
    pub fn new(cochain: HashMap<NodeId, Array1<f64>>, eigenvalue: f64) -> Self {
        let squared_total: f64 = cochain
            .values()
            .flat_map(|block| block.iter())
            .map(|x| x * x)
            .sum();
        Self {
            cochain,
            norm: squared_total.sqrt(),
            eigenvalue,
        }
    }

    /// Normalize to unit norm.
    ///
    /// Near-zero cochains (norm <= 1e-10) are left untouched to avoid a
    /// division by zero.
    pub fn normalize(&mut self) {
        if self.norm <= 1e-10 {
            return;
        }
        let inv_norm = self.norm.recip();
        for block in self.cochain.values_mut() {
            *block = &*block * inv_norm;
        }
        self.norm = 1.0;
    }
}
/// The sheaf Laplacian L_F = delta^* delta
///
/// Stores a flattened description of the graph: a weighted edge list and a
/// vertex list with stalk dimensions, plus index/offset maps that position
/// each vertex's block inside a single global vector of length `total_dim`.
pub struct SheafLaplacian {
    /// Configuration
    config: LaplacianConfig,
    /// Edge list (source, target, edge_weight)
    edges: Vec<(NodeId, NodeId, f64)>,
    /// Vertex list with stalk dimensions, in construction order
    vertices: Vec<(NodeId, usize)>,
    /// Total dimension (sum of all stalk dimensions)
    total_dim: usize,
    /// Vertex to its enumeration index at construction time
    /// (may skip over unresolvable vertices)
    vertex_to_idx: HashMap<NodeId, usize>,
    /// Vertex to offset of its block in the global vector
    vertex_to_offset: HashMap<NodeId, usize>,
}
impl SheafLaplacian {
/// Create a sheaf Laplacian from a SheafGraph
pub fn from_graph(graph: &SheafGraph, config: LaplacianConfig) -> Self {
let mut edges = Vec::new();
let mut vertices = Vec::new();
let mut vertex_to_idx = HashMap::new();
let mut vertex_to_offset = HashMap::new();
// Collect vertices
let mut offset = 0;
for (idx, node_id) in graph.node_ids().into_iter().enumerate() {
if let Some(node) = graph.get_node(node_id) {
let dim = node.state.dim();
vertices.push((node_id, dim));
vertex_to_idx.insert(node_id, idx);
vertex_to_offset.insert(node_id, offset);
offset += dim;
}
}
// Collect edges
for edge_id in graph.edge_ids() {
if let Some(edge) = graph.get_edge(edge_id) {
edges.push((edge.source, edge.target, edge.weight as f64));
}
}
Self {
config,
edges,
vertices,
total_dim: offset,
vertex_to_idx,
vertex_to_offset,
}
}
/// Create from a Sheaf and edge list
pub fn from_sheaf(
sheaf: &Sheaf,
edges: Vec<(NodeId, NodeId, f64)>,
config: LaplacianConfig,
) -> Self {
let mut vertices = Vec::new();
let mut vertex_to_idx = HashMap::new();
let mut vertex_to_offset = HashMap::new();
let mut offset = 0;
for (idx, vertex) in sheaf.vertices().enumerate() {
if let Some(dim) = sheaf.stalk_dim(vertex) {
vertices.push((vertex, dim));
vertex_to_idx.insert(vertex, idx);
vertex_to_offset.insert(vertex, offset);
offset += dim;
}
}
Self {
config,
edges,
vertices,
total_dim: offset,
vertex_to_idx,
vertex_to_offset,
}
}
    /// Get total dimension
    ///
    /// The sum of all vertex stalk dimensions, i.e. the length of the
    /// global vector (and the side of the dense matrix) this operator
    /// acts on.
    pub fn dimension(&self) -> usize {
        self.total_dim
    }
/// Build the Laplacian matrix explicitly
///
/// L = sum_e w_e (P_s - P_t)^T D_e (P_s - P_t)
/// where P_s, P_t are projection/restriction operators and D_e is edge weight
pub fn build_matrix(&self, graph: &SheafGraph) -> Array2<f64> {
let n = self.total_dim;
let mut laplacian = Array2::zeros((n, n));
for &(source, target, weight) in &self.edges {
let source_offset = self.vertex_to_offset.get(&source).copied().unwrap_or(0);
let target_offset = self.vertex_to_offset.get(&target).copied().unwrap_or(0);
if let Some(edge) = graph.edge_ids().into_iter().find_map(|eid| {
let e = graph.get_edge(eid)?;
if e.source == source && e.target == target {
Some(e)
} else if e.source == target && e.target == source {
Some(e)
} else {
None
}
}) {
let source_dim = self
.vertices
.iter()
.find(|(v, _)| *v == source)
.map(|(_, d)| *d)
.unwrap_or(0);
let target_dim = self
.vertices
.iter()
.find(|(v, _)| *v == target)
.map(|(_, d)| *d)
.unwrap_or(0);
// For identity restrictions, the Laplacian contribution is:
// L_ss += w_e * I
// L_tt += w_e * I
// L_st = L_ts = -w_e * I
let dim = source_dim.min(target_dim);
for i in 0..dim {
// Diagonal blocks
laplacian[[source_offset + i, source_offset + i]] += weight;
laplacian[[target_offset + i, target_offset + i]] += weight;
// Off-diagonal blocks
laplacian[[source_offset + i, target_offset + i]] -= weight;
laplacian[[target_offset + i, source_offset + i]] -= weight;
}
}
}
laplacian
}
/// Apply the Laplacian to a section (matrix-free)
///
/// L * x = sum_e w_e * (rho_s(x_s) - rho_t(x_t))^2
pub fn apply(&self, graph: &SheafGraph, section: &SheafSection) -> SheafSection {
let mut result = SheafSection::empty();
// Initialize result with zeros
for (vertex, dim) in &self.vertices {
result.set(*vertex, Array1::zeros(*dim));
}
// Add contributions from each edge
for &(source, target, weight) in &self.edges {
if let (Some(s_val), Some(t_val)) = (section.get(source), section.get(target)) {
// Residual = s_val - t_val (for identity restrictions)
let residual = s_val - t_val;
// Update source: add weight * residual
if let Some(result_s) = result.sections.get_mut(&source) {
*result_s = &*result_s + &(&residual * weight);
}
// Update target: add weight * (-residual)
if let Some(result_t) = result.sections.get_mut(&target) {
*result_t = &*result_t - &(&residual * weight);
}
}
}
result
}
/// Compute the quadratic form x^T L x (the energy)
pub fn energy(&self, graph: &SheafGraph, section: &SheafSection) -> f64 {
let mut energy = 0.0;
for &(source, target, weight) in &self.edges {
if let (Some(s_val), Some(t_val)) = (section.get(source), section.get(target)) {
let residual = s_val - t_val;
let norm_sq: f64 = residual.iter().map(|x| x * x).sum();
energy += weight * norm_sq;
}
}
energy
}
    /// Compute the spectrum using power iteration
    ///
    /// Materializes the dense Laplacian for `graph` and delegates to the
    /// matrix-based solver; costs O(n^2) memory for n total dimensions.
    pub fn compute_spectrum(&self, graph: &SheafGraph) -> LaplacianSpectrum {
        let matrix = self.build_matrix(graph);
        self.compute_spectrum_from_matrix(&matrix)
    }
/// Compute spectrum from explicit matrix
fn compute_spectrum_from_matrix(&self, matrix: &Array2<f64>) -> LaplacianSpectrum {
let n = matrix.nrows();
if n == 0 {
return LaplacianSpectrum {
eigenvalues: Vec::new(),
eigenvectors: None,
null_space_dim: 0,
spectral_gap: None,
};
}
// Simple power iteration for largest eigenvalues, then deflation
// For production, use proper eigenvalue solvers (LAPACK, etc.)
let num_eigs = self.config.num_eigenvalues.min(n);
let mut eigenvalues = Vec::with_capacity(num_eigs);
let mut eigenvectors = if self.config.compute_eigenvectors {
Some(Vec::with_capacity(num_eigs))
} else {
None
};
let mut deflated = matrix.clone();
for _ in 0..num_eigs {
let (eval, evec) = self.power_iteration(&deflated);
eigenvalues.push(eval);
if self.config.compute_eigenvectors {
eigenvectors.as_mut().unwrap().push(evec.clone());
}
// Deflate: A <- A - lambda * v * v^T
for i in 0..n {
for j in 0..n {
deflated[[i, j]] -= eval * evec[i] * evec[j];
}
}
}
// Count zero eigenvalues
let null_space_dim = eigenvalues
.iter()
.filter(|&&e| e.abs() < self.config.zero_tolerance)
.count();
// Find spectral gap
let spectral_gap = eigenvalues
.iter()
.find(|&&e| e > self.config.zero_tolerance)
.copied();
LaplacianSpectrum {
eigenvalues,
eigenvectors,
null_space_dim,
spectral_gap,
}
}
    /// Power iteration for dominant eigenvalue
    ///
    /// Returns the (eigenvalue, eigenvector) pair of largest-magnitude
    /// eigenvalue of `matrix`, iterating until the Rayleigh quotient
    /// changes by less than `config.zero_tolerance` or
    /// `config.max_iterations` is reached. The returned vector is
    /// unit-norm unless the matrix (nearly) annihilates the iterate, in
    /// which case the previous iterate is kept un-normalized.
    fn power_iteration(&self, matrix: &Array2<f64>) -> (f64, Array1<f64>) {
        let n = matrix.nrows();
        // Start from the normalized all-ones vector.
        let mut v = Array1::from_elem(n, 1.0 / (n as f64).sqrt());
        let mut eigenvalue = 0.0;
        for _ in 0..self.config.max_iterations {
            // Multiply by matrix
            let mut av = Array1::zeros(n);
            for i in 0..n {
                for j in 0..n {
                    av[i] += matrix[[i, j]] * v[j];
                }
            }
            // Compute Rayleigh quotient (v is unit-norm here)
            let new_eigenvalue: f64 = v.iter().zip(av.iter()).map(|(a, b)| a * b).sum();
            // Normalize; skip when the image is numerically zero
            let norm = av.iter().map(|x| x * x).sum::<f64>().sqrt();
            if norm > 1e-10 {
                v = av / norm;
            }
            // Check convergence
            if (new_eigenvalue - eigenvalue).abs() < self.config.zero_tolerance {
                eigenvalue = new_eigenvalue;
                break;
            }
            eigenvalue = new_eigenvalue;
        }
        (eigenvalue, v)
    }
/// Find harmonic representatives (kernel of Laplacian)
pub fn harmonic_representatives(&self, graph: &SheafGraph) -> Vec<HarmonicRepresentative> {
let spectrum = self.compute_spectrum(graph);
let mut harmonics = Vec::new();
if let Some(ref eigenvectors) = spectrum.eigenvectors {
for (i, eval) in spectrum.eigenvalues.iter().enumerate() {
if eval.abs() < self.config.zero_tolerance {
// Convert eigenvector to section format
let evec = &eigenvectors[i];
let mut cochain = HashMap::new();
for (vertex, dim) in &self.vertices {
let offset = self.vertex_to_offset.get(vertex).copied().unwrap_or(0);
let values = Array1::from_iter((0..*dim).map(|j| evec[offset + j]));
cochain.insert(*vertex, values);
}
harmonics.push(HarmonicRepresentative::new(cochain, *eval));
}
}
}
harmonics
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::substrate::edge::SheafEdgeBuilder;
    use crate::substrate::node::SheafNodeBuilder;
    use uuid::Uuid;

    /// Fresh, unique node identifier for tests.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }

    /// Add a node carrying `state` to `graph` and return its id.
    fn add_state_node(graph: &SheafGraph, state: &[f64]) -> NodeId {
        graph.add_node(SheafNodeBuilder::new().state_from_slice(state).build())
    }

    /// Connect two nodes with an identity-restriction edge of weight 1.0.
    fn connect(graph: &SheafGraph, a: NodeId, b: NodeId, dim: usize) {
        let edge = SheafEdgeBuilder::new(a, b)
            .identity_restrictions(dim)
            .weight(1.0)
            .build();
        graph.add_edge(edge).unwrap();
    }

    #[test]
    fn test_laplacian_simple() {
        let graph = SheafGraph::new();
        let a = add_state_node(&graph, &[1.0, 0.0]);
        let b = add_state_node(&graph, &[1.0, 0.0]);
        connect(&graph, a, b, 2);
        let laplacian = SheafLaplacian::from_graph(&graph, LaplacianConfig::default());
        // 2 nodes x 2-dimensional stalks = 4 total dimensions.
        let matrix = laplacian.build_matrix(&graph);
        assert_eq!(matrix.nrows(), 4);
        // A Laplacian must be positive semi-definite: no eigenvalue below
        // zero (up to numerical tolerance).
        let spectrum = laplacian.compute_spectrum(&graph);
        for eval in &spectrum.eigenvalues {
            assert!(*eval >= -1e-10);
        }
    }

    #[test]
    fn test_laplacian_energy() {
        let graph = SheafGraph::new();
        let a = add_state_node(&graph, &[1.0]);
        let b = add_state_node(&graph, &[2.0]);
        connect(&graph, a, b, 1);
        let laplacian = SheafLaplacian::from_graph(&graph, LaplacianConfig::default());
        let mut section = SheafSection::empty();
        section.set(a, Array1::from_vec(vec![1.0]));
        section.set(b, Array1::from_vec(vec![2.0]));
        // Energy = w * |1 - 2|^2 = 1.
        let energy = laplacian.energy(&graph, &section);
        assert!((energy - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_connected_graph_has_one_zero_eigenvalue() {
        let graph = SheafGraph::new();
        let a = add_state_node(&graph, &[1.0]);
        let b = add_state_node(&graph, &[1.0]);
        let c = add_state_node(&graph, &[1.0]);
        // Path graph: a -- b -- c.
        connect(&graph, a, b, 1);
        connect(&graph, b, c, 1);
        let config = LaplacianConfig {
            num_eigenvalues: 3,
            ..Default::default()
        };
        let laplacian = SheafLaplacian::from_graph(&graph, config);
        let spectrum = laplacian.compute_spectrum(&graph);
        // The constant section spans the kernel of a connected graph's
        // Laplacian, so exactly one zero mode.
        assert_eq!(spectrum.null_space_dim, 1);
    }
}

View File

@@ -0,0 +1,60 @@
//! Sheaf Cohomology Module for Prime-Radiant
//!
//! This module implements sheaf cohomology computations for detecting global
//! inconsistencies (obstructions) in the coherence graph. Sheaf cohomology
//! provides powerful tools for understanding when local consistency cannot
//! be extended to global consistency.
//!
//! # Mathematical Background
//!
//! For a sheaf F on a graph G, the cohomology groups H^n(G, F) measure
//! obstructions to extending local sections to global ones:
//!
//! - **H^0(G, F)**: Global sections (globally consistent assignments)
//! - **H^1(G, F)**: First cohomology (obstructions to patching local data)
//!
//! The key computational tool is the **coboundary operator** delta:
//! ```text
//! delta^0: C^0(G, F) -> C^1(G, F)
//! (delta^0 f)(e) = rho_t(f(t(e))) - rho_s(f(s(e)))
//! ```
//!
//! where rho_s, rho_t are the restriction maps on edge e.
//!
//! # Sheaf Laplacian
//!
//! The **sheaf Laplacian** L = delta^T delta generalizes the graph Laplacian:
//! ```text
//! L_F = sum_e w_e (rho_s - rho_t)^T (rho_s - rho_t)
//! ```
//!
//! Its spectrum reveals global structure:
//! - Zero eigenvalues correspond to cohomology classes
//! - Small eigenvalues indicate near-obstructions
//!
//! # References
//!
//! 1. Hansen, J., & Ghrist, R. (2019). "Toward a spectral theory of cellular sheaves."
//! 2. Robinson, M. (2014). "Topological Signal Processing."
//! 3. Curry, J. (2014). "Sheaves, Cosheaves, and Applications."
// Implementation submodules; their contents are re-exported selectively
// below.
mod cocycle;
mod cohomology_group;
mod diffusion;
mod laplacian;
mod neural;
mod obstruction;
mod sheaf;
mod simplex;
// Public API of the cohomology module, grouped by submodule.
pub use cocycle::{Coboundary, Cocycle, CocycleBuilder};
pub use cohomology_group::{BettiNumbers, CohomologyComputer, CohomologyConfig, CohomologyGroup};
pub use diffusion::{DiffusionResult, ObstructionIndicator, SheafDiffusion, SheafDiffusionConfig};
pub use laplacian::{HarmonicRepresentative, LaplacianConfig, LaplacianSpectrum, SheafLaplacian};
pub use neural::{
    Activation, CohomologyPooling, PoolingMethod, SheafConvolution, SheafNeuralConfig,
    SheafNeuralLayer,
};
pub use obstruction::{Obstruction, ObstructionDetector, ObstructionReport, ObstructionSeverity};
pub use sheaf::{LocalSection, Sheaf, SheafBuilder, SheafSection, Stalk};
pub use simplex::{Chain, Cochain, Simplex, SimplexId, SimplicialComplex};

View File

@@ -0,0 +1,649 @@
//! Sheaf Neural Network Layers
//!
//! Neural network layers that respect sheaf structure, enabling
//! coherence-aware deep learning.
use super::laplacian::{LaplacianConfig, SheafLaplacian};
use super::sheaf::{Sheaf, SheafSection};
use crate::substrate::NodeId;
use crate::substrate::SheafGraph;
use ndarray::{Array1, Array2};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Activation functions for neural layers
///
/// All variants are scalar maps applied element-wise except `Softmax`,
/// which is a vector-level operation (see `Activation::apply_array`).
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum Activation {
    /// No activation (identity)
    Identity,
    /// ReLU: max(0, x)
    ReLU,
    /// Leaky ReLU: max(alpha * x, x); the payload is the slope `alpha`
    /// applied to negative inputs
    LeakyReLU(f64),
    /// Sigmoid: 1 / (1 + exp(-x))
    Sigmoid,
    /// Tanh: tanh(x)
    Tanh,
    /// GELU: x * Phi(x) (implemented via the sigmoid approximation)
    GELU,
    /// Softmax (applied per-node)
    Softmax,
}
impl Activation {
    /// Evaluate the activation at a single scalar.
    ///
    /// `Softmax` is a vector-valued operation handled by
    /// [`Activation::apply_array`]; as a scalar map it degenerates to the
    /// identity.
    pub fn apply(&self, x: f64) -> f64 {
        match *self {
            Activation::Identity | Activation::Softmax => x,
            Activation::ReLU => x.max(0.0),
            Activation::LeakyReLU(alpha) => {
                if x > 0.0 {
                    x
                } else {
                    alpha * x
                }
            }
            Activation::Sigmoid => 1.0 / (1.0 + (-x).exp()),
            Activation::Tanh => x.tanh(),
            Activation::GELU => {
                // Approximation: x * sigmoid(1.702 * x)
                let s = 1.0 / (1.0 + (-1.702 * x).exp());
                x * s
            }
        }
    }

    /// Apply the activation to a whole array.
    ///
    /// `Softmax` gets the usual max-shift for numerical stability before
    /// exponentiating; everything else maps element-wise through `apply`.
    pub fn apply_array(&self, arr: &Array1<f64>) -> Array1<f64> {
        if matches!(self, Activation::Softmax) {
            let max_val = arr.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
            let exps = arr.mapv(|x| (x - max_val).exp());
            let total: f64 = exps.sum();
            exps / total
        } else {
            arr.mapv(|x| self.apply(x))
        }
    }

    /// Pointwise derivative of the activation at `x`.
    pub fn derivative(&self, x: f64) -> f64 {
        match *self {
            Activation::Identity => 1.0,
            // Softmax would need the full Jacobian; 1.0 mirrors the
            // identity treatment of the scalar forward pass.
            Activation::Softmax => 1.0,
            Activation::ReLU => {
                if x > 0.0 {
                    1.0
                } else {
                    0.0
                }
            }
            Activation::LeakyReLU(alpha) => {
                if x > 0.0 {
                    1.0
                } else {
                    alpha
                }
            }
            Activation::Sigmoid => {
                let s = 1.0 / (1.0 + (-x).exp());
                s * (1.0 - s)
            }
            Activation::Tanh => {
                let t = x.tanh();
                1.0 - t * t
            }
            Activation::GELU => {
                // Derivative of the sigmoid-based GELU approximation.
                let s = 1.0 / (1.0 + (-1.702 * x).exp());
                s + 1.702 * x * s * (1.0 - s)
            }
        }
    }
}
/// Configuration for sheaf neural layer
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SheafNeuralConfig {
    /// Input dimension per node
    pub input_dim: usize,
    /// Output dimension per node
    pub output_dim: usize,
    /// Number of diffusion steps applied after the affine transform
    pub diffusion_steps: usize,
    /// Diffusion coefficient (step-size multiplier per diffusion step)
    pub diffusion_coeff: f64,
    /// Activation function
    pub activation: Activation,
    /// Dropout rate
    /// NOTE(review): not applied anywhere in this file — confirm whether
    /// dropout is handled by a caller or is still unimplemented.
    pub dropout: f64,
    /// Whether to use residual connection (only effective when
    /// input_dim == output_dim)
    pub use_residual: bool,
    /// Whether to normalize output per node (mean/std layer norm)
    pub layer_norm: bool,
}
impl Default for SheafNeuralConfig {
fn default() -> Self {
Self {
input_dim: 64,
output_dim: 64,
diffusion_steps: 3,
diffusion_coeff: 0.5,
activation: Activation::ReLU,
dropout: 0.0,
layer_norm: true,
use_residual: true,
}
}
}
/// A sheaf-aware neural network layer
///
/// Combines linear transformation with sheaf diffusion to produce
/// outputs that respect graph structure.
#[derive(Clone)]
pub struct SheafNeuralLayer {
    /// Configuration
    config: SheafNeuralConfig,
    /// Weight matrix (output_dim x input_dim)
    weights: Array2<f64>,
    /// Bias vector (output_dim)
    bias: Array1<f64>,
    /// Diffusion weight (how much to mix diffusion vs direct); clamped to
    /// [0, 1] by `set_diffusion_weight`, defaults to 0.5
    diffusion_weight: f64,
}
impl SheafNeuralLayer {
/// Create a new layer with Xavier initialization
pub fn new(config: SheafNeuralConfig) -> Self {
let scale = (2.0 / (config.input_dim + config.output_dim) as f64).sqrt();
// Initialize weights with Xavier
let weights = Array2::from_shape_fn((config.output_dim, config.input_dim), |_| {
rand::random::<f64>() * scale - scale / 2.0
});
let bias = Array1::zeros(config.output_dim);
Self {
config,
weights,
bias,
diffusion_weight: 0.5,
}
}
    /// Create with specific weights
    ///
    /// # Panics
    /// Panics if `weights` is not `output_dim x input_dim` or if `bias`
    /// does not have length `output_dim`.
    pub fn with_weights(
        config: SheafNeuralConfig,
        weights: Array2<f64>,
        bias: Array1<f64>,
    ) -> Self {
        assert_eq!(weights.nrows(), config.output_dim);
        assert_eq!(weights.ncols(), config.input_dim);
        assert_eq!(bias.len(), config.output_dim);
        Self {
            config,
            weights,
            bias,
            diffusion_weight: 0.5,
        }
    }
    /// Set diffusion weight
    ///
    /// The value is clamped to [0, 1]; a weight of 0 disables the
    /// diffusion mixing in `forward`.
    pub fn set_diffusion_weight(&mut self, weight: f64) {
        self.diffusion_weight = weight.clamp(0.0, 1.0);
    }
    /// Forward pass on a section
    ///
    /// output = activation(W * diffuse(x) + b)
    ///
    /// Pipeline: (1) per-node affine transform, (2) optional sheaf
    /// diffusion steps smoothing the transformed features along edges,
    /// (3) activation, (4) residual connection when enabled and dimensions
    /// match, (5) optional per-node layer normalization.
    /// Note: `config.dropout` is not applied here.
    pub fn forward(&self, graph: &SheafGraph, input: &SheafSection) -> SheafSection {
        let mut output = SheafSection::empty();
        // Step 1: Apply linear transformation at each node
        for (node_id, input_vec) in &input.sections {
            let transformed = self.weights.dot(input_vec) + &self.bias;
            output.set(*node_id, transformed);
        }
        // Step 2: Apply sheaf diffusion
        // (the Laplacian is rebuilt from the graph on every forward call)
        if self.config.diffusion_steps > 0 && self.diffusion_weight > 0.0 {
            let laplacian_config = LaplacianConfig::default();
            let laplacian = SheafLaplacian::from_graph(graph, laplacian_config);
            for _ in 0..self.config.diffusion_steps {
                let laplacian_out = laplacian.apply(graph, &output);
                // Update: x = x - alpha * L * x
                for (node_id, out_vec) in output.sections.iter_mut() {
                    if let Some(lap_vec) = laplacian_out.sections.get(node_id) {
                        let scale = self.diffusion_weight * self.config.diffusion_coeff;
                        *out_vec = &*out_vec - &(lap_vec * scale);
                    }
                }
            }
        }
        // Step 3: Apply activation
        for out_vec in output.sections.values_mut() {
            *out_vec = self.config.activation.apply_array(out_vec);
        }
        // Step 4: Residual connection (if dimensions match and enabled)
        if self.config.use_residual && self.config.input_dim == self.config.output_dim {
            for (node_id, out_vec) in output.sections.iter_mut() {
                if let Some(in_vec) = input.sections.get(node_id) {
                    *out_vec = &*out_vec + in_vec;
                }
            }
        }
        // Step 5: Layer normalization (population std, i.e. `std(0.0)`);
        // skipped for near-constant vectors to avoid dividing by zero.
        if self.config.layer_norm {
            for out_vec in output.sections.values_mut() {
                let mean: f64 = out_vec.mean().unwrap_or(0.0);
                let std: f64 = out_vec.std(0.0);
                if std > 1e-10 {
                    *out_vec = out_vec.mapv(|x| (x - mean) / std);
                }
            }
        }
        output
    }
    /// Get weights
    pub fn weights(&self) -> &Array2<f64> {
        &self.weights
    }
    /// Get bias
    pub fn bias(&self) -> &Array1<f64> {
        &self.bias
    }
    /// Set weights (for training)
    ///
    /// # Panics
    /// Panics if the shape differs from the current weight matrix.
    pub fn set_weights(&mut self, weights: Array2<f64>) {
        assert_eq!(weights.shape(), self.weights.shape());
        self.weights = weights;
    }
    /// Set bias (for training)
    ///
    /// # Panics
    /// Panics if the length differs from the current bias vector.
    pub fn set_bias(&mut self, bias: Array1<f64>) {
        assert_eq!(bias.len(), self.bias.len());
        self.bias = bias;
    }
}
impl std::fmt::Debug for SheafNeuralLayer {
    /// Summarize the layer configuration without dumping the full weight
    /// matrices.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("SheafNeuralLayer");
        dbg.field("input_dim", &self.config.input_dim);
        dbg.field("output_dim", &self.config.output_dim);
        dbg.field("diffusion_steps", &self.config.diffusion_steps);
        dbg.field("activation", &self.config.activation);
        dbg.finish()
    }
}
/// Sheaf convolution layer
///
/// Generalizes graph convolution using sheaf structure
#[derive(Clone)]
pub struct SheafConvolution {
    /// Input dimension
    input_dim: usize,
    /// Output dimension
    output_dim: usize,
    /// Weight for self-features (output_dim x input_dim)
    self_weight: Array2<f64>,
    /// Weight for neighbor features (output_dim x input_dim)
    neighbor_weight: Array2<f64>,
    /// Bias (length output_dim)
    bias: Array1<f64>,
    /// Activation applied to each node's output
    activation: Activation,
}
impl SheafConvolution {
/// Create a new sheaf convolution layer
pub fn new(input_dim: usize, output_dim: usize) -> Self {
let scale = (2.0 / (input_dim + output_dim) as f64).sqrt();
let self_weight = Array2::from_shape_fn((output_dim, input_dim), |_| {
rand::random::<f64>() * scale - scale / 2.0
});
let neighbor_weight = Array2::from_shape_fn((output_dim, input_dim), |_| {
rand::random::<f64>() * scale - scale / 2.0
});
let bias = Array1::zeros(output_dim);
Self {
input_dim,
output_dim,
self_weight,
neighbor_weight,
bias,
activation: Activation::ReLU,
}
}
    /// Set activation function
    ///
    /// Builder-style: consumes and returns `self`.
    pub fn with_activation(mut self, activation: Activation) -> Self {
        self.activation = activation;
        self
    }
    /// Forward pass
    ///
    /// h_v = activation(W_self * x_v + W_neigh * sum_u rho_{u->v}(x_u) / deg(v) + b)
    ///
    /// Neighbor features are averaged over incident edges. Nodes missing
    /// from `input` are skipped entirely; a self-loop contributes the
    /// node's own feature vector to its neighbor average.
    pub fn forward(&self, graph: &SheafGraph, input: &SheafSection) -> SheafSection {
        let mut output = SheafSection::empty();
        for node_id in graph.node_ids() {
            if let Some(self_vec) = input.get(node_id) {
                // Self contribution
                let mut h = self.self_weight.dot(self_vec);
                // Neighbor contribution (average of restricted neighbors)
                let neighbors: Vec<_> = graph.edges_incident_to(node_id);
                if !neighbors.is_empty() {
                    let mut neighbor_sum = Array1::zeros(self.input_dim);
                    let mut count = 0;
                    for edge_id in neighbors {
                        if let Some(edge) = graph.get_edge(edge_id) {
                            // The neighbor is the endpoint that isn't this node.
                            let neighbor_id = if edge.source == node_id {
                                edge.target
                            } else {
                                edge.source
                            };
                            if let Some(neighbor_vec) = input.get(neighbor_id) {
                                // For identity restriction, just add neighbor
                                // For general restriction, would apply rho here
                                neighbor_sum = neighbor_sum + neighbor_vec;
                                count += 1;
                            }
                        }
                    }
                    // Average only over neighbors that had features.
                    if count > 0 {
                        neighbor_sum /= count as f64;
                        h = h + self.neighbor_weight.dot(&neighbor_sum);
                    }
                }
                // Add bias and apply activation
                h = h + &self.bias;
                h = self.activation.apply_array(&h);
                output.set(node_id, h);
            }
        }
        output
    }
}
impl std::fmt::Debug for SheafConvolution {
    /// Summarize dimensions and activation; weight matrices are
    /// deliberately omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("SheafConvolution");
        dbg.field("input_dim", &self.input_dim);
        dbg.field("output_dim", &self.output_dim);
        dbg.field("activation", &self.activation);
        dbg.finish()
    }
}
/// Cohomology-aware pooling layer
///
/// Pools node features while preserving cohomological structure
#[derive(Clone)]
pub struct CohomologyPooling {
    /// Pooling method
    method: PoolingMethod,
    /// Whether to weight by node importance (from Laplacian spectrum)
    /// NOTE(review): this flag is not consulted by `pool` in this file —
    /// confirm whether spectral weighting is implemented elsewhere or
    /// still pending.
    spectral_weighting: bool,
}
/// Pooling methods
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PoolingMethod {
    /// Mean of all nodes
    Mean,
    /// Max over all nodes (component-wise)
    Max,
    /// Sum over all nodes
    Sum,
    /// Attention-weighted sum (weights are the per-node L2 norms)
    Attention,
    /// Average of the k nodes with the largest L2 norms
    TopK(usize),
}
impl CohomologyPooling {
    /// Create a new pooling layer
    ///
    /// Spectral weighting starts disabled.
    pub fn new(method: PoolingMethod) -> Self {
        Self {
            method,
            spectral_weighting: false,
        }
    }
    /// Enable spectral weighting
    ///
    /// Builder-style: consumes and returns `self`.
    pub fn with_spectral_weighting(mut self) -> Self {
        self.spectral_weighting = true;
        self
    }
/// Pool section to single vector
pub fn pool(&self, graph: &SheafGraph, section: &SheafSection) -> Array1<f64> {
if section.sections.is_empty() {
return Array1::zeros(0);
}
let dim = section
.sections
.values()
.next()
.map(|v| v.len())
.unwrap_or(0);
match self.method {
PoolingMethod::Mean => {
let mut sum = Array1::zeros(dim);
let mut count = 0;
for vec in section.sections.values() {
sum = sum + vec;
count += 1;
}
if count > 0 {
sum / count as f64
} else {
sum
}
}
PoolingMethod::Max => {
let mut max_vec = Array1::from_elem(dim, f64::NEG_INFINITY);
for vec in section.sections.values() {
for (i, &val) in vec.iter().enumerate() {
max_vec[i] = max_vec[i].max(val);
}
}
max_vec
}
PoolingMethod::Sum => {
let mut sum = Array1::zeros(dim);
for vec in section.sections.values() {
sum = sum + vec;
}
sum
}
PoolingMethod::Attention => {
// Simple attention: weight by L2 norm
let mut sum = Array1::zeros(dim);
let mut total_weight = 0.0;
for vec in section.sections.values() {
let weight = vec.iter().map(|x| x * x).sum::<f64>().sqrt();
sum = sum + vec * weight;
total_weight += weight;
}
if total_weight > 0.0 {
sum / total_weight
} else {
sum
}
}
PoolingMethod::TopK(k) => {
// Select top k nodes by L2 norm
let mut node_norms: Vec<_> = section
.sections
.iter()
.map(|(id, vec)| (*id, vec.iter().map(|x| x * x).sum::<f64>()))
.collect();
node_norms
.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
let mut sum = Array1::zeros(dim);
for (node_id, _) in node_norms.into_iter().take(k) {
if let Some(vec) = section.get(node_id) {
sum = sum + vec;
}
}
sum / k as f64
}
}
}
}
// Manual Debug impl mirroring the struct's (private) configuration fields.
impl std::fmt::Debug for CohomologyPooling {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("CohomologyPooling")
            .field("method", &self.method)
            .field("spectral_weighting", &self.spectral_weighting)
            .finish()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::substrate::edge::SheafEdgeBuilder;
    use crate::substrate::node::SheafNodeBuilder;
    use uuid::Uuid;
    // Helper for tests needing a fresh node id.
    // NOTE(review): appears unused in this module — confirm before removing.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }
    #[test]
    fn test_activation_functions() {
        // ReLU clamps negatives to zero and passes positives through.
        assert!((Activation::ReLU.apply(-1.0) - 0.0).abs() < 1e-10);
        assert!((Activation::ReLU.apply(1.0) - 1.0).abs() < 1e-10);
        // Sigmoid is 0.5 at the origin.
        assert!((Activation::Sigmoid.apply(0.0) - 0.5).abs() < 1e-10);
        let arr = Array1::from_vec(vec![1.0, 2.0, 3.0]);
        let softmax = Activation::Softmax.apply_array(&arr);
        // Softmax output must sum to 1 (probability distribution).
        assert!((softmax.sum() - 1.0).abs() < 1e-10);
    }
    #[test]
    fn test_sheaf_neural_layer() {
        // Two nodes joined by an identity-restriction edge.
        let graph = SheafGraph::new();
        let node1 = SheafNodeBuilder::new()
            .state_from_slice(&[1.0, 0.0, 0.0, 0.0])
            .build();
        let node2 = SheafNodeBuilder::new()
            .state_from_slice(&[0.0, 1.0, 0.0, 0.0])
            .build();
        let id1 = graph.add_node(node1);
        let id2 = graph.add_node(node2);
        let edge = SheafEdgeBuilder::new(id1, id2)
            .identity_restrictions(4)
            .weight(1.0)
            .build();
        graph.add_edge(edge).unwrap();
        let config = SheafNeuralConfig {
            input_dim: 4,
            output_dim: 2,
            diffusion_steps: 1,
            ..Default::default()
        };
        let layer = SheafNeuralLayer::new(config);
        // Create input section
        let mut input = SheafSection::empty();
        input.set(id1, Array1::from_vec(vec![1.0, 0.0, 0.0, 0.0]));
        input.set(id2, Array1::from_vec(vec![0.0, 1.0, 0.0, 0.0]));
        let output = layer.forward(&graph, &input);
        // Every input node must appear in the output, projected to output_dim.
        assert!(output.contains(id1));
        assert!(output.contains(id2));
        assert_eq!(output.get(id1).unwrap().len(), 2);
    }
    #[test]
    fn test_sheaf_convolution() {
        let graph = SheafGraph::new();
        let node1 = SheafNodeBuilder::new()
            .state_from_slice(&[1.0, 0.0])
            .build();
        let node2 = SheafNodeBuilder::new()
            .state_from_slice(&[0.0, 1.0])
            .build();
        let id1 = graph.add_node(node1);
        let id2 = graph.add_node(node2);
        let edge = SheafEdgeBuilder::new(id1, id2)
            .identity_restrictions(2)
            .build();
        graph.add_edge(edge).unwrap();
        // 2 -> 3 dimensional convolution.
        let conv = SheafConvolution::new(2, 3);
        let mut input = SheafSection::empty();
        input.set(id1, Array1::from_vec(vec![1.0, 0.0]));
        input.set(id2, Array1::from_vec(vec![0.0, 1.0]));
        let output = conv.forward(&graph, &input);
        assert!(output.contains(id1));
        assert_eq!(output.get(id1).unwrap().len(), 3);
    }
    #[test]
    fn test_pooling() {
        let graph = SheafGraph::new();
        let node1 = SheafNodeBuilder::new().state_from_slice(&[1.0]).build();
        let node2 = SheafNodeBuilder::new().state_from_slice(&[3.0]).build();
        let id1 = graph.add_node(node1);
        let id2 = graph.add_node(node2);
        let mut section = SheafSection::empty();
        section.set(id1, Array1::from_vec(vec![1.0]));
        section.set(id2, Array1::from_vec(vec![3.0]));
        // Mean of {1, 3} is 2; max is 3.
        let mean_pool = CohomologyPooling::new(PoolingMethod::Mean);
        let result = mean_pool.pool(&graph, &section);
        assert!((result[0] - 2.0).abs() < 1e-10);
        let max_pool = CohomologyPooling::new(PoolingMethod::Max);
        let result = max_pool.pool(&graph, &section);
        assert!((result[0] - 3.0).abs() < 1e-10);
    }
}

View File

@@ -0,0 +1,524 @@
//! Obstruction Detection
//!
//! Obstructions are cohomological objects that indicate global inconsistency.
//! A non-trivial obstruction means that local data cannot be patched together
//! into a global section.
use super::cocycle::{Cocycle, SheafCoboundary};
use super::laplacian::{HarmonicRepresentative, LaplacianConfig, SheafLaplacian};
use super::sheaf::{Sheaf, SheafSection};
use crate::substrate::NodeId;
use crate::substrate::SheafGraph;
use ndarray::Array1;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Severity of an obstruction
///
/// Variants are declared in strictly increasing order of severity, so the
/// derived `Ord`/`PartialOrd` (added here) agree with the numeric ordering
/// already relied upon elsewhere via `as u8` discriminant casts.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum ObstructionSeverity {
    /// No obstruction (fully coherent)
    None,
    /// Minor obstruction (near-coherent, easily fixable)
    Minor,
    /// Moderate obstruction (requires attention)
    Moderate,
    /// Severe obstruction (significant inconsistency)
    Severe,
    /// Critical obstruction (fundamental contradiction)
    Critical,
}
impl ObstructionSeverity {
    /// Classify an energy magnitude against four ascending thresholds.
    ///
    /// `thresholds[i]` is the exclusive upper bound for the i-th level;
    /// energies at or above the last threshold are `Critical`.
    pub fn from_energy(energy: f64, thresholds: &[f64; 4]) -> Self {
        match thresholds.iter().position(|&t| energy < t) {
            Some(0) => Self::None,
            Some(1) => Self::Minor,
            Some(2) => Self::Moderate,
            Some(3) => Self::Severe,
            _ => Self::Critical,
        }
    }

    /// True for severities that should trigger remediation
    /// (everything from `Moderate` upward).
    pub fn requires_action(&self) -> bool {
        !matches!(self, Self::None | Self::Minor)
    }

    /// True only for the `Critical` level.
    pub fn is_critical(&self) -> bool {
        matches!(self, Self::Critical)
    }
}
/// An obstruction representing a global inconsistency
///
/// Bundles the energy, localization data and (optionally) a representative
/// cocycle for a single detected inconsistency.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Obstruction {
    /// Unique identifier
    // Uniqueness is the caller's responsibility; nothing here enforces it.
    pub id: u64,
    /// Cohomology degree where obstruction lives
    pub degree: usize,
    /// Severity level
    pub severity: ObstructionSeverity,
    /// Total obstruction energy
    pub energy: f64,
    /// Edges contributing to obstruction (edge -> contribution)
    pub edge_contributions: HashMap<(NodeId, NodeId), f64>,
    /// Localization: nodes most affected
    pub hotspots: Vec<(NodeId, f64)>,
    /// Representative cocycle
    pub cocycle: Option<Cocycle>,
    /// Dimension of obstruction space
    pub multiplicity: usize,
    /// Description of the obstruction
    pub description: String,
}
impl Obstruction {
    /// Construct an obstruction with no localization data attached yet.
    ///
    /// `multiplicity` starts at 1; hotspots, cocycle and description are
    /// filled in via the builder-style setters below.
    pub fn new(id: u64, degree: usize, energy: f64, severity: ObstructionSeverity) -> Self {
        Self {
            id,
            degree,
            severity,
            energy,
            edge_contributions: HashMap::new(),
            hotspots: Vec::new(),
            cocycle: None,
            multiplicity: 1,
            description: String::new(),
        }
    }

    /// Record how much a single edge contributes to this obstruction.
    pub fn add_edge_contribution(&mut self, source: NodeId, target: NodeId, contribution: f64) {
        self.edge_contributions.insert((source, target), contribution);
    }

    /// Builder-style setter for the affected-node hotspot list.
    pub fn with_hotspots(mut self, hotspots: Vec<(NodeId, f64)>) -> Self {
        self.hotspots = hotspots;
        self
    }

    /// Builder-style setter for the representative cocycle.
    pub fn with_cocycle(mut self, cocycle: Cocycle) -> Self {
        self.cocycle = Some(cocycle);
        self
    }

    /// Builder-style setter for the human-readable description.
    pub fn with_description(mut self, description: impl Into<String>) -> Self {
        self.description = description.into();
        self
    }

    /// The `k` edges with the largest contributions, in descending order.
    pub fn top_edges(&self, k: usize) -> Vec<((NodeId, NodeId), f64)> {
        let mut ranked: Vec<((NodeId, NodeId), f64)> = self
            .edge_contributions
            .iter()
            .map(|(&edge, &contribution)| (edge, contribution))
            .collect();
        ranked.sort_by(|lhs, rhs| {
            rhs.1
                .partial_cmp(&lhs.1)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        ranked.into_iter().take(k).collect()
    }
}
/// Detailed obstruction report
///
/// Aggregate view over all obstructions detected in a single pass, plus
/// summary spectral data and remediation hints.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObstructionReport {
    /// Total cohomological obstruction energy
    pub total_energy: f64,
    /// Maximum local obstruction
    pub max_local_energy: f64,
    /// Overall severity
    // The maximum severity over all contained obstructions.
    pub severity: ObstructionSeverity,
    /// List of detected obstructions
    pub obstructions: Vec<Obstruction>,
    /// Betti numbers (cohomology dimensions)
    pub betti_numbers: Vec<usize>,
    /// Spectral gap (if computed)
    pub spectral_gap: Option<f64>,
    /// Whether system is globally coherent
    // Cleared when any obstruction's severity requires action.
    pub is_coherent: bool,
    /// Recommendations for resolution
    pub recommendations: Vec<String>,
}
impl ObstructionReport {
    /// A report with no obstructions and all aggregates zeroed.
    pub fn empty() -> Self {
        Self {
            total_energy: 0.0,
            max_local_energy: 0.0,
            severity: ObstructionSeverity::None,
            obstructions: Vec::new(),
            betti_numbers: Vec::new(),
            spectral_gap: None,
            is_coherent: true,
            recommendations: Vec::new(),
        }
    }

    /// A report for a fully coherent system.
    pub fn coherent(spectral_gap: Option<f64>) -> Self {
        Self {
            betti_numbers: vec![1], // Single connected component
            spectral_gap,
            ..Self::empty()
        }
    }

    /// Fold an obstruction into the report, updating all aggregates.
    pub fn add_obstruction(&mut self, obs: Obstruction) {
        self.total_energy += obs.energy;
        self.max_local_energy = self.max_local_energy.max(obs.energy);
        // Severity variants are declared in ascending order, so comparing
        // discriminants yields the maximum severity.
        if (obs.severity as u8) > (self.severity as u8) {
            self.severity = obs.severity;
        }
        if obs.severity.requires_action() {
            self.is_coherent = false;
        }
        self.obstructions.push(obs);
    }

    /// Append a free-form remediation hint.
    pub fn add_recommendation(&mut self, rec: impl Into<String>) {
        self.recommendations.push(rec.into());
    }

    /// All obstructions at the `Critical` severity level.
    pub fn critical_obstructions(&self) -> Vec<&Obstruction> {
        self.obstructions
            .iter()
            .filter(|o| o.severity.is_critical())
            .collect()
    }
}
/// Detector for cohomological obstructions
///
/// Configurable via builder-style setters; run with [`ObstructionDetector::detect`].
pub struct ObstructionDetector {
    /// Energy thresholds for severity classification
    // Ascending: [None|Minor, Minor|Moderate, Moderate|Severe, Severe|Critical].
    thresholds: [f64; 4],
    /// Laplacian configuration
    laplacian_config: LaplacianConfig,
    /// Whether to compute detailed cocycles
    // NOTE(review): stored (default true) but not consulted by `detect` —
    // confirm whether cocycle extraction is still planned.
    compute_cocycles: bool,
    /// Number of hotspots to track
    // No builder setter exists for this yet; fixed at construction.
    num_hotspots: usize,
}
impl ObstructionDetector {
    /// Create a new detector with default settings
    ///
    /// Defaults: thresholds [0.01, 0.1, 0.5, 1.0], cocycle computation on,
    /// top 5 hotspots tracked.
    pub fn new() -> Self {
        Self {
            thresholds: [0.01, 0.1, 0.5, 1.0],
            laplacian_config: LaplacianConfig::default(),
            compute_cocycles: true,
            num_hotspots: 5,
        }
    }
    /// Set energy thresholds
    pub fn with_thresholds(mut self, thresholds: [f64; 4]) -> Self {
        self.thresholds = thresholds;
        self
    }
    /// Set whether to compute cocycles
    // NOTE(review): the flag is stored but `detect` never reads it.
    pub fn with_cocycles(mut self, compute: bool) -> Self {
        self.compute_cocycles = compute;
        self
    }
    /// Detect obstructions in a SheafGraph
    ///
    /// Builds the sheaf Laplacian, measures the global Dirichlet energy of
    /// the graph's current state, attributes energy to individual edges and
    /// nodes, and packages the result (with Betti numbers, spectral gap and
    /// remediation hints) into an [`ObstructionReport`].
    pub fn detect(&self, graph: &SheafGraph) -> ObstructionReport {
        let mut report = ObstructionReport::empty();
        // Build the sheaf Laplacian
        let laplacian = SheafLaplacian::from_graph(graph, self.laplacian_config.clone());
        // Compute global energy from current state
        let section = self.graph_to_section(graph);
        let total_energy = laplacian.energy(graph, &section);
        // Compute per-edge energies
        let mut edge_energies: HashMap<(NodeId, NodeId), f64> = HashMap::new();
        for edge_id in graph.edge_ids() {
            if let Some(edge) = graph.get_edge(edge_id) {
                if let (Some(source_node), Some(target_node)) =
                    (graph.get_node(edge.source), graph.get_node(edge.target))
                {
                    let residual = edge.weighted_residual_energy(
                        source_node.state.as_slice(),
                        target_node.state.as_slice(),
                    );
                    edge_energies.insert((edge.source, edge.target), residual as f64);
                }
            }
        }
        // Compute spectrum for Betti numbers
        let spectrum = laplacian.compute_spectrum(graph);
        report.betti_numbers = vec![spectrum.null_space_dim];
        report.spectral_gap = spectrum.spectral_gap;
        // Create obstruction if energy is non-trivial
        if total_energy > self.thresholds[0] {
            let severity = ObstructionSeverity::from_energy(total_energy, &self.thresholds);
            // NOTE(review): id and degree are both hard-coded to 1 — every
            // report produced here carries the same obstruction id.
            let mut obstruction = Obstruction::new(1, 1, total_energy, severity);
            // Add edge contributions
            for ((source, target), energy) in &edge_energies {
                obstruction.add_edge_contribution(*source, *target, *energy);
            }
            // Find hotspots (nodes with highest adjacent energy)
            let node_energies = self.compute_node_energies(graph, &edge_energies);
            let mut hotspots: Vec<_> = node_energies.into_iter().collect();
            hotspots.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
            hotspots.truncate(self.num_hotspots);
            obstruction = obstruction.with_hotspots(hotspots);
            // Set description
            let desc = format!(
                "H^1 obstruction: {} edges with total energy {:.4}",
                edge_energies.len(),
                total_energy
            );
            obstruction = obstruction.with_description(desc);
            report.add_obstruction(obstruction);
        }
        // Overwrites the running total that add_obstruction accumulated;
        // the Laplacian energy is taken as authoritative here.
        report.total_energy = total_energy;
        report.max_local_energy = edge_energies.values().copied().fold(0.0, f64::max);
        // Generate recommendations
        self.generate_recommendations(&mut report);
        report
    }
    /// Convert graph state to section
    ///
    /// Widens each node's f32 state vector to f64.
    fn graph_to_section(&self, graph: &SheafGraph) -> SheafSection {
        let mut section = SheafSection::empty();
        for node_id in graph.node_ids() {
            if let Some(node) = graph.get_node(node_id) {
                let values: Vec<f64> = node.state.as_slice().iter().map(|&x| x as f64).collect();
                section.set(node_id, Array1::from_vec(values));
            }
        }
        section
    }
    /// Compute energy per node (sum of incident edge energies)
    ///
    /// Each edge's energy is credited to both of its endpoints.
    // NOTE(review): `graph` is accepted but not used in the body.
    fn compute_node_energies(
        &self,
        graph: &SheafGraph,
        edge_energies: &HashMap<(NodeId, NodeId), f64>,
    ) -> HashMap<NodeId, f64> {
        let mut node_energies: HashMap<NodeId, f64> = HashMap::new();
        for ((source, target), energy) in edge_energies {
            *node_energies.entry(*source).or_insert(0.0) += energy;
            *node_energies.entry(*target).or_insert(0.0) += energy;
        }
        node_energies
    }
    /// Generate recommendations based on obstructions
    ///
    /// Emits one or more textual hints per obstruction (scaled to its
    /// severity) plus a spectral-gap warning when the gap is below 0.1.
    fn generate_recommendations(&self, report: &mut ObstructionReport) {
        if report.is_coherent {
            report.add_recommendation("System is coherent - no action required");
            return;
        }
        // Collect recommendations first to avoid borrow checker issues
        // (we iterate `report.obstructions` while wanting to push into
        // `report.recommendations`).
        let mut recommendations: Vec<String> = Vec::new();
        for obs in &report.obstructions {
            match obs.severity {
                ObstructionSeverity::Minor => {
                    recommendations.push(format!(
                        "Minor inconsistency detected. Consider reviewing edges: {:?}",
                        obs.top_edges(2).iter().map(|(e, _)| e).collect::<Vec<_>>()
                    ));
                }
                ObstructionSeverity::Moderate => {
                    recommendations.push(format!(
                        "Moderate obstruction. Focus on hotspot nodes: {:?}",
                        obs.hotspots
                            .iter()
                            .take(3)
                            .map(|(n, _)| n)
                            .collect::<Vec<_>>()
                    ));
                }
                ObstructionSeverity::Severe | ObstructionSeverity::Critical => {
                    recommendations.push(format!(
                        "Severe obstruction with energy {:.4}. Immediate review required.",
                        obs.energy
                    ));
                    recommendations
                        .push("Consider isolating incoherent region using MinCut".to_string());
                }
                // `None` severity needs no recommendation.
                _ => {}
            }
        }
        if report.spectral_gap.is_some_and(|g| g < 0.1) {
            recommendations.push(
                "Small spectral gap indicates near-obstruction. Monitor for drift.".to_string(),
            );
        }
        // Now add all recommendations
        for rec in recommendations {
            report.add_recommendation(rec);
        }
    }
}
impl Default for ObstructionDetector {
    // Delegates to `new` so the default thresholds live in one place.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::substrate::edge::SheafEdgeBuilder;
    use crate::substrate::node::SheafNodeBuilder;
    use uuid::Uuid;
    // Helper for tests needing a fresh node id.
    // NOTE(review): appears unused in this module — confirm before removing.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }
    #[test]
    fn test_coherent_system() {
        let graph = SheafGraph::new();
        // Two nodes with same state
        let node1 = SheafNodeBuilder::new()
            .state_from_slice(&[1.0, 0.0])
            .build();
        let node2 = SheafNodeBuilder::new()
            .state_from_slice(&[1.0, 0.0])
            .build();
        let id1 = graph.add_node(node1);
        let id2 = graph.add_node(node2);
        let edge = SheafEdgeBuilder::new(id1, id2)
            .identity_restrictions(2)
            .weight(1.0)
            .build();
        graph.add_edge(edge).unwrap();
        let detector = ObstructionDetector::new();
        let report = detector.detect(&graph);
        // Identical states under identity restrictions => zero residual.
        assert!(report.is_coherent);
        assert!(report.total_energy < 0.01);
        assert_eq!(report.severity, ObstructionSeverity::None);
    }
    #[test]
    fn test_incoherent_system() {
        let graph = SheafGraph::new();
        // Two nodes with different states
        let node1 = SheafNodeBuilder::new()
            .state_from_slice(&[1.0, 0.0])
            .build();
        let node2 = SheafNodeBuilder::new()
            .state_from_slice(&[0.0, 1.0])
            .build();
        let id1 = graph.add_node(node1);
        let id2 = graph.add_node(node2);
        let edge = SheafEdgeBuilder::new(id1, id2)
            .identity_restrictions(2)
            .weight(1.0)
            .build();
        graph.add_edge(edge).unwrap();
        let detector = ObstructionDetector::new();
        let report = detector.detect(&graph);
        // Orthogonal states cannot agree across the edge.
        assert!(!report.is_coherent || report.total_energy > 0.01);
        assert!(report.total_energy > 0.5);
    }
    #[test]
    fn test_severity_classification() {
        // Below the first threshold: no obstruction.
        assert_eq!(
            ObstructionSeverity::from_energy(0.001, &[0.01, 0.1, 0.5, 1.0]),
            ObstructionSeverity::None
        );
        // Between first and second threshold: minor.
        assert_eq!(
            ObstructionSeverity::from_energy(0.05, &[0.01, 0.1, 0.5, 1.0]),
            ObstructionSeverity::Minor
        );
        // Above the last threshold: critical.
        assert_eq!(
            ObstructionSeverity::from_energy(2.0, &[0.01, 0.1, 0.5, 1.0]),
            ObstructionSeverity::Critical
        );
    }
    #[test]
    fn test_obstruction_hotspots() {
        let graph = SheafGraph::new();
        let node1 = SheafNodeBuilder::new().state_from_slice(&[1.0]).build();
        let node2 = SheafNodeBuilder::new().state_from_slice(&[5.0]).build();
        let node3 = SheafNodeBuilder::new().state_from_slice(&[1.5]).build();
        let id1 = graph.add_node(node1);
        let id2 = graph.add_node(node2);
        let id3 = graph.add_node(node3);
        let edge1 = SheafEdgeBuilder::new(id1, id2)
            .identity_restrictions(1)
            .weight(1.0)
            .build();
        let edge2 = SheafEdgeBuilder::new(id2, id3)
            .identity_restrictions(1)
            .weight(1.0)
            .build();
        graph.add_edge(edge1).unwrap();
        graph.add_edge(edge2).unwrap();
        let detector = ObstructionDetector::new();
        let report = detector.detect(&graph);
        // Node 2 should be a hotspot (connects to both high-energy edges)
        if let Some(obs) = report.obstructions.first() {
            assert!(!obs.hotspots.is_empty());
        }
    }
}

View File

@@ -0,0 +1,463 @@
//! Sheaf Data Structure
//!
//! A sheaf on a graph assigns:
//! - A vector space (stalk) to each vertex
//! - Restriction maps between adjacent stalks
//!
//! This is the foundational structure for cohomology computation.
use crate::substrate::NodeId;
use crate::substrate::{RestrictionMap, SheafGraph};
use ndarray::{Array1, Array2};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
/// A stalk (fiber) at a vertex - the local data space
///
/// When `basis` is `None`, the standard basis of R^dimension is implied.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Stalk {
    /// Dimension of the stalk (vector space dimension)
    pub dimension: usize,
    /// Optional basis vectors (if not standard basis)
    // Invariant (checked in `with_basis`): `basis.ncols() == dimension`.
    pub basis: Option<Array2<f64>>,
}
impl Stalk {
    /// Create a stalk of the given dimension, using the standard basis.
    pub fn new(dimension: usize) -> Self {
        Self { dimension, basis: None }
    }

    /// Create a stalk equipped with an explicit basis.
    ///
    /// # Panics
    /// Panics when the basis matrix does not have `dimension` columns.
    pub fn with_basis(dimension: usize, basis: Array2<f64>) -> Self {
        assert_eq!(basis.ncols(), dimension, "Basis dimension mismatch");
        Self { dimension, basis: Some(basis) }
    }

    /// Dimension of the underlying vector space.
    pub fn dim(&self) -> usize {
        self.dimension
    }
}
/// A local section assigns a value in the stalk at each vertex
///
/// A single (vertex, value) pair; collections of these form a [`SheafSection`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LocalSection {
    /// Vertex ID
    pub vertex: NodeId,
    /// Value in the stalk (as a vector)
    pub value: Array1<f64>,
}
impl LocalSection {
/// Create a new local section
pub fn new(vertex: NodeId, value: Array1<f64>) -> Self {
Self { vertex, value }
}
/// Create from f32 slice
pub fn from_slice(vertex: NodeId, data: &[f32]) -> Self {
let value = Array1::from_iter(data.iter().map(|&x| x as f64));
Self { vertex, value }
}
/// Get dimension
pub fn dim(&self) -> usize {
self.value.len()
}
}
/// A sheaf section is a collection of local sections that are compatible
/// under restriction maps
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SheafSection {
    /// Local sections indexed by vertex
    pub sections: HashMap<NodeId, Array1<f64>>,
    /// Whether this is a global section (fully consistent)
    // Cached flag; any `set` resets it to false until rechecked.
    pub is_global: bool,
}
impl SheafSection {
    /// A section defined on no vertices.
    pub fn empty() -> Self {
        Self {
            sections: HashMap::new(),
            is_global: false,
        }
    }

    /// Wrap pre-assembled per-vertex data as a (not yet verified) section.
    pub fn from_local(sections: HashMap<NodeId, Array1<f64>>) -> Self {
        Self { sections, is_global: false }
    }

    /// Value assigned at `vertex`, if any.
    pub fn get(&self, vertex: NodeId) -> Option<&Array1<f64>> {
        self.sections.get(&vertex)
    }

    /// Assign (or replace) the value at `vertex`.
    pub fn set(&mut self, vertex: NodeId, value: Array1<f64>) {
        self.sections.insert(vertex, value);
        // Any mutation may break global consistency; force a re-check.
        self.is_global = false;
    }

    /// Whether `vertex` carries a value in this section.
    pub fn contains(&self, vertex: NodeId) -> bool {
        self.sections.contains_key(&vertex)
    }

    /// Number of vertices carrying a value.
    pub fn support_size(&self) -> usize {
        self.sections.len()
    }
}
/// Type alias for restriction map function
///
/// Maps a stalk vector into the shared edge space. Wrapped in an `Arc` so
/// sheaves (and builders holding them) stay cheaply cloneable.
pub type RestrictionFn = Arc<dyn Fn(&Array1<f64>) -> Array1<f64> + Send + Sync>;
/// A sheaf on a graph
///
/// Assigns stalks to vertices and restriction maps to edges
// Clone is cheap for the maps themselves: restriction functions are Arc-shared.
#[derive(Clone)]
pub struct Sheaf {
    /// Stalks at each vertex
    pub stalks: HashMap<NodeId, Stalk>,
    /// Restriction maps indexed by (source, target) pairs
    /// The map rho_{u->v} restricts from stalk at u to edge space
    restriction_maps: HashMap<(NodeId, NodeId), RestrictionFn>,
    /// Cached dimensions for performance
    stalk_dims: HashMap<NodeId, usize>,
    /// Total dimension (sum of all stalk dimensions)
    // Kept in sync by `add_stalk`.
    total_dim: usize,
}
impl Sheaf {
    /// Create a new empty sheaf
    pub fn new() -> Self {
        Self {
            stalks: HashMap::new(),
            restriction_maps: HashMap::new(),
            stalk_dims: HashMap::new(),
            total_dim: 0,
        }
    }
    /// Build a sheaf from a SheafGraph
    ///
    /// Uses the graph's state vectors as stalks and restriction maps from edges
    ///
    /// Each edge contributes two directed restrictions: (source -> target)
    /// applies the edge's source map, (target -> source) applies the target
    /// map. Both convert f64 <-> f32 at the boundary since the graph's
    /// restriction maps operate on f32 slices.
    pub fn from_graph(graph: &SheafGraph) -> Self {
        let mut sheaf = Self::new();
        // Add stalks from nodes
        for node_id in graph.node_ids() {
            if let Some(node) = graph.get_node(node_id) {
                let dim = node.state.dim();
                sheaf.add_stalk(node_id, Stalk::new(dim));
            }
        }
        // Add restriction maps from edges
        for edge_id in graph.edge_ids() {
            if let Some(edge) = graph.get_edge(edge_id) {
                let source = edge.source;
                let target = edge.target;
                // Create restriction functions from the edge's restriction maps
                let source_rho = edge.rho_source.clone();
                let target_rho = edge.rho_target.clone();
                // Source restriction map
                let source_fn: RestrictionFn = Arc::new(move |v: &Array1<f64>| {
                    let input: Vec<f32> = v.iter().map(|&x| x as f32).collect();
                    let output = source_rho.apply(&input);
                    Array1::from_iter(output.iter().map(|&x| x as f64))
                });
                // Target restriction map
                let target_fn: RestrictionFn = Arc::new(move |v: &Array1<f64>| {
                    let input: Vec<f32> = v.iter().map(|&x| x as f32).collect();
                    let output = target_rho.apply(&input);
                    Array1::from_iter(output.iter().map(|&x| x as f64))
                });
                sheaf.add_restriction(source, target, source_fn.clone());
                sheaf.add_restriction(target, source, target_fn);
            }
        }
        sheaf
    }
    /// Add a stalk at a vertex
    ///
    /// Re-derives `total_dim` from the cached per-vertex dimensions, so
    /// replacing an existing stalk keeps the total consistent.
    pub fn add_stalk(&mut self, vertex: NodeId, stalk: Stalk) {
        let dim = stalk.dimension;
        self.stalks.insert(vertex, stalk);
        self.stalk_dims.insert(vertex, dim);
        self.total_dim = self.stalk_dims.values().sum();
    }
    /// Add a restriction map
    ///
    /// Directed: the map is keyed by the ordered pair (source, target).
    pub fn add_restriction(&mut self, source: NodeId, target: NodeId, map: RestrictionFn) {
        self.restriction_maps.insert((source, target), map);
    }
    /// Get the stalk at a vertex
    pub fn get_stalk(&self, vertex: NodeId) -> Option<&Stalk> {
        self.stalks.get(&vertex)
    }
    /// Get stalk dimension
    pub fn stalk_dim(&self, vertex: NodeId) -> Option<usize> {
        self.stalk_dims.get(&vertex).copied()
    }
    /// Apply restriction map from source to target
    ///
    /// Returns `None` when no map is registered for that directed pair.
    pub fn restrict(
        &self,
        source: NodeId,
        target: NodeId,
        value: &Array1<f64>,
    ) -> Option<Array1<f64>> {
        self.restriction_maps
            .get(&(source, target))
            .map(|rho| rho(value))
    }
    /// Check if a section is globally consistent
    ///
    /// A section is consistent if for every edge (u,v):
    /// rho_u(s(u)) = rho_v(s(v))
    ///
    /// Edges where either endpoint lacks a section value, or where a
    /// reverse map is missing, are skipped (treated as vacuously
    /// consistent). Each undirected edge is effectively checked twice,
    /// once per direction, since both directed keys are stored.
    pub fn is_consistent(&self, section: &SheafSection, tolerance: f64) -> bool {
        for &(source, target) in self.restriction_maps.keys() {
            if let (Some(s_val), Some(t_val)) = (section.get(source), section.get(target)) {
                let s_restricted = self.restrict(source, target, s_val);
                let t_restricted = self.restrict(target, source, t_val);
                if let (Some(s_r), Some(t_r)) = (s_restricted, t_restricted) {
                    let diff = &s_r - &t_r;
                    let norm: f64 = diff.iter().map(|x| x * x).sum::<f64>().sqrt();
                    if norm > tolerance {
                        return false;
                    }
                }
            }
        }
        true
    }
    /// Compute residual (inconsistency) at an edge
    ///
    /// Returns `rho_{u->v}(s(u)) - rho_{v->u}(s(v))`, or `None` when a
    /// value or a map is missing on either side.
    pub fn edge_residual(
        &self,
        source: NodeId,
        target: NodeId,
        section: &SheafSection,
    ) -> Option<Array1<f64>> {
        let s_val = section.get(source)?;
        let t_val = section.get(target)?;
        let s_restricted = self.restrict(source, target, s_val)?;
        let t_restricted = self.restrict(target, source, t_val)?;
        Some(&s_restricted - &t_restricted)
    }
    /// Total dimension of the sheaf
    pub fn total_dimension(&self) -> usize {
        self.total_dim
    }
    /// Number of vertices
    pub fn num_vertices(&self) -> usize {
        self.stalks.len()
    }
    /// Iterator over vertices
    pub fn vertices(&self) -> impl Iterator<Item = NodeId> + '_ {
        self.stalks.keys().copied()
    }
}
impl Default for Sheaf {
    // An empty sheaf: no stalks, no restriction maps.
    fn default() -> Self {
        Self::new()
    }
}
// Manual Debug impl: restriction maps are opaque `dyn Fn` closures and
// cannot be printed, so only summary counts are shown.
impl std::fmt::Debug for Sheaf {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Sheaf")
            .field("num_vertices", &self.stalks.len())
            .field("num_restrictions", &self.restriction_maps.len())
            .field("total_dimension", &self.total_dim)
            .finish()
    }
}
/// Builder for constructing sheaves
///
/// Fluent API: chain `stalk`/`*_restriction` calls, then `build()`.
pub struct SheafBuilder {
    sheaf: Sheaf,
}
impl SheafBuilder {
    /// Create a new builder
    pub fn new() -> Self {
        Self {
            sheaf: Sheaf::new(),
        }
    }
    /// Add a stalk at a vertex
    pub fn stalk(mut self, vertex: NodeId, dimension: usize) -> Self {
        self.sheaf.add_stalk(vertex, Stalk::new(dimension));
        self
    }
    /// Add an identity restriction between vertices
    ///
    /// Directed: add one per direction for symmetric consistency checks.
    pub fn identity_restriction(mut self, source: NodeId, target: NodeId) -> Self {
        let identity: RestrictionFn = Arc::new(|v: &Array1<f64>| v.clone());
        self.sheaf.add_restriction(source, target, identity);
        self
    }
    /// Add a scaling restriction
    pub fn scaling_restriction(mut self, source: NodeId, target: NodeId, scale: f64) -> Self {
        let scale_fn: RestrictionFn = Arc::new(move |v: &Array1<f64>| v * scale);
        self.sheaf.add_restriction(source, target, scale_fn);
        self
    }
    /// Add a projection restriction (select certain dimensions)
    ///
    /// The resulting closure panics at application time if any index is
    /// out of bounds for the vector it is applied to.
    pub fn projection_restriction(
        mut self,
        source: NodeId,
        target: NodeId,
        indices: Vec<usize>,
    ) -> Self {
        let proj_fn: RestrictionFn =
            Arc::new(move |v: &Array1<f64>| Array1::from_iter(indices.iter().map(|&i| v[i])));
        self.sheaf.add_restriction(source, target, proj_fn);
        self
    }
    /// Add a linear restriction with a matrix
    ///
    /// The matrix's column count must match the source stalk dimension;
    /// `dot` panics at application time otherwise.
    pub fn linear_restriction(
        mut self,
        source: NodeId,
        target: NodeId,
        matrix: Array2<f64>,
    ) -> Self {
        let linear_fn: RestrictionFn = Arc::new(move |v: &Array1<f64>| matrix.dot(v));
        self.sheaf.add_restriction(source, target, linear_fn);
        self
    }
    /// Build the sheaf
    pub fn build(self) -> Sheaf {
        self.sheaf
    }
}
impl Default for SheafBuilder {
    // Equivalent to `SheafBuilder::new()`: starts from an empty sheaf.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use uuid::Uuid;
    // Fresh random vertex id per call.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }
    #[test]
    fn test_sheaf_creation() {
        let v0 = make_node_id();
        let v1 = make_node_id();
        let sheaf = SheafBuilder::new()
            .stalk(v0, 3)
            .stalk(v1, 3)
            .identity_restriction(v0, v1)
            .identity_restriction(v1, v0)
            .build();
        assert_eq!(sheaf.num_vertices(), 2);
        // Total dimension is the sum of the two 3-dimensional stalks.
        assert_eq!(sheaf.total_dimension(), 6);
    }
    #[test]
    fn test_consistent_section() {
        let v0 = make_node_id();
        let v1 = make_node_id();
        let sheaf = SheafBuilder::new()
            .stalk(v0, 2)
            .stalk(v1, 2)
            .identity_restriction(v0, v1)
            .identity_restriction(v1, v0)
            .build();
        // Consistent section: same value at both vertices
        let mut section = SheafSection::empty();
        section.set(v0, Array1::from_vec(vec![1.0, 2.0]));
        section.set(v1, Array1::from_vec(vec![1.0, 2.0]));
        assert!(sheaf.is_consistent(&section, 1e-10));
    }
    #[test]
    fn test_inconsistent_section() {
        let v0 = make_node_id();
        let v1 = make_node_id();
        let sheaf = SheafBuilder::new()
            .stalk(v0, 2)
            .stalk(v1, 2)
            .identity_restriction(v0, v1)
            .identity_restriction(v1, v0)
            .build();
        // Inconsistent section: different values
        let mut section = SheafSection::empty();
        section.set(v0, Array1::from_vec(vec![1.0, 2.0]));
        section.set(v1, Array1::from_vec(vec![3.0, 4.0]));
        assert!(!sheaf.is_consistent(&section, 1e-10));
    }
    #[test]
    fn test_edge_residual() {
        let v0 = make_node_id();
        let v1 = make_node_id();
        let sheaf = SheafBuilder::new()
            .stalk(v0, 2)
            .stalk(v1, 2)
            .identity_restriction(v0, v1)
            .identity_restriction(v1, v0)
            .build();
        let mut section = SheafSection::empty();
        section.set(v0, Array1::from_vec(vec![1.0, 2.0]));
        section.set(v1, Array1::from_vec(vec![1.5, 2.5]));
        let residual = sheaf.edge_residual(v0, v1, &section).unwrap();
        // Residual should be [1.0, 2.0] - [1.5, 2.5] = [-0.5, -0.5]
        assert!((residual[0] - (-0.5)).abs() < 1e-10);
        assert!((residual[1] - (-0.5)).abs() < 1e-10);
    }
}

View File

@@ -0,0 +1,597 @@
//! Simplicial Complex and Chain Complex Types
//!
//! This module provides the foundational types for simplicial complexes
//! and chain complexes used in cohomology computations.
use crate::substrate::NodeId;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::hash::{Hash, Hasher};
/// Unique identifier for a simplex
// Newtype over u64; derived from the simplex's vertex set (see
// `SimplexId::from_vertices`) or assigned directly via `new`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SimplexId(pub u64);
impl SimplexId {
    /// Create a new simplex ID
    pub fn new(id: u64) -> Self {
        Self(id)
    }
    /// Compute ID from vertex set (deterministic)
    ///
    /// NOTE(review): `DefaultHasher` is deterministic within a single
    /// build, but its algorithm is not guaranteed stable across Rust
    /// releases — confirm before persisting these IDs across versions.
    pub fn from_vertices(vertices: &BTreeSet<NodeId>) -> Self {
        use std::collections::hash_map::DefaultHasher;
        let mut hasher = DefaultHasher::new();
        // BTreeSet iterates in sorted order, so the resulting hash does not
        // depend on insertion order.
        for v in vertices {
            v.hash(&mut hasher);
        }
        Self(hasher.finish())
    }
}
/// A simplex in a simplicial complex
///
/// An n-simplex is a set of n+1 vertices. For example:
/// - 0-simplex: a single vertex (node)
/// - 1-simplex: an edge (pair of nodes)
/// - 2-simplex: a triangle (triple of nodes)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Simplex {
    /// Unique identifier
    // Derived from `vertices` by the constructors, so equal vertex sets
    // yield equal ids.
    pub id: SimplexId,
    /// Ordered set of vertices (using BTreeSet for canonical ordering)
    pub vertices: BTreeSet<NodeId>,
    /// Dimension of the simplex (number of vertices - 1)
    pub dimension: usize,
    /// Optional weight for weighted computations
    // Defaults to 1.0; equality and hashing ignore this field.
    pub weight: f64,
}
impl Simplex {
    /// Create a new simplex from vertices
    ///
    /// Duplicates collapse via the BTreeSet; the id is derived from the
    /// resulting vertex set and the weight defaults to 1.0.
    // NOTE(review): an empty vertex set is given dimension 0, the same as a
    // single vertex — confirm callers never construct empty simplices.
    pub fn new(vertices: impl IntoIterator<Item = NodeId>) -> Self {
        let vertices: BTreeSet<NodeId> = vertices.into_iter().collect();
        let dimension = if vertices.is_empty() {
            0
        } else {
            vertices.len() - 1
        };
        let id = SimplexId::from_vertices(&vertices);
        Self {
            id,
            vertices,
            dimension,
            weight: 1.0,
        }
    }
    /// Create a simplex with a specific weight
    pub fn with_weight(mut self, weight: f64) -> Self {
        self.weight = weight;
        self
    }
    /// Get the boundary of this simplex (faces of dimension n-1)
    ///
    /// The boundary of an n-simplex [v0, v1, ..., vn] is the alternating sum:
    /// sum_{i=0}^n (-1)^i [v0, ..., v_{i-1}, v_{i+1}, ..., vn]
    ///
    /// Vertex order (and thus sign assignment) follows the BTreeSet's
    /// canonical sorted order. Faces inherit this simplex's weight.
    /// 0-simplices have empty boundary.
    pub fn boundary(&self) -> Vec<(Simplex, i8)> {
        if self.dimension == 0 {
            return Vec::new();
        }
        let vertices: Vec<NodeId> = self.vertices.iter().copied().collect();
        let mut faces = Vec::with_capacity(vertices.len());
        for (i, _) in vertices.iter().enumerate() {
            // The i-th face omits the i-th vertex.
            let mut face_vertices = BTreeSet::new();
            for (j, &v) in vertices.iter().enumerate() {
                if i != j {
                    face_vertices.insert(v);
                }
            }
            let face = Simplex {
                id: SimplexId::from_vertices(&face_vertices),
                vertices: face_vertices,
                dimension: self.dimension - 1,
                weight: self.weight,
            };
            // Alternating sign (-1)^i.
            let sign = if i % 2 == 0 { 1i8 } else { -1i8 };
            faces.push((face, sign));
        }
        faces
    }
    /// Check if this simplex contains a given vertex
    pub fn contains_vertex(&self, vertex: NodeId) -> bool {
        self.vertices.contains(&vertex)
    }
    /// Check if this simplex is a face of another simplex
    ///
    /// Proper faces only: equal simplices are not faces of each other.
    pub fn is_face_of(&self, other: &Simplex) -> bool {
        self.dimension < other.dimension && self.vertices.is_subset(&other.vertices)
    }
    /// Get the coboundary (simplices that have this as a face)
    /// Note: This requires the containing simplicial complex to compute
    // NOTE(review): despite the doc comment above, this method only returns
    // the vertices as a Vec — the coboundary computation lives elsewhere.
    pub fn vertices_as_vec(&self) -> Vec<NodeId> {
        self.vertices.iter().copied().collect()
    }
}
impl PartialEq for Simplex {
    // Two simplices are equal iff they have the same vertex set;
    // `id`, `dimension` and `weight` are not compared.
    fn eq(&self, other: &Self) -> bool {
        self.vertices == other.vertices
    }
}
impl Eq for Simplex {}
impl Hash for Simplex {
    // Must agree with `eq`: only the vertex set is hashed.
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Use ordered iteration for consistent hashing
        for v in &self.vertices {
            v.hash(state);
        }
    }
}
/// A simplicial complex built from a graph
///
/// Contains simplices of various dimensions and tracks the incidence relations.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SimplicialComplex {
    /// Simplices organized by dimension (dimension -> simplex id -> simplex)
    pub simplices: HashMap<usize, HashMap<SimplexId, Simplex>>,
    /// Maximum dimension of any simplex added so far
    pub max_dimension: usize,
    /// Face relations: simplex -> its faces
    /// (populated by `build_incidence_maps`)
    face_map: HashMap<SimplexId, Vec<SimplexId>>,
    /// Coface relations: simplex -> simplices it is a face of
    /// (populated by `build_incidence_maps`)
    coface_map: HashMap<SimplexId, Vec<SimplexId>>,
}
impl SimplicialComplex {
    /// Create a new empty simplicial complex
    pub fn new() -> Self {
        Self {
            simplices: HashMap::new(),
            max_dimension: 0,
            face_map: HashMap::new(),
            coface_map: HashMap::new(),
        }
    }

    /// Build a simplicial complex from a graph (flag complex / clique complex)
    ///
    /// The flag complex has an n-simplex for *every* clique of n+1 vertices
    /// (maximal or not), truncated at dimension `max_dim`. Maximal cliques
    /// are enumerated with Bron-Kerbosch and all of their sub-cliques of
    /// admissible size are then added as simplices; since every clique is
    /// contained in some maximal clique, this yields the full flag complex.
    pub fn from_graph_cliques(
        nodes: &[NodeId],
        edges: &[(NodeId, NodeId)],
        max_dim: usize,
    ) -> Self {
        let mut complex = Self::new();
        // Build adjacency for clique detection
        let mut adjacency: HashMap<NodeId, HashSet<NodeId>> = HashMap::new();
        for &node in nodes {
            adjacency.insert(node, HashSet::new());
        }
        for &(u, v) in edges {
            adjacency.entry(u).or_default().insert(v);
            adjacency.entry(v).or_default().insert(u);
        }
        // Add 0-simplices (vertices)
        for &node in nodes {
            complex.add_simplex(Simplex::new([node]));
        }
        // Add 1-simplices (edges)
        for &(u, v) in edges {
            complex.add_simplex(Simplex::new([u, v]));
        }
        // Find higher-dimensional cliques using the Bron-Kerbosch algorithm
        if max_dim >= 2 {
            let all_nodes: HashSet<NodeId> = nodes.iter().copied().collect();
            Self::find_cliques_recursive(
                &mut complex,
                &adjacency,
                BTreeSet::new(),
                all_nodes,
                HashSet::new(),
                max_dim,
            );
        }
        complex.build_incidence_maps();
        complex
    }

    /// Bron-Kerbosch algorithm (pivoting variant) for finding maximal cliques
    ///
    /// When a maximal clique R is found, every sub-clique of R with between
    /// 3 and `max_dim + 1` vertices is registered as a simplex. This is what
    /// makes the result a genuine flag complex: adding only the maximal
    /// clique itself would, e.g., leave a 4-clique with `max_dim = 2` with
    /// no triangles at all (the maximal clique is too large and its faces
    /// would never be generated).
    fn find_cliques_recursive(
        complex: &mut SimplicialComplex,
        adjacency: &HashMap<NodeId, HashSet<NodeId>>,
        r: BTreeSet<NodeId>,
        mut p: HashSet<NodeId>,
        mut x: HashSet<NodeId>,
        max_dim: usize,
    ) {
        if p.is_empty() && x.is_empty() {
            // R is a maximal clique; add all of its admissible sub-cliques.
            if r.len() >= 3 {
                let clique: Vec<NodeId> = r.iter().copied().collect();
                let max_size = (max_dim + 1).min(clique.len());
                for size in 3..=max_size {
                    Self::add_subcliques(complex, &clique, size);
                }
            }
            return;
        }
        // Pivot on an arbitrary element of P ∪ X; only vertices outside the
        // pivot's neighborhood need to be branched on.
        let pivot = p.iter().chain(x.iter()).next().copied();
        if let Some(pivot_node) = pivot {
            let pivot_neighbors = adjacency.get(&pivot_node).cloned().unwrap_or_default();
            let candidates: Vec<NodeId> = p.difference(&pivot_neighbors).copied().collect();
            for v in candidates {
                let v_neighbors = adjacency.get(&v).cloned().unwrap_or_default();
                let mut new_r = r.clone();
                new_r.insert(v);
                let new_p: HashSet<NodeId> = p.intersection(&v_neighbors).copied().collect();
                let new_x: HashSet<NodeId> = x.intersection(&v_neighbors).copied().collect();
                Self::find_cliques_recursive(complex, adjacency, new_r, new_p, new_x, max_dim);
                p.remove(&v);
                x.insert(v);
            }
        }
    }

    /// Add every `size`-vertex subset of `clique` as a simplex.
    ///
    /// Uses standard lexicographic combination enumeration over index
    /// vectors. Duplicates across overlapping maximal cliques are harmless
    /// because `add_simplex` keys by the deterministic `SimplexId`.
    fn add_subcliques(complex: &mut SimplicialComplex, clique: &[NodeId], size: usize) {
        let n = clique.len();
        if size == 0 || size > n {
            return;
        }
        // idx holds a strictly increasing index combination into `clique`.
        let mut idx: Vec<usize> = (0..size).collect();
        loop {
            complex.add_simplex(Simplex::new(idx.iter().map(|&i| clique[i])));
            // Advance to the next combination: find the rightmost index that
            // can still be incremented, bump it, and reset the tail.
            let mut i = size;
            while i > 0 && idx[i - 1] == i - 1 + n - size {
                i -= 1;
            }
            if i == 0 {
                return; // all combinations emitted
            }
            idx[i - 1] += 1;
            for j in i..size {
                idx[j] = idx[j - 1] + 1;
            }
        }
    }

    /// Add a simplex to the complex
    ///
    /// Inserting a simplex with an id already present replaces it.
    /// Incidence maps are NOT updated here; call `build_incidence_maps`.
    pub fn add_simplex(&mut self, simplex: Simplex) {
        let dim = simplex.dimension;
        self.max_dimension = self.max_dimension.max(dim);
        self.simplices
            .entry(dim)
            .or_default()
            .insert(simplex.id, simplex);
    }

    /// Build the face and coface incidence maps
    ///
    /// Face ids are derived combinatorially via `Simplex::boundary` and are
    /// not checked for membership in the complex.
    fn build_incidence_maps(&mut self) {
        self.face_map.clear();
        self.coface_map.clear();
        // For each simplex of positive dimension, compute its faces
        for dim in 1..=self.max_dimension {
            if let Some(simplices) = self.simplices.get(&dim) {
                for (id, simplex) in simplices {
                    let faces = simplex.boundary();
                    let face_ids: Vec<SimplexId> = faces.iter().map(|(f, _)| f.id).collect();
                    self.face_map.insert(*id, face_ids.clone());
                    // Update coface map (inverse relation)
                    for face_id in face_ids {
                        self.coface_map.entry(face_id).or_default().push(*id);
                    }
                }
            }
        }
    }

    /// Get simplices of a specific dimension
    pub fn simplices_of_dim(&self, dim: usize) -> impl Iterator<Item = &Simplex> {
        self.simplices
            .get(&dim)
            .into_iter()
            .flat_map(|s| s.values())
    }

    /// Get a simplex by ID, searching all dimensions
    pub fn get_simplex(&self, id: SimplexId) -> Option<&Simplex> {
        for simplices in self.simplices.values() {
            if let Some(s) = simplices.get(&id) {
                return Some(s);
            }
        }
        None
    }

    /// Get the faces of a simplex (None until `build_incidence_maps` has run)
    pub fn faces(&self, id: SimplexId) -> Option<&[SimplexId]> {
        self.face_map.get(&id).map(|v| v.as_slice())
    }

    /// Get the cofaces (simplices that have this as a face)
    pub fn cofaces(&self, id: SimplexId) -> Option<&[SimplexId]> {
        self.coface_map.get(&id).map(|v| v.as_slice())
    }

    /// Count simplices of each dimension, indexed 0..=max_dimension
    pub fn simplex_counts(&self) -> Vec<usize> {
        (0..=self.max_dimension)
            .map(|d| self.simplices.get(&d).map(|s| s.len()).unwrap_or(0))
            .collect()
    }

    /// Total number of simplices across all dimensions
    pub fn total_simplices(&self) -> usize {
        self.simplices.values().map(|s| s.len()).sum()
    }

    /// Euler characteristic: sum over n of (-1)^n * |K_n|
    pub fn euler_characteristic(&self) -> i64 {
        let mut chi = 0i64;
        for (dim, simplices) in &self.simplices {
            let count = simplices.len() as i64;
            if dim % 2 == 0 {
                chi += count;
            } else {
                chi -= count;
            }
        }
        chi
    }
}
impl Default for SimplicialComplex {
fn default() -> Self {
Self::new()
}
}
/// A chain in the chain complex C_n(K)
///
/// Represents a formal sum of n-simplices with coefficients
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Chain {
    /// Dimension of the chain
    pub dimension: usize,
    /// Simplex coefficients (simplex ID -> coefficient); `from_simplex` and
    /// `add_simplex` drop entries with magnitude below ~1e-10, so absent
    /// keys mean a zero coefficient
    pub coefficients: HashMap<SimplexId, f64>,
}
impl Chain {
    /// Coefficients with magnitude at or below this threshold are treated
    /// as numerically zero and are not stored.
    const EPS: f64 = 1e-10;

    /// Create a zero chain of given dimension
    pub fn zero(dimension: usize) -> Self {
        Self {
            dimension,
            coefficients: HashMap::new(),
        }
    }

    /// Create a chain from a single simplex
    ///
    /// A numerically-zero coefficient yields the zero chain.
    pub fn from_simplex(simplex: &Simplex, coefficient: f64) -> Self {
        let mut coefficients = HashMap::new();
        if coefficient.abs() > Self::EPS {
            coefficients.insert(simplex.id, coefficient);
        }
        Self {
            dimension: simplex.dimension,
            coefficients,
        }
    }

    /// Add a simplex to the chain, accumulating with any existing coefficient
    ///
    /// Entries whose accumulated coefficient becomes numerically zero
    /// (cancellation) are removed so `is_zero` stays reliable.
    pub fn add_simplex(&mut self, id: SimplexId, coefficient: f64) {
        if coefficient.abs() > Self::EPS {
            *self.coefficients.entry(id).or_insert(0.0) += coefficient;
            // Remove if coefficient is now essentially zero
            if self
                .coefficients
                .get(&id)
                .map(|c| c.abs() < Self::EPS)
                .unwrap_or(false)
            {
                self.coefficients.remove(&id);
            }
        }
    }

    /// Scale the chain by a constant
    ///
    /// Entries scaled down to numerical zero are pruned, preserving the
    /// invariant (also maintained by `from_simplex` and `add_simplex`) that
    /// no stored coefficient is essentially zero. Without the pruning,
    /// `scale(0.0)` would leave stale entries and `is_zero` would wrongly
    /// report a non-zero chain.
    pub fn scale(&mut self, factor: f64) {
        for coeff in self.coefficients.values_mut() {
            *coeff *= factor;
        }
        self.coefficients.retain(|_, c| c.abs() > Self::EPS);
    }

    /// Add another chain to this one (in place)
    ///
    /// # Panics
    /// Panics if the chain dimensions differ.
    pub fn add(&mut self, other: &Chain) {
        assert_eq!(
            self.dimension, other.dimension,
            "Chain dimensions must match"
        );
        for (&id, &coeff) in &other.coefficients {
            self.add_simplex(id, coeff);
        }
    }

    /// Check if chain is zero (no stored coefficients)
    pub fn is_zero(&self) -> bool {
        self.coefficients.is_empty()
    }

    /// L2 norm of the chain (sqrt of the sum of squared coefficients)
    pub fn norm(&self) -> f64 {
        self.coefficients
            .values()
            .map(|c| c * c)
            .sum::<f64>()
            .sqrt()
    }
}
/// A cochain in the cochain complex C^n(K)
///
/// Represents a function from n-simplices to R
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Cochain {
    /// Dimension of the cochain
    pub dimension: usize,
    /// Values on simplices (simplex ID -> value); `set` drops values with
    /// magnitude below ~1e-10, so absent keys mean a zero value
    pub values: HashMap<SimplexId, f64>,
}
impl Cochain {
    /// Values with magnitude at or below this threshold are treated as
    /// numerically zero and are not stored.
    const EPS: f64 = 1e-10;

    /// Create a zero cochain of given dimension
    pub fn zero(dimension: usize) -> Self {
        Self {
            dimension,
            values: HashMap::new(),
        }
    }

    /// Create a cochain from values
    ///
    /// Note: the map is taken as-is; near-zero entries are not filtered here.
    pub fn from_values(dimension: usize, values: HashMap<SimplexId, f64>) -> Self {
        Self { dimension, values }
    }

    /// Set the value on a simplex; a numerically-zero value removes the entry
    pub fn set(&mut self, id: SimplexId, value: f64) {
        if value.abs() > Self::EPS {
            self.values.insert(id, value);
        } else {
            self.values.remove(&id);
        }
    }

    /// Get the value on a simplex (0.0 for absent entries)
    pub fn get(&self, id: SimplexId) -> f64 {
        self.values.get(&id).copied().unwrap_or(0.0)
    }

    /// Evaluate the cochain on a chain (inner product of coefficients)
    ///
    /// # Panics
    /// Panics if the dimensions differ.
    pub fn evaluate(&self, chain: &Chain) -> f64 {
        assert_eq!(self.dimension, chain.dimension, "Dimensions must match");
        let mut sum = 0.0;
        for (&id, &coeff) in &chain.coefficients {
            sum += coeff * self.get(id);
        }
        sum
    }

    /// Add another cochain to this one (in place)
    ///
    /// # Panics
    /// Panics if the cochain dimensions differ.
    pub fn add(&mut self, other: &Cochain) {
        assert_eq!(
            self.dimension, other.dimension,
            "Cochain dimensions must match"
        );
        for (&id, &value) in &other.values {
            let new_val = self.get(id) + value;
            self.set(id, new_val);
        }
    }

    /// Scale the cochain
    ///
    /// Entries scaled down to numerical zero are pruned so the sparse
    /// representation invariant maintained by `set` survives scaling by 0
    /// or a tiny factor; otherwise `is_zero` would wrongly report a
    /// zero-scaled cochain as non-zero.
    pub fn scale(&mut self, factor: f64) {
        for value in self.values.values_mut() {
            *value *= factor;
        }
        self.values.retain(|_, v| v.abs() > Self::EPS);
    }

    /// L2 norm of the cochain (sqrt of the sum of squared values)
    pub fn norm(&self) -> f64 {
        self.values.values().map(|v| v * v).sum::<f64>().sqrt()
    }

    /// Check if cochain is zero (no stored values)
    pub fn is_zero(&self) -> bool {
        self.values.is_empty()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use uuid::Uuid;

    /// Fresh random node id for building test simplices.
    fn make_node_id() -> NodeId {
        Uuid::new_v4()
    }

    #[test]
    fn test_simplex_creation() {
        let (a, b, c) = (make_node_id(), make_node_id(), make_node_id());
        // Dimension = number of vertices - 1.
        assert_eq!(Simplex::new([a]).dimension, 0);
        assert_eq!(Simplex::new([a, b]).dimension, 1);
        assert_eq!(Simplex::new([a, b, c]).dimension, 2);
    }

    #[test]
    fn test_simplex_boundary() {
        let (a, b, c) = (make_node_id(), make_node_id(), make_node_id());
        // Boundary of edge [a, b] has two vertex faces (b - a).
        assert_eq!(Simplex::new([a, b]).boundary().len(), 2);
        // Boundary of triangle [a, b, c] has three edge faces.
        assert_eq!(Simplex::new([a, b, c]).boundary().len(), 3);
    }

    #[test]
    fn test_simplicial_complex() {
        let (a, b, c) = (make_node_id(), make_node_id(), make_node_id());
        let complex = SimplicialComplex::from_graph_cliques(
            &[a, b, c],
            &[(a, b), (b, c), (a, c)],
            2,
        );
        // A filled triangle: 3 vertices, 3 edges, 1 two-simplex.
        assert_eq!(complex.simplex_counts(), vec![3, 3, 1]);
        // Euler characteristic: 3 - 3 + 1 = 1
        assert_eq!(complex.euler_characteristic(), 1);
    }

    #[test]
    fn test_chain_operations() {
        let edge = Simplex::new([make_node_id(), make_node_id()]);
        let mut chain = Chain::from_simplex(&edge, 2.0);
        assert_eq!(chain.dimension, 1);
        assert!(!chain.is_zero());
        // Halving the chain halves the coefficient.
        chain.scale(0.5);
        assert_eq!(chain.coefficients.get(&edge.id), Some(&1.0));
    }

    #[test]
    fn test_cochain_evaluation() {
        let edge = Simplex::new([make_node_id(), make_node_id()]);
        let chain = Chain::from_simplex(&edge, 3.0);
        let mut cochain = Cochain::zero(1);
        cochain.set(edge.id, 2.0);
        // Pairing is the inner product: 3.0 * 2.0 = 6.0
        assert!((cochain.evaluate(&chain) - 6.0).abs() < 1e-10);
    }
}