Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,759 @@
//! CoherenceEnergy Value Object
//!
//! Represents the coherence energy computed from sheaf Laplacian residuals.
//! The energy formula is: E(S) = sum(w_e * |r_e|^2) where r_e = rho_u(x_u) - rho_v(x_v)
//!
//! This module provides immutable value objects for:
//! - Total system energy (lower = more coherent)
//! - Per-edge energies for localization
//! - Per-scope energies for hierarchical analysis
//! - Hotspot identification (highest energy edges)
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Unique identifier for an edge in the sheaf graph
pub type EdgeId = String;
/// Unique identifier for a scope/namespace
pub type ScopeId = String;
/// Energy associated with a single edge
///
/// Built from an edge residual `r_e` and edge weight `w_e`; the stored
/// `energy` is the weighted squared residual norm `w_e * |r_e|^2`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct EdgeEnergy {
    /// Edge identifier
    pub edge_id: EdgeId,
    /// Source node identifier
    pub source: String,
    /// Target node identifier
    pub target: String,
    /// Weighted residual energy: w_e * |r_e|^2
    pub energy: f32,
    /// Raw residual vector (for debugging/analysis)
    /// Left empty when constructed via `new_lightweight`.
    pub residual: Vec<f32>,
    /// Residual norm squared: |r_e|^2
    pub residual_norm_sq: f32,
    /// Edge weight
    pub weight: f32,
}
impl EdgeEnergy {
    /// Build an edge energy from a raw residual vector.
    ///
    /// Derives `|r_e|^2` from `residual` and stores the weighted energy
    /// `weight * |r_e|^2` alongside the residual itself.
    #[inline]
    pub fn new(
        edge_id: impl Into<EdgeId>,
        source: impl Into<String>,
        target: impl Into<String>,
        residual: Vec<f32>,
        weight: f32,
    ) -> Self {
        let norm_sq = compute_norm_sq(&residual);
        Self {
            edge_id: edge_id.into(),
            source: source.into(),
            target: target.into(),
            energy: weight * norm_sq,
            residual,
            residual_norm_sq: norm_sq,
            weight,
        }
    }
    /// Build an edge energy without retaining the residual vector.
    ///
    /// Stores an empty residual; use when the raw vector is not needed for
    /// debugging or analysis.
    #[inline]
    pub fn new_lightweight(
        edge_id: impl Into<EdgeId>,
        source: impl Into<String>,
        target: impl Into<String>,
        residual_norm_sq: f32,
        weight: f32,
    ) -> Self {
        let energy = weight * residual_norm_sq;
        Self {
            edge_id: edge_id.into(),
            source: source.into(),
            target: target.into(),
            energy,
            residual: Vec::new(),
            residual_norm_sq,
            weight,
        }
    }
    /// True when this edge's energy exceeds `threshold`.
    #[inline]
    pub fn is_significant(&self, threshold: f32) -> bool {
        threshold < self.energy
    }
    /// Fraction of `total_energy` contributed by this edge.
    ///
    /// Returns 0.0 when the total is not strictly positive.
    #[inline]
    pub fn contribution_ratio(&self, total_energy: f32) -> f32 {
        (total_energy > 0.0)
            .then(|| self.energy / total_energy)
            .unwrap_or(0.0)
    }
}
/// Energy aggregated by scope/namespace
///
/// Summary statistics over the edges assigned to one scope.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ScopeEnergy {
    /// Scope identifier
    pub scope_id: ScopeId,
    /// Total energy within this scope
    pub energy: f32,
    /// Number of edges in this scope
    pub edge_count: usize,
    /// Average energy per edge (0.0 when the scope has no edges)
    pub average_energy: f32,
    /// Maximum single edge energy
    pub max_edge_energy: f32,
    /// Edge ID with maximum energy (hotspot); `None` for an empty scope
    pub hotspot_edge: Option<EdgeId>,
}
impl ScopeEnergy {
    /// Aggregate a slice of edge energies into a per-scope summary.
    ///
    /// The hotspot is the edge with the highest energy; an empty slice
    /// yields zeroed statistics and no hotspot.
    pub fn from_edges(scope_id: impl Into<ScopeId>, edge_energies: &[&EdgeEnergy]) -> Self {
        let edge_count = edge_energies.len();
        let energy = edge_energies.iter().fold(0.0f32, |acc, e| acc + e.energy);
        let average_energy = match edge_count {
            0 => 0.0,
            n => energy / n as f32,
        };
        // NaN energies compare as equal so the search never panics.
        let hottest = edge_energies.iter().max_by(|a, b| {
            a.energy
                .partial_cmp(&b.energy)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        let (max_edge_energy, hotspot_edge) = match hottest {
            Some(e) => (e.energy, Some(e.edge_id.clone())),
            None => (0.0, None),
        };
        Self {
            scope_id: scope_id.into(),
            energy,
            edge_count,
            average_energy,
            max_edge_energy,
            hotspot_edge,
        }
    }
    /// Check if this scope has coherence issues (energy above threshold).
    #[inline]
    pub fn is_incoherent(&self, threshold: f32) -> bool {
        threshold < self.energy
    }
}
/// Information about a coherence hotspot (high-energy region)
///
/// One entry of the ranked list produced by `CoherenceEnergy::hotspots`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HotspotInfo {
    /// Edge identifier
    pub edge_id: EdgeId,
    /// Energy value
    pub energy: f32,
    /// Source node
    pub source: String,
    /// Target node
    pub target: String,
    /// Rank (1 = highest energy)
    pub rank: usize,
    /// Percentage of total energy (0.0-100.0 scale)
    pub percentage: f32,
}
/// Snapshot of coherence energy at a specific timestamp
///
/// A lightweight record of total energy plus the fingerprint of the content
/// it was computed from, used for staleness checks.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnergySnapshot {
    /// Total system energy
    pub total_energy: f32,
    /// Timestamp of computation
    pub timestamp: DateTime<Utc>,
    /// Content fingerprint for staleness detection
    pub fingerprint: String,
}
impl EnergySnapshot {
    /// Capture a snapshot of `total_energy` stamped with the current UTC time.
    pub fn new(total_energy: f32, fingerprint: impl Into<String>) -> Self {
        let fingerprint = fingerprint.into();
        let timestamp = Utc::now();
        Self {
            total_energy,
            timestamp,
            fingerprint,
        }
    }
    /// A snapshot is stale once the content fingerprint no longer matches.
    #[inline]
    pub fn is_stale(&self, current_fingerprint: &str) -> bool {
        current_fingerprint != self.fingerprint
    }
    /// Milliseconds elapsed since this snapshot was taken.
    pub fn age_ms(&self) -> i64 {
        (Utc::now() - self.timestamp).num_milliseconds()
    }
}
/// Global coherence energy: E(S) = sum(w_e * |r_e|^2)
///
/// This is the main value object representing the coherence state of the entire system.
/// Edges that have no entry in the scope mapping are aggregated under the
/// "default" scope.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CoherenceEnergy {
    /// Total system energy (lower = more coherent)
    pub total_energy: f32,
    /// Per-edge energies for localization
    pub edge_energies: HashMap<EdgeId, EdgeEnergy>,
    /// Energy by scope/namespace
    pub scope_energies: HashMap<ScopeId, ScopeEnergy>,
    /// Computation timestamp
    pub computed_at: DateTime<Utc>,
    /// Fingerprint for change detection (Blake3 hash)
    pub fingerprint: String,
    /// Number of edges computed
    pub edge_count: usize,
    /// Number of nodes in the graph
    /// (supplied by the caller; not derived from `edge_energies`)
    pub node_count: usize,
}
impl CoherenceEnergy {
    /// Create a new coherence energy result
    ///
    /// Total energy is the sum of all per-edge energies; edges are also
    /// grouped per scope via `scope_mapping` (unmapped edges land in the
    /// "default" scope).
    pub fn new(
        edge_energies: HashMap<EdgeId, EdgeEnergy>,
        scope_mapping: &HashMap<EdgeId, ScopeId>,
        node_count: usize,
        fingerprint: impl Into<String>,
    ) -> Self {
        let total_energy: f32 = edge_energies.values().map(|e| e.energy).sum();
        let edge_count = edge_energies.len();
        // Aggregate by scope
        let scope_energies = Self::aggregate_by_scope(&edge_energies, scope_mapping);
        Self {
            total_energy,
            edge_energies,
            scope_energies,
            computed_at: Utc::now(),
            fingerprint: fingerprint.into(),
            edge_count,
            node_count,
        }
    }
    /// Create an empty coherence energy
    ///
    /// Zero energy, no edges/scopes, empty fingerprint.
    pub fn empty() -> Self {
        Self {
            total_energy: 0.0,
            edge_energies: HashMap::new(),
            scope_energies: HashMap::new(),
            computed_at: Utc::now(),
            fingerprint: String::new(),
            edge_count: 0,
            node_count: 0,
        }
    }
    /// Check if the system is coherent (energy below threshold)
    #[inline]
    pub fn is_coherent(&self, threshold: f32) -> bool {
        self.total_energy < threshold
    }
    /// Get the average energy per edge
    ///
    /// Returns 0.0 when there are no edges.
    #[inline]
    pub fn average_edge_energy(&self) -> f32 {
        if self.edge_count > 0 {
            self.total_energy / self.edge_count as f32
        } else {
            0.0
        }
    }
    /// Get energy for a specific scope
    ///
    /// Unknown scopes report 0.0 rather than an error.
    pub fn scope_energy_for(&self, scope_id: &str) -> f32 {
        self.scope_energies
            .get(scope_id)
            .map(|s| s.energy)
            .unwrap_or(0.0)
    }
    /// Identify the top-k hotspots (highest energy edges)
    ///
    /// Results are ranked 1..=k by descending energy, each annotated with
    /// its percentage share of the total energy.
    pub fn hotspots(&self, k: usize) -> Vec<HotspotInfo> {
        let mut sorted: Vec<_> = self.edge_energies.values().collect();
        // Descending sort; NaN energies compare as equal so sorting never panics.
        sorted.sort_by(|a, b| {
            b.energy
                .partial_cmp(&a.energy)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        sorted
            .into_iter()
            .take(k)
            .enumerate()
            .map(|(i, e)| HotspotInfo {
                edge_id: e.edge_id.clone(),
                energy: e.energy,
                source: e.source.clone(),
                target: e.target.clone(),
                rank: i + 1,
                percentage: if self.total_energy > 0.0 {
                    (e.energy / self.total_energy) * 100.0
                } else {
                    0.0
                },
            })
            .collect()
    }
    /// Get all edges with energy above threshold
    pub fn high_energy_edges(&self, threshold: f32) -> Vec<&EdgeEnergy> {
        self.edge_energies
            .values()
            .filter(|e| e.energy > threshold)
            .collect()
    }
    /// Create a snapshot of the current energy state
    ///
    /// The snapshot carries `computed_at`, not the current wall-clock time.
    pub fn snapshot(&self) -> EnergySnapshot {
        EnergySnapshot {
            total_energy: self.total_energy,
            timestamp: self.computed_at,
            fingerprint: self.fingerprint.clone(),
        }
    }
    /// Get the energy distribution statistics
    ///
    /// Uses the population variance (divides by N) and returns all-zero
    /// statistics for an empty edge set.
    pub fn statistics(&self) -> EnergyStatistics {
        if self.edge_energies.is_empty() {
            return EnergyStatistics::default();
        }
        let energies: Vec<f32> = self.edge_energies.values().map(|e| e.energy).collect();
        let min = energies
            .iter()
            .copied()
            .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .unwrap_or(0.0);
        let max = energies
            .iter()
            .copied()
            .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .unwrap_or(0.0);
        let mean = self.average_edge_energy();
        // Compute standard deviation
        let variance: f32 =
            energies.iter().map(|e| (e - mean).powi(2)).sum::<f32>() / energies.len() as f32;
        let std_dev = variance.sqrt();
        // Compute median
        let mut sorted_energies = energies.clone();
        sorted_energies.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        let median = if sorted_energies.len() % 2 == 0 {
            // Even count: average of the two middle values.
            let mid = sorted_energies.len() / 2;
            (sorted_energies[mid - 1] + sorted_energies[mid]) / 2.0
        } else {
            sorted_energies[sorted_energies.len() / 2]
        };
        EnergyStatistics {
            min,
            max,
            mean,
            median,
            std_dev,
            count: self.edge_count,
        }
    }
    /// Aggregate edge energies by scope
    ///
    /// Edges missing from `scope_mapping` are grouped under the "default"
    /// scope.
    fn aggregate_by_scope(
        edge_energies: &HashMap<EdgeId, EdgeEnergy>,
        scope_mapping: &HashMap<EdgeId, ScopeId>,
    ) -> HashMap<ScopeId, ScopeEnergy> {
        // Group edges by scope
        let mut scope_groups: HashMap<ScopeId, Vec<&EdgeEnergy>> = HashMap::new();
        for (edge_id, edge_energy) in edge_energies {
            let scope_id = scope_mapping
                .get(edge_id)
                .cloned()
                .unwrap_or_else(|| "default".to_string());
            scope_groups.entry(scope_id).or_default().push(edge_energy);
        }
        // Create scope energies
        scope_groups
            .into_iter()
            .map(|(scope_id, edges)| {
                let scope_energy = ScopeEnergy::from_edges(&scope_id, &edges);
                (scope_id, scope_energy)
            })
            .collect()
    }
}
/// Statistical summary of energy distribution
///
/// Produced by `CoherenceEnergy::statistics`; all fields are zero for an
/// empty edge set (via `Default`).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct EnergyStatistics {
    /// Minimum edge energy
    pub min: f32,
    /// Maximum edge energy
    pub max: f32,
    /// Mean edge energy
    pub mean: f32,
    /// Median edge energy
    pub median: f32,
    /// Standard deviation (population formula, divides by N)
    pub std_dev: f32,
    /// Number of edges
    pub count: usize,
}
/// Compute the squared L2 norm of a vector
///
/// Dispatches to a SIMD implementation when the `simd` feature is enabled,
/// otherwise to an unrolled scalar loop. Vectors of eight or fewer elements
/// take a plain scalar path to avoid dispatch overhead.
#[inline]
pub fn compute_norm_sq(v: &[f32]) -> f32 {
    // Fast path for small vectors - avoid SIMD overhead
    if v.len() <= 8 {
        return v.iter().map(|&x| x * x).sum();
    }
    #[cfg(feature = "simd")]
    {
        compute_norm_sq_simd(v)
    }
    #[cfg(not(feature = "simd"))]
    {
        compute_norm_sq_unrolled(v)
    }
}
/// Unrolled scalar computation for non-SIMD builds
///
/// Accumulates into four independent lanes (one per position within a
/// 4-wide chunk) for instruction-level parallelism, then folds the lanes
/// and the tail together.
#[cfg(not(feature = "simd"))]
#[inline]
fn compute_norm_sq_unrolled(v: &[f32]) -> f32 {
    let quads = v.chunks_exact(4);
    let tail = quads.remainder();
    let mut lanes = [0.0f32; 4];
    for quad in quads {
        for (lane, &x) in lanes.iter_mut().zip(quad) {
            *lane += x * x;
        }
    }
    let mut total = lanes[0] + lanes[1] + lanes[2] + lanes[3];
    for &x in tail {
        total += x * x;
    }
    total
}
/// SIMD-optimized squared norm computation
///
/// Processes eight lanes at a time with `wide::f32x8`, then adds the scalar
/// remainder. Only compiled with the `simd` feature.
#[cfg(feature = "simd")]
fn compute_norm_sq_simd(v: &[f32]) -> f32 {
    use wide::f32x8;
    let chunks = v.chunks_exact(8);
    let remainder = chunks.remainder();
    let mut sum = f32x8::ZERO;
    for chunk in chunks {
        // chunks_exact guarantees exactly 8 elements, so try_from cannot fail.
        let vals = f32x8::from(<[f32; 8]>::try_from(chunk).unwrap());
        sum += vals * vals;
    }
    // Horizontal add across the eight lanes.
    let mut total: f32 = sum.reduce_add();
    // Handle remainder
    for &val in remainder {
        total += val * val;
    }
    total
}
/// Compute the residual between two projected states
///
/// r_e = rho_u(x_u) - rho_v(x_v)
///
/// Returns the element-wise difference `projected_source - projected_target`.
/// In debug builds the inputs must have equal length; with debug assertions
/// off, the result is truncated to the shorter input.
#[inline]
pub fn compute_residual(projected_source: &[f32], projected_target: &[f32]) -> Vec<f32> {
    debug_assert_eq!(
        projected_source.len(),
        projected_target.len(),
        "Projected vectors must have same dimension"
    );
    #[cfg(feature = "simd")]
    {
        // Previously a Vec was pre-allocated here and then discarded when the
        // SIMD helper allocated its own result; return the helper's buffer
        // directly instead.
        compute_residual_simd(projected_source, projected_target)
    }
    #[cfg(not(feature = "simd"))]
    {
        // The zip/map/collect form compiles to the same vectorized loop as the
        // previous hand-unrolled version, sizes the Vec once from the
        // iterator's size hint, and has no dead allocation.
        projected_source
            .iter()
            .zip(projected_target)
            .map(|(a, b)| a - b)
            .collect()
    }
}
/// Compute residual into pre-allocated buffer (zero allocation)
///
/// Writes `projected_source[i] - projected_target[i]` into `result[i]`.
/// All three slices must have the same length (checked in debug builds).
#[inline]
pub fn compute_residual_into(
    projected_source: &[f32],
    projected_target: &[f32],
    result: &mut [f32],
) {
    debug_assert_eq!(
        projected_source.len(),
        projected_target.len(),
        "Projected vectors must have same dimension"
    );
    debug_assert_eq!(
        result.len(),
        projected_source.len(),
        "Result buffer size mismatch"
    );
    // Lock-step iteration lets the compiler elide bounds checks and
    // auto-vectorize the subtraction.
    for ((slot, &a), &b) in result
        .iter_mut()
        .zip(projected_source)
        .zip(projected_target)
    {
        *slot = a - b;
    }
}
/// Compute residual norm squared directly without allocating residual vector
/// This is the most efficient path when the residual vector itself is not needed
///
/// Equivalent to `compute_norm_sq(&compute_residual(a, b))` but fuses the
/// subtraction and accumulation into one pass with no allocation.
#[inline]
pub fn compute_residual_norm_sq(projected_source: &[f32], projected_target: &[f32]) -> f32 {
    debug_assert_eq!(
        projected_source.len(),
        projected_target.len(),
        "Projected vectors must have same dimension"
    );
    let n = projected_source.len();
    // Short vectors: a straight fused loop has the least overhead.
    if n <= 8 {
        return projected_source
            .iter()
            .zip(projected_target)
            .map(|(a, b)| {
                let d = a - b;
                d * d
            })
            .sum();
    }
    // Longer vectors: four independent accumulator lanes expose
    // instruction-level parallelism; the summation order matches the
    // original unrolled loop exactly.
    let quads = n / 4;
    let mut lanes = [0.0f32; 4];
    for q in 0..quads {
        let i = q * 4;
        for k in 0..4 {
            let d = projected_source[i + k] - projected_target[i + k];
            lanes[k] += d * d;
        }
    }
    let mut sum = lanes[0] + lanes[1] + lanes[2] + lanes[3];
    // Fold in the tail past the last full quad.
    for i in (quads * 4)..n {
        let d = projected_source[i] - projected_target[i];
        sum += d * d;
    }
    sum
}
/// SIMD-optimized residual computation
///
/// Subtracts eight lanes at a time with `wide::f32x8` and finishes the tail
/// with scalar subtraction. Only compiled with the `simd` feature.
#[cfg(feature = "simd")]
fn compute_residual_simd(a: &[f32], b: &[f32]) -> Vec<f32> {
    use wide::f32x8;
    let mut result = vec![0.0f32; a.len()];
    let chunks_a = a.chunks_exact(8);
    let chunks_b = b.chunks_exact(8);
    let chunks_r = result.chunks_exact_mut(8);
    let remainder_a = chunks_a.remainder();
    let remainder_b = chunks_b.remainder();
    for ((chunk_a, chunk_b), chunk_r) in chunks_a.zip(chunks_b).zip(chunks_r) {
        // chunks_exact guarantees exactly 8 elements, so try_from cannot fail.
        let va = f32x8::from(<[f32; 8]>::try_from(chunk_a).unwrap());
        let vb = f32x8::from(<[f32; 8]>::try_from(chunk_b).unwrap());
        let diff = va - vb;
        let arr: [f32; 8] = diff.into();
        chunk_r.copy_from_slice(&arr);
    }
    // Handle remainder
    // offset = index of the first element not covered by a full 8-wide chunk.
    let offset = a.len() - remainder_a.len();
    for (i, (&va, &vb)) in remainder_a.iter().zip(remainder_b.iter()).enumerate() {
        result[offset + i] = va - vb;
    }
    result
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_edge_energy_creation() {
        // |r|^2 = 1 with weight 2 => energy 2.
        let residual = vec![1.0, 0.0, 0.0];
        let edge = EdgeEnergy::new("e1", "n1", "n2", residual, 2.0);
        assert_eq!(edge.edge_id, "e1");
        assert_eq!(edge.residual_norm_sq, 1.0);
        assert_eq!(edge.energy, 2.0); // weight * norm_sq
    }
    #[test]
    fn test_coherence_energy_hotspots() {
        // Unit weights: edge energies are 1, 4 and 9 respectively.
        let mut edge_energies = HashMap::new();
        edge_energies.insert(
            "e1".to_string(),
            EdgeEnergy::new("e1", "n1", "n2", vec![1.0], 1.0),
        );
        edge_energies.insert(
            "e2".to_string(),
            EdgeEnergy::new("e2", "n2", "n3", vec![2.0], 1.0),
        );
        edge_energies.insert(
            "e3".to_string(),
            EdgeEnergy::new("e3", "n3", "n4", vec![3.0], 1.0),
        );
        let scope_mapping = HashMap::new();
        let energy = CoherenceEnergy::new(edge_energies, &scope_mapping, 4, "fp1");
        let hotspots = energy.hotspots(2);
        assert_eq!(hotspots.len(), 2);
        assert_eq!(hotspots[0].edge_id, "e3"); // highest energy
        assert_eq!(hotspots[1].edge_id, "e2");
    }
    #[test]
    fn test_compute_norm_sq() {
        let v = vec![3.0, 4.0];
        assert_eq!(compute_norm_sq(&v), 25.0);
        let v = vec![1.0, 2.0, 2.0];
        assert_eq!(compute_norm_sq(&v), 9.0);
    }
    #[test]
    fn test_compute_residual() {
        let a = vec![1.0, 2.0, 3.0];
        let b = vec![0.5, 1.0, 2.0];
        let r = compute_residual(&a, &b);
        assert_eq!(r.len(), 3);
        assert!((r[0] - 0.5).abs() < 1e-6);
        assert!((r[1] - 1.0).abs() < 1e-6);
        assert!((r[2] - 1.0).abs() < 1e-6);
    }
    #[test]
    fn test_energy_statistics() {
        // Edge energies 1, 4, 9 => min 1, max 9.
        let mut edge_energies = HashMap::new();
        edge_energies.insert(
            "e1".to_string(),
            EdgeEnergy::new("e1", "n1", "n2", vec![1.0], 1.0),
        );
        edge_energies.insert(
            "e2".to_string(),
            EdgeEnergy::new("e2", "n2", "n3", vec![2.0], 1.0),
        );
        edge_energies.insert(
            "e3".to_string(),
            EdgeEnergy::new("e3", "n3", "n4", vec![3.0], 1.0),
        );
        let scope_mapping = HashMap::new();
        let energy = CoherenceEnergy::new(edge_energies, &scope_mapping, 4, "fp1");
        let stats = energy.statistics();
        assert_eq!(stats.count, 3);
        assert_eq!(stats.min, 1.0);
        assert_eq!(stats.max, 9.0);
    }
    #[test]
    fn test_scope_energy_aggregation() {
        let e1 = EdgeEnergy::new("e1", "n1", "n2", vec![1.0], 1.0);
        let e2 = EdgeEnergy::new("e2", "n2", "n3", vec![2.0], 1.0);
        let scope = ScopeEnergy::from_edges("scope1", &[&e1, &e2]);
        assert_eq!(scope.edge_count, 2);
        assert_eq!(scope.energy, 5.0); // 1 + 4
        assert_eq!(scope.hotspot_edge, Some("e2".to_string()));
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,616 @@
//! Energy History Tracking
//!
//! This module provides time-series tracking of coherence energy for trend analysis,
//! anomaly detection, and adaptive threshold tuning.
//!
//! # Features
//!
//! - Rolling window of energy snapshots
//! - Trend detection (increasing, decreasing, stable)
//! - Anomaly detection using statistical methods
//! - Persistence tracking for threshold tuning
//!
//! # Example
//!
//! ```rust,ignore
//! use prime_radiant::coherence::{EnergyHistory, EnergyHistoryConfig};
//!
//! let mut history = EnergyHistory::new(EnergyHistoryConfig::default());
//!
//! // Record energy values
//! history.record(1.0);
//! history.record(1.2);
//! history.record(1.5);
//!
//! // Get trend
//! let trend = history.trend();
//! println!("Energy is {:?}", trend.direction);
//! ```
use chrono::{DateTime, Duration, Utc};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
/// Configuration for energy history tracking
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnergyHistoryConfig {
    /// Maximum number of entries to keep
    /// (oldest entries are evicted once this is exceeded)
    pub max_entries: usize,
    /// Window size for trend calculation
    pub trend_window: usize,
    /// Threshold for persistence detection (seconds)
    pub persistence_window_secs: u64,
    /// Number of standard deviations for anomaly detection
    /// (z-score cutoff)
    pub anomaly_sigma: f32,
    /// Minimum entries before trend analysis
    /// (also gates anomaly detection)
    pub min_entries: usize,
}
impl Default for EnergyHistoryConfig {
    /// Defaults: 1000-entry window, 10-sample trend, 5-minute persistence,
    /// 3-sigma anomaly cutoff, at least 5 entries before analysis.
    fn default() -> Self {
        Self {
            max_entries: 1000,
            trend_window: 10,
            persistence_window_secs: 300, // 5 minutes
            anomaly_sigma: 3.0,
            min_entries: 5,
        }
    }
}
/// Direction of energy trend
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum TrendDirection {
    /// Energy is increasing
    Increasing,
    /// Energy is decreasing
    Decreasing,
    /// Energy is relatively stable
    Stable,
    /// Not enough data to determine trend
    /// (fewer than `min_entries` samples in the trend window)
    Unknown,
}
/// Result of trend analysis
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnergyTrend {
    /// Direction of the trend
    pub direction: TrendDirection,
    /// Slope of the fitted trend line.
    /// NOTE(review): `compute_trend` regresses against the sample index, so
    /// the units are energy per recorded entry, not energy per second as
    /// previously documented — confirm against callers before relying on it.
    pub slope: f32,
    /// R-squared value indicating trend fit quality
    pub r_squared: f32,
    /// Average energy in the window
    pub mean: f32,
    /// Standard deviation in the window
    pub std_dev: f32,
    /// Window size used
    pub window_size: usize,
}
impl EnergyTrend {
    /// True when energy is rising with a slope above `threshold`.
    pub fn is_concerning(&self, threshold: f32) -> bool {
        matches!(self.direction, TrendDirection::Increasing) && self.slope > threshold
    }
    /// True when energy is falling with a reasonably good linear fit.
    pub fn is_improving(&self) -> bool {
        matches!(self.direction, TrendDirection::Decreasing) && self.r_squared > 0.5
    }
}
/// An entry in the energy history
///
/// Private to the module; callers see entries only through accessor methods.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct HistoryEntry {
    /// Energy value
    energy: f32,
    /// Timestamp
    timestamp: DateTime<Utc>,
    /// Whether this was an anomaly
    /// (judged against statistics at the time of recording)
    is_anomaly: bool,
}
/// Time-series tracker for coherence energy
///
/// Keeps a bounded rolling window of samples with O(1) running sums for
/// mean/variance, a cached trend, and lifetime anomaly counters.
#[derive(Debug)]
pub struct EnergyHistory {
    /// Configuration
    config: EnergyHistoryConfig,
    /// History entries
    entries: VecDeque<HistoryEntry>,
    /// Running sum for efficient mean calculation
    running_sum: f64,
    /// Running sum of squares for efficient variance
    running_sum_sq: f64,
    /// Last computed trend
    /// (invalidated whenever a new value is recorded)
    last_trend: Option<EnergyTrend>,
    /// Statistics
    // Lifetime count of recorded values (not reduced by trimming or clear)
    total_entries: u64,
    // Lifetime count of values flagged as anomalies
    anomaly_count: u64,
}
impl EnergyHistory {
    /// Create a new energy history tracker
    pub fn new(config: EnergyHistoryConfig) -> Self {
        Self {
            config,
            entries: VecDeque::new(),
            running_sum: 0.0,
            running_sum_sq: 0.0,
            last_trend: None,
            total_entries: 0,
            anomaly_count: 0,
        }
    }
    /// Record a new energy value
    ///
    /// Convenience wrapper around `record_at` using the current UTC time.
    pub fn record(&mut self, energy: f32) {
        self.record_at(energy, Utc::now());
    }
    /// Record an energy value at a specific time
    ///
    /// Updates the O(1) running statistics, trims the window to
    /// `max_entries`, and invalidates the cached trend.
    pub fn record_at(&mut self, energy: f32, timestamp: DateTime<Utc>) {
        // Check for anomaly before updating stats
        // (so the new value does not influence the baseline it is judged against)
        let is_anomaly = self.is_anomaly(energy);
        if is_anomaly {
            self.anomaly_count += 1;
        }
        // Create entry
        let entry = HistoryEntry {
            energy,
            timestamp,
            is_anomaly,
        };
        // Update running statistics
        self.running_sum += energy as f64;
        self.running_sum_sq += (energy as f64) * (energy as f64);
        // Add to history
        self.entries.push_back(entry);
        self.total_entries += 1;
        // Trim if necessary
        // NOTE(review): subtracting evicted values can accumulate f64 rounding
        // drift over very long runs — presumably acceptable here, confirm.
        while self.entries.len() > self.config.max_entries {
            if let Some(old) = self.entries.pop_front() {
                self.running_sum -= old.energy as f64;
                self.running_sum_sq -= (old.energy as f64) * (old.energy as f64);
            }
        }
        // Invalidate cached trend
        self.last_trend = None;
    }
    /// Get the current energy value
    pub fn current(&self) -> Option<f32> {
        self.entries.back().map(|e| e.energy)
    }
    /// Get the previous energy value
    pub fn previous(&self) -> Option<f32> {
        if self.entries.len() >= 2 {
            self.entries.get(self.entries.len() - 2).map(|e| e.energy)
        } else {
            None
        }
    }
    /// Get the change from previous to current
    ///
    /// `None` until at least two values have been recorded.
    pub fn delta(&self) -> Option<f32> {
        match (self.current(), self.previous()) {
            (Some(curr), Some(prev)) => Some(curr - prev),
            _ => None,
        }
    }
    /// Get the mean energy
    ///
    /// Computed from the running sum in O(1); 0.0 when empty.
    pub fn mean(&self) -> f32 {
        if self.entries.is_empty() {
            0.0
        } else {
            (self.running_sum / self.entries.len() as f64) as f32
        }
    }
    /// Get the standard deviation
    ///
    /// Population formula E[x^2] - E[x]^2, clamped at zero to absorb
    /// floating-point cancellation; 0.0 with fewer than two samples.
    pub fn std_dev(&self) -> f32 {
        let n = self.entries.len();
        if n < 2 {
            return 0.0;
        }
        let mean = self.running_sum / n as f64;
        let variance = (self.running_sum_sq / n as f64) - (mean * mean);
        if variance > 0.0 {
            (variance.sqrt()) as f32
        } else {
            0.0
        }
    }
    /// Get the minimum energy value
    pub fn min(&self) -> Option<f32> {
        self.entries
            .iter()
            .map(|e| e.energy)
            .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
    }
    /// Get the maximum energy value
    pub fn max(&self) -> Option<f32> {
        self.entries
            .iter()
            .map(|e| e.energy)
            .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
    }
    /// Compute the current trend
    ///
    /// Result is cached until the next `record`/`record_at` call.
    pub fn trend(&mut self) -> EnergyTrend {
        if let Some(ref trend) = self.last_trend {
            return trend.clone();
        }
        let trend = self.compute_trend();
        self.last_trend = Some(trend.clone());
        trend
    }
    /// Check if energy has been above threshold for persistence window
    ///
    /// Returns `false` when no entries fall inside the window.
    pub fn is_above_threshold_persistent(&self, threshold: f32) -> bool {
        let window = Duration::seconds(self.config.persistence_window_secs as i64);
        let cutoff = Utc::now() - window;
        // Check all entries within the persistence window
        // (walk newest-to-oldest until the cutoff is crossed)
        let recent: Vec<_> = self
            .entries
            .iter()
            .rev()
            .take_while(|e| e.timestamp >= cutoff)
            .collect();
        if recent.is_empty() {
            return false;
        }
        // All entries must be above threshold
        recent.iter().all(|e| e.energy > threshold)
    }
    /// Check if energy has been below threshold for persistence window
    ///
    /// Mirror of `is_above_threshold_persistent` with the comparison reversed.
    pub fn is_below_threshold_persistent(&self, threshold: f32) -> bool {
        let window = Duration::seconds(self.config.persistence_window_secs as i64);
        let cutoff = Utc::now() - window;
        let recent: Vec<_> = self
            .entries
            .iter()
            .rev()
            .take_while(|e| e.timestamp >= cutoff)
            .collect();
        if recent.is_empty() {
            return false;
        }
        recent.iter().all(|e| e.energy < threshold)
    }
    /// Get entries in the persistence window
    ///
    /// Returned newest-first, as `(energy, timestamp)` pairs.
    pub fn recent_entries(&self, seconds: u64) -> Vec<(f32, DateTime<Utc>)> {
        let window = Duration::seconds(seconds as i64);
        let cutoff = Utc::now() - window;
        self.entries
            .iter()
            .rev()
            .take_while(|e| e.timestamp >= cutoff)
            .map(|e| (e.energy, e.timestamp))
            .collect()
    }
    /// Get the number of entries
    #[inline]
    pub fn len(&self) -> usize {
        self.entries.len()
    }
    /// Check if history is empty
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
    /// Get total entries ever recorded
    #[inline]
    pub fn total_entries(&self) -> u64 {
        self.total_entries
    }
    /// Get anomaly count
    #[inline]
    pub fn anomaly_count(&self) -> u64 {
        self.anomaly_count
    }
    /// Get anomaly rate
    ///
    /// Fraction of all recorded values flagged as anomalies (0.0 when empty).
    pub fn anomaly_rate(&self) -> f32 {
        if self.total_entries > 0 {
            self.anomaly_count as f32 / self.total_entries as f32
        } else {
            0.0
        }
    }
    /// Clear all history
    ///
    /// Note: the lifetime counters `total_entries` and `anomaly_count` are
    /// intentionally not reset here.
    pub fn clear(&mut self) {
        self.entries.clear();
        self.running_sum = 0.0;
        self.running_sum_sq = 0.0;
        self.last_trend = None;
    }
    // Private methods
    /// A value is anomalous when its z-score against the current window
    /// exceeds `anomaly_sigma`; never flags before `min_entries` samples
    /// exist or when the deviation is effectively zero.
    fn is_anomaly(&self, energy: f32) -> bool {
        if self.entries.len() < self.config.min_entries {
            return false;
        }
        let mean = self.mean();
        let std_dev = self.std_dev();
        if std_dev < 1e-10 {
            return false;
        }
        let z_score = ((energy - mean) / std_dev).abs();
        z_score > self.config.anomaly_sigma
    }
    /// Least-squares linear fit over the most recent `trend_window` entries.
    /// The regressor x is the sample index (oldest = 0), not wall-clock time.
    fn compute_trend(&self) -> EnergyTrend {
        let window_size = self.config.trend_window.min(self.entries.len());
        if window_size < self.config.min_entries {
            return EnergyTrend {
                direction: TrendDirection::Unknown,
                slope: 0.0,
                r_squared: 0.0,
                mean: self.mean(),
                std_dev: self.std_dev(),
                window_size,
            };
        }
        // Get recent entries
        let recent: Vec<_> = self.entries.iter().rev().take(window_size).collect();
        // Linear regression: y = mx + b
        // x is the index, y is the energy value
        let n = recent.len() as f64;
        let mut sum_x = 0.0;
        let mut sum_y = 0.0;
        let mut sum_xy = 0.0;
        let mut sum_xx = 0.0;
        // .rev() restores chronological (oldest-first) order for indexing.
        for (i, entry) in recent.iter().rev().enumerate() {
            let x = i as f64;
            let y = entry.energy as f64;
            sum_x += x;
            sum_y += y;
            sum_xy += x * y;
            sum_xx += x * x;
        }
        // Compute slope
        // (guard against a degenerate denominator)
        let slope = if (n * sum_xx - sum_x * sum_x).abs() > 1e-10 {
            ((n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x)) as f32
        } else {
            0.0
        };
        // Compute R-squared
        let mean_y = sum_y / n;
        let mut ss_tot = 0.0;
        let mut ss_res = 0.0;
        // b is the intercept of the fitted line.
        let b = (sum_y - slope as f64 * sum_x) / n;
        for (i, entry) in recent.iter().rev().enumerate() {
            let x = i as f64;
            let y = entry.energy as f64;
            let y_pred = slope as f64 * x + b;
            ss_tot += (y - mean_y).powi(2);
            ss_res += (y - y_pred).powi(2);
        }
        let r_squared = if ss_tot > 1e-10 {
            (1.0 - ss_res / ss_tot) as f32
        } else {
            0.0
        };
        // Determine direction
        let direction = if slope.abs() < 0.001 {
            TrendDirection::Stable
        } else if slope > 0.0 {
            TrendDirection::Increasing
        } else {
            TrendDirection::Decreasing
        };
        // Compute window stats
        // (population variance over the window, not the whole history)
        let window_sum: f64 = recent.iter().map(|e| e.energy as f64).sum();
        let window_mean = (window_sum / n) as f32;
        let window_var: f64 = recent
            .iter()
            .map(|e| {
                let diff = e.energy as f64 - window_sum / n;
                diff * diff
            })
            .sum::<f64>()
            / n;
        let window_std_dev = (window_var.sqrt()) as f32;
        EnergyTrend {
            direction,
            slope,
            r_squared,
            mean: window_mean,
            std_dev: window_std_dev,
            window_size,
        }
    }
}
impl Default for EnergyHistory {
    /// Equivalent to `EnergyHistory::new(EnergyHistoryConfig::default())`.
    fn default() -> Self {
        Self::new(EnergyHistoryConfig::default())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_history_creation() {
        let history = EnergyHistory::default();
        assert!(history.is_empty());
        assert_eq!(history.len(), 0);
    }
    #[test]
    fn test_record_energy() {
        let mut history = EnergyHistory::default();
        history.record(1.0);
        history.record(2.0);
        history.record(3.0);
        assert_eq!(history.len(), 3);
        assert_eq!(history.current(), Some(3.0));
        assert_eq!(history.previous(), Some(2.0));
        assert_eq!(history.delta(), Some(1.0));
    }
    #[test]
    fn test_statistics() {
        let mut history = EnergyHistory::default();
        history.record(1.0);
        history.record(2.0);
        history.record(3.0);
        history.record(4.0);
        history.record(5.0);
        assert_eq!(history.mean(), 3.0);
        assert_eq!(history.min(), Some(1.0));
        assert_eq!(history.max(), Some(5.0));
    }
    #[test]
    fn test_trend_increasing() {
        // Strictly increasing ramp 0..9 must be detected as Increasing.
        let mut history = EnergyHistory::new(EnergyHistoryConfig {
            min_entries: 3,
            trend_window: 5,
            ..Default::default()
        });
        for i in 0..10 {
            history.record(i as f32);
        }
        let trend = history.trend();
        assert_eq!(trend.direction, TrendDirection::Increasing);
        assert!(trend.slope > 0.0);
    }
    #[test]
    fn test_trend_decreasing() {
        // Same ramp recorded in reverse must be detected as Decreasing.
        let mut history = EnergyHistory::new(EnergyHistoryConfig {
            min_entries: 3,
            trend_window: 5,
            ..Default::default()
        });
        for i in (0..10).rev() {
            history.record(i as f32);
        }
        let trend = history.trend();
        assert_eq!(trend.direction, TrendDirection::Decreasing);
        assert!(trend.slope < 0.0);
    }
    #[test]
    fn test_trend_stable() {
        let mut history = EnergyHistory::new(EnergyHistoryConfig {
            min_entries: 3,
            trend_window: 5,
            ..Default::default()
        });
        for _ in 0..10 {
            history.record(5.0);
        }
        let trend = history.trend();
        assert_eq!(trend.direction, TrendDirection::Stable);
        assert!(trend.slope.abs() < 0.01);
    }
    #[test]
    fn test_anomaly_detection() {
        let config = EnergyHistoryConfig {
            anomaly_sigma: 2.0,
            min_entries: 5,
            ..Default::default()
        };
        let mut history = EnergyHistory::new(config);
        // Add normal values
        for _ in 0..10 {
            history.record(5.0);
        }
        // Add anomaly
        history.record(100.0);
        assert!(history.anomaly_count() > 0);
    }
    #[test]
    fn test_history_trimming() {
        let config = EnergyHistoryConfig {
            max_entries: 5,
            ..Default::default()
        };
        let mut history = EnergyHistory::new(config);
        for i in 0..10 {
            history.record(i as f32);
        }
        assert_eq!(history.len(), 5);
        assert_eq!(history.total_entries(), 10);
        // Oldest entries should be trimmed
        assert_eq!(history.min(), Some(5.0));
    }
    #[test]
    fn test_clear() {
        let mut history = EnergyHistory::default();
        history.record(1.0);
        history.record(2.0);
        history.clear();
        assert!(history.is_empty());
        assert_eq!(history.current(), None);
    }
}

View File

@@ -0,0 +1,690 @@
//! Incremental Coherence Computation
//!
//! This module provides efficient incremental updates to coherence energy
//! when only a subset of nodes or edges change. Instead of recomputing
//! the entire graph, we:
//!
//! 1. Track which edges are affected by each node update
//! 2. Recompute only those edge residuals
//! 3. Update the aggregate energy incrementally
//!
//! # Algorithm
//!
//! For a node update at node v:
//! 1. Find all edges incident to v: E_v = {(u,v) | (u,v) in E}
//! 2. For each edge e in E_v, recompute residual r_e
//! 3. Update total energy: E' = E - sum(old_e) + sum(new_e) for e in E_v
//!
//! # Complexity
//!
//! - Full computation: O(|E|) where E is the edge set
//! - Incremental update: O(deg(v)) where deg(v) is the degree of updated node
//!
//! # Example
//!
//! ```rust,ignore
//! use prime_radiant::coherence::{IncrementalEngine, IncrementalConfig};
//!
//! let engine = IncrementalEngine::new(IncrementalConfig::default());
//!
//! // Full computation first
//! let energy = engine.compute_full();
//!
//! // Subsequent updates are incremental
//! engine.node_updated("fact_1");
//! let delta = engine.compute_incremental();
//!
//! println!("Energy changed by: {}", delta.energy_delta);
//! ```
use super::energy::{CoherenceEnergy, EdgeEnergy, EdgeId};
use super::engine::{CoherenceEngine, NodeId};
use chrono::{DateTime, Utc};
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
/// Configuration for incremental computation
///
/// Controls when the engine falls back from per-edge delta updates to a
/// full recomputation, and how much energy history it retains.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IncrementalConfig {
    /// Whether to use incremental mode (when false, every compute() is full)
    pub enabled: bool,
    /// Fraction of edges dirty (0.0-1.0) above which a full recomputation
    /// is performed instead of per-edge incremental updates
    pub full_recompute_threshold: f32,
    /// Whether to batch multiple node updates
    pub batch_updates: bool,
    /// Maximum batch size before forcing computation
    pub max_batch_size: usize,
    /// Whether to track energy history for trend analysis
    pub track_history: bool,
    /// Maximum history entries to keep (oldest entries are dropped first)
    pub history_size: usize,
}
// Defaults favor incremental mode with a generous history buffer.
impl Default for IncrementalConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            full_recompute_threshold: 0.3, // 30% of edges affected -> full recompute
            batch_updates: true,
            max_batch_size: 100,
            track_history: true,
            history_size: 1000,
        }
    }
}
/// Result of an incremental computation
///
/// Captures the before/after energy of a single `compute()` call together
/// with how much work (edges recomputed) was required.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeltaResult {
    /// Change in total energy (new_energy - old_energy)
    pub energy_delta: f32,
    /// New total energy
    pub new_energy: f32,
    /// Previous total energy
    pub old_energy: f32,
    /// Number of edges recomputed
    pub edges_recomputed: usize,
    /// Total edges in graph
    pub total_edges: usize,
    /// Whether full recomputation was used
    pub was_full_recompute: bool,
    /// Computation time in microseconds
    pub compute_time_us: u64,
    /// Timestamp
    pub timestamp: DateTime<Utc>,
}
impl DeltaResult {
    /// Get the relative energy change: `energy_delta / old_energy`.
    ///
    /// When the previous energy is effectively zero, returns 1.0 if the new
    /// energy is non-zero (treated as a 100% change) and 0.0 otherwise.
    pub fn relative_change(&self) -> f32 {
        // Collapsed the nested `else { if … }` into `else if` (clippy:
        // collapsible_else_if); behavior is unchanged.
        if self.old_energy > 1e-10 {
            self.energy_delta / self.old_energy
        } else if self.new_energy > 1e-10 {
            1.0
        } else {
            0.0
        }
    }
    /// Check if energy increased
    #[inline]
    pub fn energy_increased(&self) -> bool {
        self.energy_delta > 0.0
    }
    /// Check if energy decreased
    #[inline]
    pub fn energy_decreased(&self) -> bool {
        self.energy_delta < 0.0
    }
}
/// Update event for tracking changes
///
/// Each variant records what changed in the graph plus the data needed to
/// attribute energy deltas (affected edges, removed energies).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UpdateEvent {
    /// A node's state was updated
    NodeUpdated {
        node_id: NodeId,
        /// Edges incident to the node at update time
        affected_edges: Vec<EdgeId>,
        timestamp: DateTime<Utc>,
    },
    /// An edge was added
    EdgeAdded {
        edge_id: EdgeId,
        timestamp: DateTime<Utc>,
    },
    /// An edge was removed
    EdgeRemoved {
        edge_id: EdgeId,
        /// Energy the edge contributed before removal
        old_energy: f32,
        timestamp: DateTime<Utc>,
    },
    /// A node was added
    NodeAdded {
        node_id: NodeId,
        timestamp: DateTime<Utc>,
    },
    /// A node was removed
    NodeRemoved {
        node_id: NodeId,
        removed_edges: Vec<EdgeId>,
        removed_energy: f32,
        timestamp: DateTime<Utc>,
    },
}
impl UpdateEvent {
    /// Get the timestamp of this event
    pub fn timestamp(&self) -> DateTime<Utc> {
        // All variants carry a `timestamp` field, so a single or-pattern
        // covers them without repetition.
        match self {
            UpdateEvent::NodeUpdated { timestamp, .. }
            | UpdateEvent::EdgeAdded { timestamp, .. }
            | UpdateEvent::EdgeRemoved { timestamp, .. }
            | UpdateEvent::NodeAdded { timestamp, .. }
            | UpdateEvent::NodeRemoved { timestamp, .. } => *timestamp,
        }
    }
    /// Check if this event affects the given edge
    pub fn affects_edge(&self, edge_id: &str) -> bool {
        match self {
            // Compare `&str` against the stored Strings directly instead of
            // allocating a temporary String for `contains` on every call.
            UpdateEvent::NodeUpdated { affected_edges, .. } => {
                affected_edges.iter().any(|e| e == edge_id)
            }
            UpdateEvent::EdgeAdded { edge_id: eid, .. } => eid == edge_id,
            UpdateEvent::EdgeRemoved { edge_id: eid, .. } => eid == edge_id,
            UpdateEvent::NodeAdded { .. } => false,
            UpdateEvent::NodeRemoved { removed_edges, .. } => {
                removed_edges.iter().any(|e| e == edge_id)
            }
        }
    }
}
/// Cache for incremental computation
///
/// `total_energy` is a running sum maintained by `update_edge`/`remove_edge`;
/// it is only trustworthy while `dirty_edges` is empty and the fingerprint
/// matches the current graph.
#[derive(Debug, Default)]
pub struct IncrementalCache {
    /// Cached edge energies (edge_id -> energy value)
    edge_energies: HashMap<EdgeId, f32>,
    /// Cached edge residuals (edge_id -> residual vector)
    edge_residuals: HashMap<EdgeId, Vec<f32>>,
    /// Total cached energy
    total_energy: f32,
    /// Fingerprint when cache was last valid
    last_fingerprint: String,
    /// Dirty edges that need recomputation
    dirty_edges: HashSet<EdgeId>,
    /// Removed edge energies (for delta calculation)
    removed_energies: HashMap<EdgeId, f32>,
}
impl IncrementalCache {
    /// Create a new empty cache
    pub fn new() -> Self {
        Self::default()
    }
    /// Check if the cache is valid for the given fingerprint
    ///
    /// Valid means: built against the same graph fingerprint AND no edges
    /// are pending recomputation.
    #[inline]
    pub fn is_valid(&self, fingerprint: &str) -> bool {
        self.last_fingerprint == fingerprint && self.dirty_edges.is_empty()
    }
    /// Mark an edge as dirty (needs recomputation)
    pub fn mark_dirty(&mut self, edge_id: impl Into<EdgeId>) {
        self.dirty_edges.insert(edge_id.into());
    }
    /// Mark all edges incident to a node as dirty
    pub fn mark_node_dirty(&mut self, incident_edges: &[EdgeId]) {
        // extend() can reserve once from the slice length instead of
        // growing the set one insert at a time.
        self.dirty_edges.extend(incident_edges.iter().cloned());
    }
    /// Update the cache with new edge energy
    pub fn update_edge(&mut self, edge_id: impl Into<EdgeId>, energy: f32, residual: Vec<f32>) {
        let edge_id = edge_id.into();
        // The edge is fresh again.
        self.dirty_edges.remove(&edge_id);
        // Single hash lookup: insert() returns the previous energy (if any),
        // which we subtract before adding the new contribution. The original
        // did a separate get() followed by insert().
        if let Some(old_energy) = self.edge_energies.insert(edge_id.clone(), energy) {
            self.total_energy -= old_energy;
        }
        self.total_energy += energy;
        self.edge_residuals.insert(edge_id, residual);
    }
    /// Remove an edge from the cache
    ///
    /// The removed energy is remembered in `removed_energies` for delta
    /// accounting until `clear_removed` is called.
    pub fn remove_edge(&mut self, edge_id: &str) {
        if let Some(energy) = self.edge_energies.remove(edge_id) {
            self.total_energy -= energy;
            self.removed_energies.insert(edge_id.to_string(), energy);
        }
        self.edge_residuals.remove(edge_id);
        self.dirty_edges.remove(edge_id);
    }
    /// Get cached energy for an edge
    pub fn get_energy(&self, edge_id: &str) -> Option<f32> {
        self.edge_energies.get(edge_id).copied()
    }
    /// Get cached residual for an edge
    pub fn get_residual(&self, edge_id: &str) -> Option<&Vec<f32>> {
        self.edge_residuals.get(edge_id)
    }
    /// Get the total cached energy
    #[inline]
    pub fn total_energy(&self) -> f32 {
        self.total_energy
    }
    /// Get the number of dirty edges
    #[inline]
    pub fn dirty_count(&self) -> usize {
        self.dirty_edges.len()
    }
    /// Get dirty edge IDs
    pub fn dirty_edges(&self) -> &HashSet<EdgeId> {
        &self.dirty_edges
    }
    /// Set the fingerprint
    pub fn set_fingerprint(&mut self, fingerprint: impl Into<String>) {
        self.last_fingerprint = fingerprint.into();
    }
    /// Clear all removed energies after processing
    pub fn clear_removed(&mut self) {
        self.removed_energies.clear();
    }
    /// Clear the entire cache
    pub fn clear(&mut self) {
        self.edge_energies.clear();
        self.edge_residuals.clear();
        self.total_energy = 0.0;
        self.last_fingerprint.clear();
        self.dirty_edges.clear();
        self.removed_energies.clear();
    }
}
/// Engine for incremental coherence computation
///
/// Borrows a `CoherenceEngine` immutably and layers an edge-level energy
/// cache on top of it, so repeated updates only recompute dirty edges.
pub struct IncrementalEngine<'a> {
    /// Reference to the coherence engine
    engine: &'a CoherenceEngine,
    /// Configuration
    config: IncrementalConfig,
    /// Incremental cache
    cache: IncrementalCache,
    /// Pending update events (drained after each compute())
    pending_events: Vec<UpdateEvent>,
    /// Energy history for trend analysis
    energy_history: Vec<EnergyHistoryEntry>,
    /// Statistics
    stats: IncrementalStats,
}
/// Entry in energy history
///
/// One record per compute() call when history tracking is enabled.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct EnergyHistoryEntry {
    // Total energy after the compute() call
    energy: f32,
    // When the computation finished
    timestamp: DateTime<Utc>,
    // False when a full recompute was performed
    was_incremental: bool,
    // Edges whose residuals were recomputed
    edges_recomputed: usize,
}
/// Statistics about incremental computation
///
/// Cumulative counters; never reset by the engine.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
struct IncrementalStats {
    // Total compute() calls
    total_updates: u64,
    // compute() calls served incrementally
    incremental_updates: u64,
    // compute() calls that fell back to a full recompute
    full_recomputes: u64,
    total_edges_recomputed: u64,
    total_time_us: u64,
}
impl<'a> IncrementalEngine<'a> {
    /// Create a new incremental engine
    pub fn new(engine: &'a CoherenceEngine, config: IncrementalConfig) -> Self {
        Self {
            engine,
            config,
            cache: IncrementalCache::new(),
            pending_events: Vec::new(),
            energy_history: Vec::new(),
            stats: IncrementalStats::default(),
        }
    }
    /// Notify that a node was updated
    ///
    /// Marks every edge incident to the node as dirty; the actual
    /// recomputation happens on the next `compute()` call.
    pub fn node_updated(&mut self, node_id: impl Into<NodeId>) {
        let node_id = node_id.into();
        let affected_edges = self.engine.edges_incident_to(&node_id);
        // Mark affected edges as dirty
        self.cache.mark_node_dirty(&affected_edges);
        // Record event
        if self.config.track_history {
            self.pending_events.push(UpdateEvent::NodeUpdated {
                node_id,
                affected_edges,
                timestamp: Utc::now(),
            });
        }
    }
    /// Notify that an edge was added
    pub fn edge_added(&mut self, edge_id: impl Into<EdgeId>) {
        let edge_id = edge_id.into();
        self.cache.mark_dirty(edge_id.clone());
        if self.config.track_history {
            self.pending_events.push(UpdateEvent::EdgeAdded {
                edge_id,
                timestamp: Utc::now(),
            });
        }
    }
    /// Notify that an edge was removed
    pub fn edge_removed(&mut self, edge_id: impl Into<EdgeId>) {
        let edge_id = edge_id.into();
        let old_energy = self.cache.get_energy(&edge_id).unwrap_or(0.0);
        self.cache.remove_edge(&edge_id);
        if self.config.track_history {
            self.pending_events.push(UpdateEvent::EdgeRemoved {
                edge_id,
                old_energy,
                timestamp: Utc::now(),
            });
        }
    }
    /// Compute energy incrementally or fully based on dirty state
    ///
    /// Falls back to a full recompute when incremental mode is disabled,
    /// when too many edges are dirty (`full_recompute_threshold`), or on the
    /// first call (no fingerprint yet).
    pub fn compute(&mut self) -> DeltaResult {
        let start = std::time::Instant::now();
        let old_energy = self.cache.total_energy();
        let total_edges = self.engine.edge_count();
        let dirty_count = self.cache.dirty_count();
        // Decide whether to do incremental or full recompute
        let ratio = if total_edges > 0 {
            dirty_count as f32 / total_edges as f32
        } else {
            1.0
        };
        let (new_energy, edges_recomputed, was_full) = if !self.config.enabled
            || ratio > self.config.full_recompute_threshold
            || self.cache.last_fingerprint.is_empty()
        {
            // Full recompute
            let energy = self.compute_full_internal();
            (energy.total_energy, energy.edge_count, true)
        } else {
            // Incremental
            let result = self.compute_incremental_internal();
            (result, dirty_count, false)
        };
        let compute_time_us = start.elapsed().as_micros() as u64;
        let energy_delta = new_energy - old_energy;
        // Update stats
        self.stats.total_updates += 1;
        if was_full {
            self.stats.full_recomputes += 1;
        } else {
            self.stats.incremental_updates += 1;
        }
        self.stats.total_edges_recomputed += edges_recomputed as u64;
        self.stats.total_time_us += compute_time_us;
        // Update history
        if self.config.track_history {
            self.energy_history.push(EnergyHistoryEntry {
                energy: new_energy,
                timestamp: Utc::now(),
                was_incremental: !was_full,
                edges_recomputed,
            });
            // Trim the oldest entries in one O(n) drain instead of repeated
            // remove(0), which is O(n) per call and O(n^2) overall.
            if self.energy_history.len() > self.config.history_size {
                let excess = self.energy_history.len() - self.config.history_size;
                self.energy_history.drain(0..excess);
            }
        }
        // Clear pending events
        self.pending_events.clear();
        self.cache.clear_removed();
        DeltaResult {
            energy_delta,
            new_energy,
            old_energy,
            edges_recomputed,
            total_edges,
            was_full_recompute: was_full,
            compute_time_us,
            timestamp: Utc::now(),
        }
    }
    /// Force a full recomputation
    pub fn compute_full(&mut self) -> CoherenceEnergy {
        self.compute_full_internal()
    }
    /// Get the current cached energy
    #[inline]
    pub fn cached_energy(&self) -> f32 {
        self.cache.total_energy()
    }
    /// Get the number of pending dirty edges
    #[inline]
    pub fn dirty_count(&self) -> usize {
        self.cache.dirty_count()
    }
    /// Check if incremental mode is effective
    pub fn incremental_ratio(&self) -> f32 {
        if self.stats.total_updates > 0 {
            self.stats.incremental_updates as f32 / self.stats.total_updates as f32
        } else {
            0.0
        }
    }
    /// Get energy trend over recent history
    ///
    /// Returns the least-squares slope of energy over the last `window`
    /// entries in chronological order, so a rising energy yields a positive
    /// slope. Returns None when `window < 2` (the slope would be undefined)
    /// or when there is not enough history.
    pub fn energy_trend(&self, window: usize) -> Option<f32> {
        if window < 2 || self.energy_history.len() < window {
            return None;
        }
        // BUG FIX: the original iterated `.rev()` (newest first), which
        // inverted the fitted slope's sign — an increasing energy reported a
        // negative trend. Take the most recent `window` entries in
        // chronological order instead.
        let recent = &self.energy_history[self.energy_history.len() - window..];
        // Linear regression slope
        let n = recent.len() as f32;
        let sum_x: f32 = (0..recent.len()).map(|i| i as f32).sum();
        let sum_y: f32 = recent.iter().map(|e| e.energy).sum();
        let sum_xy: f32 = recent
            .iter()
            .enumerate()
            .map(|(i, e)| i as f32 * e.energy)
            .sum();
        let sum_xx: f32 = (0..recent.len()).map(|i| (i as f32).powi(2)).sum();
        let slope = (n * sum_xy - sum_x * sum_y) / (n * sum_xx - sum_x * sum_x);
        Some(slope)
    }
    // Private methods
    /// Full recomputation: delegates to the engine, then rebuilds the cache
    /// from the resulting per-edge energies.
    fn compute_full_internal(&mut self) -> CoherenceEnergy {
        let energy = self.engine.compute_energy();
        // Rebuild cache from full computation
        self.cache.clear();
        for (edge_id, edge_energy) in &energy.edge_energies {
            self.cache.update_edge(
                edge_id.clone(),
                edge_energy.energy,
                edge_energy.residual.clone(),
            );
        }
        self.cache.set_fingerprint(&energy.fingerprint);
        energy
    }
    /// Incremental recomputation: only dirty edges are re-evaluated
    /// (in parallel when the `parallel` feature is enabled).
    fn compute_incremental_internal(&mut self) -> f32 {
        let dirty_edges: Vec<_> = self.cache.dirty_edges().iter().cloned().collect();
        // Recompute dirty edges (parallel when feature enabled)
        #[cfg(feature = "parallel")]
        let new_energies: Vec<(EdgeId, EdgeEnergy)> = dirty_edges
            .par_iter()
            .filter_map(|edge_id| {
                self.engine
                    .compute_edge_energy(edge_id)
                    .ok()
                    .map(|e| (edge_id.clone(), e))
            })
            .collect();
        #[cfg(not(feature = "parallel"))]
        let new_energies: Vec<(EdgeId, EdgeEnergy)> = dirty_edges
            .iter()
            .filter_map(|edge_id| {
                self.engine
                    .compute_edge_energy(edge_id)
                    .ok()
                    .map(|e| (edge_id.clone(), e))
            })
            .collect();
        // Update cache
        for (edge_id, edge_energy) in new_energies {
            self.cache
                .update_edge(edge_id, edge_energy.energy, edge_energy.residual);
        }
        // Update fingerprint
        self.cache
            .set_fingerprint(self.engine.current_fingerprint());
        self.cache.total_energy()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::coherence::engine::CoherenceConfig;

    #[test]
    fn test_incremental_cache() {
        // Two edges accumulate energy; removing one subtracts its share.
        let mut cache = IncrementalCache::new();
        cache.update_edge("e1", 1.0, vec![1.0]);
        cache.update_edge("e2", 2.0, vec![1.4]);
        assert_eq!(cache.get_energy("e1"), Some(1.0));
        assert_eq!(cache.total_energy(), 3.0);
        cache.remove_edge("e1");
        assert_eq!(cache.get_energy("e1"), None);
        assert_eq!(cache.total_energy(), 2.0);
    }

    #[test]
    fn test_dirty_tracking() {
        let mut cache = IncrementalCache::new();
        cache.update_edge("e1", 1.0, vec![]);
        cache.set_fingerprint("fp1");
        assert_eq!(cache.dirty_count(), 0);
        // Marking an edge dirty invalidates the cache until it is refreshed.
        cache.mark_dirty("e1");
        assert!(!cache.is_valid("fp1"));
        assert_eq!(cache.dirty_count(), 1);
        cache.update_edge("e1", 1.5, vec![]);
        assert_eq!(cache.dirty_count(), 0);
    }

    #[test]
    fn test_incremental_engine() {
        let engine = CoherenceEngine::new(CoherenceConfig::default());
        engine.add_node("n1", vec![1.0, 0.0]).unwrap();
        engine.add_node("n2", vec![0.0, 1.0]).unwrap();
        engine.add_edge("n1", "n2", 1.0, None).unwrap();
        let mut incremental = IncrementalEngine::new(&engine, IncrementalConfig::default());
        // The very first compute has no fingerprint, so it must be full.
        let outcome = incremental.compute();
        assert!(outcome.was_full_recompute);
        assert_eq!(outcome.new_energy, 2.0); // |[1,-1]|^2 = 2
        // Nothing changed afterwards, so nothing is dirty.
        assert_eq!(incremental.dirty_count(), 0);
    }

    #[test]
    fn test_delta_result() {
        let delta = DeltaResult {
            energy_delta: 0.5,
            new_energy: 2.5,
            old_energy: 2.0,
            edges_recomputed: 1,
            total_edges: 10,
            was_full_recompute: false,
            compute_time_us: 100,
            timestamp: Utc::now(),
        };
        assert!(delta.energy_increased());
        assert!(!delta.energy_decreased());
        assert!((delta.relative_change() - 0.25).abs() < 1e-6);
    }

    #[test]
    fn test_update_events() {
        let event = UpdateEvent::NodeUpdated {
            node_id: "n1".to_string(),
            affected_edges: vec!["e1".to_string(), "e2".to_string()],
            timestamp: Utc::now(),
        };
        for touched in ["e1", "e2"] {
            assert!(event.affects_edge(touched));
        }
        assert!(!event.affects_edge("e3"));
    }

    #[test]
    fn test_energy_trend() {
        let engine = CoherenceEngine::default();
        let config = IncrementalConfig {
            track_history: true,
            history_size: 10,
            ..Default::default()
        };
        let mut incremental = IncrementalEngine::new(&engine, config);
        // Manually populate history for testing
        for step in 0..5 {
            incremental.energy_history.push(EnergyHistoryEntry {
                energy: step as f32 * 0.5,
                timestamp: Utc::now(),
                was_incremental: true,
                edges_recomputed: 1,
            });
        }
        let trend = incremental.energy_trend(4);
        assert!(trend.is_some());
        assert!(trend.unwrap() > 0.0); // Increasing trend
    }
}

View File

@@ -0,0 +1,78 @@
//! Coherence Computation Engine
//!
//! This module implements the core coherence computation using sheaf Laplacian mathematics.
//! The key formula is: E(S) = sum(w_e * |r_e|^2) where r_e = rho_u(x_u) - rho_v(x_v)
//!
//! # Architecture
//!
//! ```text
//! +-------------------+
//! | CoherenceEngine | Aggregate with residual cache
//! +-------------------+
//! |
//! +------+------+
//! | |
//! v v
//! +-------+ +-----------+
//! | Energy | | Spectral | Value objects and analyzers
//! +-------+ +-----------+
//! |
//! v
//! +-------------------+
//! | Incremental | Efficient delta computation
//! +-------------------+
//! ```
//!
//! # Features
//!
//! - **Parallel Computation**: Uses rayon for parallel residual calculation
//! - **Fingerprint-Based Staleness**: Detects when recomputation is needed
//! - **Hotspot Identification**: Finds highest energy edges
//! - **SIMD Optimization**: Fast residual norm computation
//!
//! # Example
//!
//! ```rust,ignore
//! use prime_radiant::coherence::{CoherenceEngine, CoherenceConfig};
//!
//! let config = CoherenceConfig::default();
//! let mut engine = CoherenceEngine::new(config);
//!
//! // Add nodes and edges
//! engine.add_node("fact1", vec![1.0, 0.5, 0.3]);
//! engine.add_node("fact2", vec![0.9, 0.6, 0.2]);
//! engine.add_edge("fact1", "fact2", 1.0, None);
//!
//! // Compute coherence
//! let energy = engine.compute_energy();
//! println!("Total coherence energy: {}", energy.total_energy);
//!
//! // Update incrementally
//! engine.update_node("fact1", vec![1.0, 0.5, 0.4]);
//! let updated = engine.compute_incremental();
//! ```
mod energy;
mod engine;
mod history;
mod incremental;
mod spectral;
pub use energy::{
compute_norm_sq, compute_residual, CoherenceEnergy, EdgeEnergy, EnergySnapshot,
EnergyStatistics, HotspotInfo, ScopeEnergy, ScopeId,
};
pub use engine::{
CoherenceConfig, CoherenceEngine, CoherenceError, NodeState, RestrictionMap, Result, SheafEdge,
SheafNode,
};
pub use history::{EnergyHistory, EnergyHistoryConfig, EnergyTrend, TrendDirection};
pub use incremental::{
DeltaResult, IncrementalCache, IncrementalConfig, IncrementalEngine, UpdateEvent,
};
pub use spectral::{
compute_eigenvalues, DriftEvent, DriftSeverity, SpectralAnalyzer, SpectralConfig, SpectralStats,
};
// Alias for compatibility
pub use incremental::IncrementalCache as ResidualCache;

View File

@@ -0,0 +1,737 @@
//! Spectral Analysis for Coherence Drift Detection
//!
//! This module provides eigenvalue-based drift detection using the sheaf Laplacian.
//! Spectral analysis reveals structural changes in the coherence graph that may not
//! be apparent from simple energy metrics.
//!
//! # Theory
//!
//! The sheaf Laplacian L = D - A (weighted degree - adjacency) has eigenvalues that
//! characterize the graph's coherence structure:
//!
//! - **Algebraic connectivity** (second smallest eigenvalue): Measures how well-connected
//! the graph is; a drop indicates structural weakening
//! - **Spectral gap**: Difference between first and second eigenvalues; indicates
//! separation between components
//! - **Eigenvalue distribution drift**: Changes in the overall spectrum indicate
//! fundamental structural shifts
//!
//! # Example
//!
//! ```rust,ignore
//! use prime_radiant::coherence::{SpectralAnalyzer, SpectralConfig};
//!
//! let mut analyzer = SpectralAnalyzer::new(SpectralConfig::default());
//!
//! // Record eigenvalues over time
//! analyzer.record_eigenvalues(vec![0.0, 0.5, 1.2, 2.1]);
//! analyzer.record_eigenvalues(vec![0.0, 0.3, 1.0, 2.0]); // Drop in second eigenvalue
//!
//! // Check for drift
//! if let Some(event) = analyzer.detect_drift() {
//! println!("Drift detected: {:?}", event);
//! }
//! ```
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
/// Configuration for spectral analysis
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpectralConfig {
    /// Number of top eigenvalues to track
    pub num_eigenvalues: usize,
    /// Maximum history length (older snapshots are discarded)
    pub history_size: usize,
    /// Threshold for detecting drift (relative spectral distance)
    pub drift_threshold: f32,
    /// Threshold for detecting severe drift (relative spectral distance)
    pub severe_threshold: f32,
    /// Minimum number of samples before drift detection runs
    pub min_samples: usize,
    /// Smoothing factor for exponential moving average (0 = no smoothing)
    pub smoothing_alpha: f32,
}
// Defaults tuned for a "10% change is drift, 25% is severe" policy.
impl Default for SpectralConfig {
    fn default() -> Self {
        Self {
            num_eigenvalues: 10,
            history_size: 100,
            drift_threshold: 0.1,   // 10% relative change
            severe_threshold: 0.25, // 25% relative change
            min_samples: 3,
            smoothing_alpha: 0.3,
        }
    }
}
/// Severity level of detected drift
///
/// Ordered from least to most severe; see `level()` for the numeric scale.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum DriftSeverity {
    /// Minor drift - may be noise
    Minor,
    /// Moderate drift - warrants attention
    Moderate,
    /// Severe drift - requires action
    Severe,
    /// Critical drift - structural breakdown
    Critical,
}
impl DriftSeverity {
/// Get numeric severity level (higher = more severe)
pub fn level(&self) -> u8 {
match self {
DriftSeverity::Minor => 1,
DriftSeverity::Moderate => 2,
DriftSeverity::Severe => 3,
DriftSeverity::Critical => 4,
}
}
/// Check if this severity requires escalation
pub fn requires_escalation(&self) -> bool {
matches!(self, DriftSeverity::Severe | DriftSeverity::Critical)
}
}
/// A detected drift event
///
/// Produced by `SpectralAnalyzer::detect_drift` when the spectral distance
/// between consecutive snapshots exceeds the configured threshold.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DriftEvent {
    /// Magnitude of the drift (relative spectral distance)
    pub magnitude: f32,
    /// Severity classification
    pub severity: DriftSeverity,
    /// Which eigenvalue modes are affected (indices into the sorted spectrum)
    pub affected_modes: Vec<usize>,
    /// Direction of drift for each affected mode (positive = increasing)
    pub mode_changes: Vec<f32>,
    /// Timestamp when drift was detected
    pub timestamp: DateTime<Utc>,
    /// Algebraic connectivity change (second eigenvalue)
    pub connectivity_change: f32,
    /// Spectral gap change
    pub spectral_gap_change: f32,
    /// Human-readable description of the drift
    pub description: String,
}
impl DriftEvent {
    /// Whether algebraic connectivity dropped in this event.
    pub fn is_connectivity_weakening(&self) -> bool {
        self.connectivity_change < 0.0
    }
    /// Whether the spectrum suggests components pulling apart:
    /// the spectral gap widened while connectivity fell.
    pub fn indicates_separation(&self) -> bool {
        let gap_widening = self.spectral_gap_change > 0.0;
        let connectivity_falling = self.connectivity_change < 0.0;
        gap_widening && connectivity_falling
    }
}
/// Entry in the eigenvalue history
///
/// Invariant: `eigenvalues` is sorted ascending (enforced by `new`).
#[derive(Debug, Clone)]
struct EigenvalueSnapshot {
    /// Eigenvalues (sorted ascending)
    eigenvalues: Vec<f32>,
    /// Timestamp
    timestamp: DateTime<Utc>,
    /// Algebraic connectivity (second smallest eigenvalue; 0.0 if < 2 values)
    connectivity: f32,
    /// Spectral gap (difference between first two eigenvalues; 0.0 if < 2 values)
    spectral_gap: f32,
}
impl EigenvalueSnapshot {
    /// Build a snapshot from raw eigenvalues: sorts them ascending and
    /// derives connectivity (second smallest) and the spectral gap.
    fn new(mut eigenvalues: Vec<f32>) -> Self {
        // NaN-tolerant ascending sort (NaN comparisons fall back to Equal).
        eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        // Both derived quantities default to 0.0 with fewer than two values.
        let connectivity = eigenvalues.get(1).copied().unwrap_or(0.0);
        let spectral_gap = match eigenvalues.as_slice() {
            [smallest, second, ..] => second - smallest,
            _ => 0.0,
        };
        Self {
            eigenvalues,
            timestamp: Utc::now(),
            connectivity,
            spectral_gap,
        }
    }
}
/// Spectral analyzer for drift detection
///
/// Accumulates eigenvalue snapshots over time and compares consecutive
/// snapshots to flag structural drift in the coherence graph.
pub struct SpectralAnalyzer {
    /// Configuration
    config: SpectralConfig,
    /// History of eigenvalue snapshots (oldest first)
    history: VecDeque<EigenvalueSnapshot>,
    /// Exponential moving average of eigenvalues (None until first sample)
    ema_eigenvalues: Option<Vec<f32>>,
    /// Last detected drift event
    last_drift: Option<DriftEvent>,
    // Cumulative count of recorded snapshots (not reset by clear())
    total_samples: u64,
    // Cumulative count of detected drift events (not reset by clear())
    drift_events: u64,
}
impl SpectralAnalyzer {
    /// Create a new spectral analyzer
    pub fn new(config: SpectralConfig) -> Self {
        Self {
            config,
            history: VecDeque::new(),
            ema_eigenvalues: None,
            last_drift: None,
            total_samples: 0,
            drift_events: 0,
        }
    }
    /// Record new eigenvalues
    ///
    /// The values are sorted into a snapshot, folded into the EMA, and
    /// appended to the bounded history.
    pub fn record_eigenvalues(&mut self, eigenvalues: Vec<f32>) {
        let snapshot = EigenvalueSnapshot::new(eigenvalues);
        // Update EMA
        if let Some(ref mut ema) = self.ema_eigenvalues {
            let alpha = self.config.smoothing_alpha;
            // NOTE(review): if a snapshot has more eigenvalues than the EMA
            // vector, the extra values are silently ignored — confirm this is
            // the intended behavior for graphs that grow over time.
            for (i, &val) in snapshot.eigenvalues.iter().enumerate() {
                if i < ema.len() {
                    ema[i] = alpha * val + (1.0 - alpha) * ema[i];
                }
            }
        } else {
            self.ema_eigenvalues = Some(snapshot.eigenvalues.clone());
        }
        self.history.push_back(snapshot);
        self.total_samples += 1;
        // Trim history
        while self.history.len() > self.config.history_size {
            self.history.pop_front();
        }
    }
    /// Detect drift based on recent eigenvalue changes
    ///
    /// Compares the two most recent snapshots; returns None when there are
    /// fewer than `min_samples` snapshots or the spectral distance stays
    /// below `drift_threshold`.
    pub fn detect_drift(&mut self) -> Option<DriftEvent> {
        if self.history.len() < self.config.min_samples {
            return None;
        }
        let current = self.history.back()?;
        // BUG FIX: `self.history.len() - 2` underflows (panics in debug
        // builds) when `min_samples < 2` allows a single-entry history;
        // checked_sub turns that case into a clean None.
        let prev_idx = self.history.len().checked_sub(2)?;
        let previous = self.history.get(prev_idx)?;
        // Compute spectral distance
        let distance = self.spectral_distance(&current.eigenvalues, &previous.eigenvalues);
        // Check threshold
        if distance < self.config.drift_threshold {
            return None;
        }
        // Identify affected modes
        let (affected_modes, mode_changes) = self.identify_affected_modes(current, previous);
        // Compute connectivity and gap changes
        let connectivity_change = current.connectivity - previous.connectivity;
        let spectral_gap_change = current.spectral_gap - previous.spectral_gap;
        // Determine severity
        let severity = self.classify_severity(distance, connectivity_change);
        // Build description
        let description = self.build_description(
            &affected_modes,
            connectivity_change,
            spectral_gap_change,
            severity,
        );
        let event = DriftEvent {
            magnitude: distance,
            severity,
            affected_modes,
            mode_changes,
            timestamp: Utc::now(),
            connectivity_change,
            spectral_gap_change,
            description,
        };
        self.last_drift = Some(event.clone());
        self.drift_events += 1;
        Some(event)
    }
    /// Get the current algebraic connectivity (second smallest eigenvalue)
    pub fn algebraic_connectivity(&self) -> Option<f32> {
        self.history.back().map(|s| s.connectivity)
    }
    /// Get the current spectral gap
    pub fn spectral_gap(&self) -> Option<f32> {
        self.history.back().map(|s| s.spectral_gap)
    }
    /// Get the smoothed eigenvalues (EMA)
    pub fn smoothed_eigenvalues(&self) -> Option<&Vec<f32>> {
        self.ema_eigenvalues.as_ref()
    }
    /// Get drift trend over recent history
    ///
    /// Average pairwise spectral distance over the last `window` transitions;
    /// None when there is not enough history.
    pub fn drift_trend(&self, window: usize) -> Option<f32> {
        if self.history.len() < window + 1 {
            return None;
        }
        let recent: Vec<_> = self.history.iter().rev().take(window + 1).collect();
        // Compute average pairwise distance
        let mut total_distance = 0.0;
        for i in 0..recent.len() - 1 {
            total_distance +=
                self.spectral_distance(&recent[i].eigenvalues, &recent[i + 1].eigenvalues);
        }
        Some(total_distance / window as f32)
    }
    /// Check if the system is currently in a drift state
    pub fn is_drifting(&self) -> bool {
        self.drift_trend(self.config.min_samples)
            .map(|trend| trend > self.config.drift_threshold)
            .unwrap_or(false)
    }
    /// Get statistics
    pub fn stats(&self) -> SpectralStats {
        SpectralStats {
            total_samples: self.total_samples,
            drift_events: self.drift_events,
            history_size: self.history.len(),
            current_connectivity: self.algebraic_connectivity(),
            current_spectral_gap: self.spectral_gap(),
            is_drifting: self.is_drifting(),
        }
    }
    /// Clear history
    ///
    /// Cumulative counters (`total_samples`, `drift_events`) are deliberately
    /// kept so stats remain lifetime totals.
    pub fn clear(&mut self) {
        self.history.clear();
        self.ema_eigenvalues = None;
        self.last_drift = None;
    }
    // Private methods
    /// Compute spectral distance between two eigenvalue vectors
    ///
    /// L2 distance normalized by the mean absolute magnitude of `b`,
    /// falling back to the raw L2 distance when `b` is (near) zero.
    fn spectral_distance(&self, a: &[f32], b: &[f32]) -> f32 {
        let len = a.len().min(b.len());
        if len == 0 {
            return 0.0;
        }
        // Use relative L2 distance
        let mut sum_sq = 0.0;
        let mut sum_ref = 0.0;
        for i in 0..len {
            let diff = a[i] - b[i];
            sum_sq += diff * diff;
            sum_ref += b[i].abs();
        }
        if sum_ref > 1e-10 {
            (sum_sq.sqrt()) / (sum_ref / len as f32)
        } else {
            sum_sq.sqrt()
        }
    }
    /// Identify which eigenvalue modes are affected
    ///
    /// A mode counts as affected when its relative change exceeds half of
    /// the drift threshold.
    fn identify_affected_modes(
        &self,
        current: &EigenvalueSnapshot,
        previous: &EigenvalueSnapshot,
    ) -> (Vec<usize>, Vec<f32>) {
        let mut affected = Vec::new();
        let mut changes = Vec::new();
        let len = current.eigenvalues.len().min(previous.eigenvalues.len());
        for i in 0..len {
            let change = current.eigenvalues[i] - previous.eigenvalues[i];
            let relative_change = if previous.eigenvalues[i].abs() > 1e-10 {
                change.abs() / previous.eigenvalues[i].abs()
            } else {
                change.abs()
            };
            if relative_change > self.config.drift_threshold / 2.0 {
                affected.push(i);
                changes.push(change);
            }
        }
        (affected, changes)
    }
    /// Classify drift severity
    ///
    /// Connectivity loss escalates the classification: the same distance is
    /// judged more severe when the second eigenvalue is also dropping.
    fn classify_severity(&self, distance: f32, connectivity_change: f32) -> DriftSeverity {
        let is_connectivity_loss = connectivity_change < -self.config.drift_threshold;
        if distance > self.config.severe_threshold * 2.0
            || (is_connectivity_loss && distance > self.config.severe_threshold)
        {
            DriftSeverity::Critical
        } else if distance > self.config.severe_threshold {
            DriftSeverity::Severe
        } else if distance > self.config.drift_threshold * 1.5 || is_connectivity_loss {
            DriftSeverity::Moderate
        } else {
            DriftSeverity::Minor
        }
    }
    /// Build human-readable description
    fn build_description(
        &self,
        affected_modes: &[usize],
        connectivity_change: f32,
        spectral_gap_change: f32,
        severity: DriftSeverity,
    ) -> String {
        let mut parts = Vec::new();
        // Severity
        parts.push(format!("{:?} spectral drift detected", severity));
        // Affected modes
        if !affected_modes.is_empty() {
            let mode_str = affected_modes
                .iter()
                .map(|m| m.to_string())
                .collect::<Vec<_>>()
                .join(", ");
            parts.push(format!("affecting modes [{}]", mode_str));
        }
        // Connectivity
        if connectivity_change < 0.0 {
            parts.push(format!(
                "connectivity decreased by {:.2}%",
                connectivity_change.abs() * 100.0
            ));
        } else if connectivity_change > 0.0 {
            parts.push(format!(
                "connectivity increased by {:.2}%",
                connectivity_change * 100.0
            ));
        }
        // Spectral gap
        if spectral_gap_change.abs() > 0.01 {
            let direction = if spectral_gap_change > 0.0 {
                "widened"
            } else {
                "narrowed"
            };
            parts.push(format!(
                "spectral gap {} by {:.2}%",
                direction,
                spectral_gap_change.abs() * 100.0
            ));
        }
        parts.join("; ")
    }
}
// An analyzer with the default thresholds (10% drift / 25% severe).
impl Default for SpectralAnalyzer {
    fn default() -> Self {
        Self::new(SpectralConfig::default())
    }
}
/// Statistics about spectral analysis
///
/// A point-in-time snapshot produced by `SpectralAnalyzer::stats`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpectralStats {
    /// Total samples recorded (lifetime counter)
    pub total_samples: u64,
    /// Number of drift events detected (lifetime counter)
    pub drift_events: u64,
    /// Current history size (bounded by the configured maximum)
    pub history_size: usize,
    /// Current algebraic connectivity (None when history is empty)
    pub current_connectivity: Option<f32>,
    /// Current spectral gap (None when history is empty)
    pub current_spectral_gap: Option<f32>,
    /// Whether currently drifting
    pub is_drifting: bool,
}
/// Compute eigenvalues of a symmetric matrix (Laplacian)
///
/// This is a simplified eigenvalue computation for small matrices.
/// For production use with large graphs, use the `spectral` feature
/// which provides `nalgebra` integration.
#[cfg(not(feature = "spectral"))]
pub fn compute_eigenvalues(laplacian: &[Vec<f32>], k: usize) -> Vec<f32> {
// Power iteration for top eigenvalue, deflation for subsequent
// This is a simplified implementation - use nalgebra for production
let n = laplacian.len();
if n == 0 || k == 0 {
return Vec::new();
}
let mut eigenvalues = Vec::with_capacity(k.min(n));
// Start with a copy of the matrix
let mut matrix: Vec<Vec<f32>> = laplacian.to_vec();
for _ in 0..k.min(n) {
// Power iteration
let lambda = power_iteration(&matrix, 100, 1e-6);
eigenvalues.push(lambda);
// Deflate matrix
deflate_matrix(&mut matrix, lambda);
}
// Sort ascending (Laplacian eigenvalues are non-negative)
eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
eigenvalues
}
/// Power iteration to find the largest eigenvalue
#[cfg(not(feature = "spectral"))]
fn power_iteration(matrix: &[Vec<f32>], max_iters: usize, tolerance: f32) -> f32 {
let n = matrix.len();
if n == 0 {
return 0.0;
}
// Initialize with random vector
let mut v: Vec<f32> = (0..n).map(|i| (i as f32 + 1.0) / n as f32).collect();
normalize(&mut v);
let mut lambda = 0.0;
for _ in 0..max_iters {
// w = A * v
let mut w = vec![0.0; n];
for i in 0..n {
for j in 0..n {
w[i] += matrix[i][j] * v[j];
}
}
// Rayleigh quotient
let new_lambda: f32 = v.iter().zip(w.iter()).map(|(vi, wi)| vi * wi).sum();
// Normalize
normalize(&mut w);
v = w;
// Check convergence
if (new_lambda - lambda).abs() < tolerance {
return new_lambda;
}
lambda = new_lambda;
}
lambda
}
/// Rescale `v` to unit Euclidean length in place.
///
/// Vectors whose norm is at most 1e-10 are left untouched so we never
/// divide by (near) zero.
#[cfg(not(feature = "spectral"))]
fn normalize(v: &mut [f32]) {
    let norm = v.iter().fold(0.0_f32, |acc, &x| acc + x * x).sqrt();
    if norm > 1e-10 {
        v.iter_mut().for_each(|x| *x /= norm);
    }
}
/// Shift the spectrum by subtracting `lambda` from every diagonal entry.
///
/// Computes `A' = A - lambda * I` in place. This is a spectrum shift, not a
/// true (Hotelling) deflation: it moves *all* eigenvalues down by `lambda`
/// rather than removing the single eigenpair that was just found. The
/// approximation is deliberate — coarse, but sufficient for drift detection.
#[cfg(not(feature = "spectral"))]
fn deflate_matrix(matrix: &mut [Vec<f32>], lambda: f32) {
    for (i, row) in matrix.iter_mut().enumerate() {
        row[i] -= lambda;
    }
}
/// Compute eigenvalues using nalgebra (when spectral feature is enabled)
///
/// Runs a full symmetric eigendecomposition of the Laplacian and returns
/// the `k` *smallest* eigenvalues in ascending order (for a graph
/// Laplacian these are non-negative, and the second smallest is the
/// algebraic connectivity). Empty input or `k == 0` yields an empty
/// vector.
#[cfg(feature = "spectral")]
pub fn compute_eigenvalues(laplacian: &[Vec<f32>], k: usize) -> Vec<f32> {
    use nalgebra::{DMatrix, SymmetricEigen};
    let n = laplacian.len();
    if n == 0 || k == 0 {
        return Vec::new();
    }
    // Build the f64 matrix entry-by-entry from the row-major input.
    let matrix = DMatrix::from_fn(n, n, |i, j| laplacian[i][j] as f64);
    // Full decomposition is fine for the small graphs targeted here.
    let eigen = SymmetricEigen::new(matrix);
    let mut eigenvalues: Vec<f32> = eigen.eigenvalues.iter().map(|&x| x as f32).collect();
    // Ascending sort, then keep only the k smallest values.
    eigenvalues.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
    eigenvalues.truncate(k);
    eigenvalues
}
#[cfg(test)]
mod tests {
    use super::*;
    // A fresh analyzer has recorded no samples and reports no drift.
    #[test]
    fn test_spectral_analyzer_creation() {
        let analyzer = SpectralAnalyzer::default();
        assert_eq!(analyzer.stats().total_samples, 0);
        assert!(!analyzer.is_drifting());
    }
    // Recording one eigenvalue sample exposes the algebraic connectivity
    // (second value of the sorted spectrum) and the spectral gap; here
    // both happen to be 0.5 for the sample [0.0, 0.5, 1.0, 2.0].
    #[test]
    fn test_record_eigenvalues() {
        let mut analyzer = SpectralAnalyzer::default();
        analyzer.record_eigenvalues(vec![0.0, 0.5, 1.0, 2.0]);
        assert_eq!(analyzer.stats().total_samples, 1);
        assert_eq!(analyzer.algebraic_connectivity(), Some(0.5));
        assert_eq!(analyzer.spectral_gap(), Some(0.5));
    }
    // Identical consecutive samples produce no drift; a drop in the
    // second eigenvalue (connectivity) then triggers a drift event with
    // a negative connectivity_change.
    #[test]
    fn test_drift_detection() {
        let config = SpectralConfig {
            drift_threshold: 0.1,
            severe_threshold: 0.3,
            min_samples: 2,
            ..Default::default()
        };
        let mut analyzer = SpectralAnalyzer::new(config);
        // Record stable eigenvalues
        analyzer.record_eigenvalues(vec![0.0, 0.5, 1.0, 2.0]);
        analyzer.record_eigenvalues(vec![0.0, 0.5, 1.0, 2.0]);
        // No drift yet
        assert!(analyzer.detect_drift().is_none());
        // Record significant change
        analyzer.record_eigenvalues(vec![0.0, 0.2, 0.8, 1.5]); // Connectivity dropped
        let drift = analyzer.detect_drift();
        assert!(drift.is_some());
        let event = drift.unwrap();
        assert!(event.connectivity_change < 0.0);
    }
    // A drastic spectrum change (connectivity 1.0 -> 0.1, well past the
    // severe_threshold of 0.3) must be rated at least Moderate severity.
    #[test]
    fn test_drift_severity() {
        let config = SpectralConfig {
            drift_threshold: 0.1,
            severe_threshold: 0.3,
            min_samples: 2,
            ..Default::default()
        };
        let mut analyzer = SpectralAnalyzer::new(config);
        analyzer.record_eigenvalues(vec![0.0, 1.0, 2.0, 3.0]);
        analyzer.record_eigenvalues(vec![0.0, 0.1, 0.5, 1.0]); // Drastic change
        let drift = analyzer.detect_drift().unwrap();
        assert!(drift.severity.level() >= DriftSeverity::Moderate.level());
    }
    // With smoothing_alpha = 0.5 the exponential moving average of the
    // second eigenvalue must land strictly between the two raw samples
    // (1.0 and 1.5).
    #[test]
    fn test_smoothed_eigenvalues() {
        let mut analyzer = SpectralAnalyzer::new(SpectralConfig {
            smoothing_alpha: 0.5,
            ..Default::default()
        });
        analyzer.record_eigenvalues(vec![0.0, 1.0, 2.0]);
        let first = analyzer.smoothed_eigenvalues().unwrap().clone();
        analyzer.record_eigenvalues(vec![0.0, 1.5, 2.5]);
        let second = analyzer.smoothed_eigenvalues().unwrap();
        // EMA should be between first and second values
        assert!(second[1] > 1.0 && second[1] < 1.5);
    }
    // stats() mirrors what was recorded: one sample in history and the
    // connectivity taken from that sample.
    #[test]
    fn test_spectral_stats() {
        let mut analyzer = SpectralAnalyzer::default();
        analyzer.record_eigenvalues(vec![0.0, 0.5, 1.0]);
        let stats = analyzer.stats();
        assert_eq!(stats.total_samples, 1);
        assert_eq!(stats.history_size, 1);
        assert_eq!(stats.current_connectivity, Some(0.5));
    }
    // Sanity-check the fallback (non-nalgebra) eigensolver on the 3x3
    // identity. The loose assertion (each value near 1 OR near 0)
    // deliberately tolerates the simplified deflation scheme.
    #[test]
    #[cfg(not(feature = "spectral"))]
    fn test_compute_eigenvalues() {
        // Identity matrix has all eigenvalues = 1
        let identity = vec![
            vec![1.0, 0.0, 0.0],
            vec![0.0, 1.0, 0.0],
            vec![0.0, 0.0, 1.0],
        ];
        let eigenvalues = compute_eigenvalues(&identity, 3);
        assert_eq!(eigenvalues.len(), 3);
        // All should be close to 1.0
        for ev in eigenvalues {
            assert!((ev - 1.0).abs() < 0.1 || ev.abs() < 0.1);
        }
    }
    // Recording more samples than history_size keeps only the most
    // recent `history_size` entries.
    #[test]
    fn test_history_trimming() {
        let config = SpectralConfig {
            history_size: 5,
            ..Default::default()
        };
        let mut analyzer = SpectralAnalyzer::new(config);
        for i in 0..10 {
            analyzer.record_eigenvalues(vec![0.0, i as f32 * 0.1]);
        }
        assert_eq!(analyzer.stats().history_size, 5);
    }
}