Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
772
vendor/ruvector/crates/prime-radiant/tests/integration/coherence_tests.rs
vendored
Normal file
772
vendor/ruvector/crates/prime-radiant/tests/integration/coherence_tests.rs
vendored
Normal file
@@ -0,0 +1,772 @@
|
||||
//! Integration tests for Coherence Computation
|
||||
//!
|
||||
//! Tests the Coherence Computation bounded context, verifying:
|
||||
//! - Full energy computation from graph state
|
||||
//! - Incremental updates when nodes change
|
||||
//! - Spectral drift detection
|
||||
//! - Hotspot identification
|
||||
//! - Caching and fingerprint-based staleness
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
// ============================================================================
|
||||
// TEST INFRASTRUCTURE
|
||||
// ============================================================================
|
||||
|
||||
/// Simple restriction map for testing
|
||||
/// Linear restriction map (matrix + bias) used to project node states
/// into an edge's comparison space.
struct RestrictionMap {
    /// Row-major `rows x cols` matrix; identity-like by construction.
    matrix: Vec<Vec<f32>>,
    /// Additive bias, one entry per output row (all zeros by construction).
    bias: Vec<f32>,
}

impl RestrictionMap {
    /// Build a `rows x cols` identity-like map (ones on the diagonal,
    /// truncated or zero-padded elsewhere) with a zero bias.
    fn new(rows: usize, cols: usize) -> Self {
        let mut matrix = Vec::with_capacity(rows);
        for i in 0..rows {
            let mut row = vec![0.0; cols];
            if i < cols {
                row[i] = 1.0;
            }
            matrix.push(row);
        }
        Self {
            matrix,
            bias: vec![0.0; rows],
        }
    }

    /// Apply the affine map: `matrix * input + bias`.
    ///
    /// NOTE(review): `zip` silently truncates if `input` is shorter than a
    /// row; callers are expected to pass a state of length `cols`.
    fn apply(&self, input: &[f32]) -> Vec<f32> {
        let mut out = Vec::with_capacity(self.matrix.len());
        for (row, b) in self.matrix.iter().zip(self.bias.iter()) {
            let dot: f32 = row.iter().zip(input.iter()).map(|(a, x)| a * x).sum();
            out.push(dot + b);
        }
        out
    }

    /// Number of output dimensions (matrix row count).
    fn output_dim(&self) -> usize {
        self.matrix.len()
    }
}
|
||||
|
||||
/// Simple edge for testing
|
||||
struct TestEdge {
|
||||
source: u64,
|
||||
target: u64,
|
||||
weight: f32,
|
||||
rho_source: RestrictionMap,
|
||||
rho_target: RestrictionMap,
|
||||
}
|
||||
|
||||
impl TestEdge {
|
||||
fn compute_residual(&self, states: &HashMap<u64, Vec<f32>>) -> Option<Vec<f32>> {
|
||||
let source_state = states.get(&self.source)?;
|
||||
let target_state = states.get(&self.target)?;
|
||||
|
||||
let projected_source = self.rho_source.apply(source_state);
|
||||
let projected_target = self.rho_target.apply(target_state);
|
||||
|
||||
Some(
|
||||
projected_source
|
||||
.iter()
|
||||
.zip(&projected_target)
|
||||
.map(|(a, b)| a - b)
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
fn compute_energy(&self, states: &HashMap<u64, Vec<f32>>) -> Option<f32> {
|
||||
let residual = self.compute_residual(states)?;
|
||||
let norm_sq: f32 = residual.iter().map(|x| x * x).sum();
|
||||
Some(self.weight * norm_sq)
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple coherence energy computation
|
||||
fn compute_total_energy(
|
||||
states: &HashMap<u64, Vec<f32>>,
|
||||
edges: &[TestEdge],
|
||||
) -> (f32, HashMap<usize, f32>) {
|
||||
let mut total = 0.0;
|
||||
let mut edge_energies = HashMap::new();
|
||||
|
||||
for (i, edge) in edges.iter().enumerate() {
|
||||
if let Some(energy) = edge.compute_energy(states) {
|
||||
total += energy;
|
||||
edge_energies.insert(i, energy);
|
||||
}
|
||||
}
|
||||
|
||||
(total, edge_energies)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ENERGY COMPUTATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_energy_computation_consistent_section() {
    // Two nodes holding identical states form a consistent section, so the
    // edge residual is zero and the total energy must vanish.
    let mut states = HashMap::new();
    let shared = vec![1.0, 0.5, 0.3];
    states.insert(1, shared.clone());
    states.insert(2, shared);

    let edges = vec![TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(3, 3),
        rho_target: RestrictionMap::new(3, 3),
    }];

    let (total, _) = compute_total_energy(&states, &edges);

    assert!(total < 1e-10, "Expected near-zero energy, got {}", total);
}
|
||||
|
||||
#[test]
fn test_energy_computation_inconsistent_section() {
    // Disagreeing endpoint states must yield positive energy equal to the
    // squared residual norm (weight = 1).
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5, 0.3]);
    states.insert(2, vec![0.5, 0.8, 0.1]);

    let edges = vec![TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(3, 3),
        rho_target: RestrictionMap::new(3, 3),
    }];

    let (total, _) = compute_total_energy(&states, &edges);

    // Hand-computed residual: [0.5, -0.3, 0.2] -> 0.25 + 0.09 + 0.04 = 0.38.
    let expected: f32 = [0.5f32, -0.3, 0.2].iter().map(|x| x * x).sum();

    assert!(
        (total - expected).abs() < 1e-6,
        "Expected energy {}, got {}",
        expected,
        total
    );
}
|
||||
|
||||
#[test]
fn test_energy_computation_weighted_edges() {
    // Scaling the edge weight must scale the energy by the same factor.
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.0, 0.0]);

    // Same edge, differing only in weight.
    let edge_with = |w: f32| TestEdge {
        source: 1,
        target: 2,
        weight: w,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };

    let (energy_w1, _) = compute_total_energy(&states, &[edge_with(1.0)]);
    let (energy_w10, _) = compute_total_energy(&states, &[edge_with(10.0)]);

    assert!(
        (energy_w10 / energy_w1 - 10.0).abs() < 1e-6,
        "Expected 10x energy scaling"
    );
}
|
||||
|
||||
#[test]
fn test_energy_is_nonnegative() {
    // Energy is a weighted sum of squares, so it can never go negative,
    // regardless of the random states and weights involved.
    use rand::Rng;
    let mut rng = rand::thread_rng();

    for _ in 0..100 {
        let mut states = HashMap::new();
        for id in [1u64, 2] {
            let state: Vec<f32> = (0..4).map(|_| rng.gen_range(-10.0..10.0)).collect();
            states.insert(id, state);
        }

        let edges = vec![TestEdge {
            source: 1,
            target: 2,
            weight: rng.gen_range(0.0..10.0),
            rho_source: RestrictionMap::new(4, 4),
            rho_target: RestrictionMap::new(4, 4),
        }];

        let (total, _) = compute_total_energy(&states, &edges);

        assert!(total >= 0.0, "Energy must be non-negative, got {}", total);
    }
}
|
||||
|
||||
#[test]
fn test_energy_with_multiple_edges() {
    // The total must equal the sum of the per-edge energies.
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.5, 0.0]);
    states.insert(3, vec![0.0, 0.0]);

    let make_edge = |s: u64, t: u64| TestEdge {
        source: s,
        target: t,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    let edges = vec![make_edge(1, 2), make_edge(2, 3)];

    let (total, edge_energies) = compute_total_energy(&states, &edges);

    let sum_of_parts: f32 = edge_energies.values().sum();
    assert!(
        (total - sum_of_parts).abs() < 1e-10,
        "Total should equal sum of parts"
    );

    // Each edge has residual [0.5, 0.0] -> energy 0.25, so total = 0.5.
    assert!((total - 0.5).abs() < 1e-6);
}
|
||||
|
||||
// ============================================================================
|
||||
// INCREMENTAL UPDATE TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_incremental_update_single_node() {
    // Only updates to nodes incident to an edge may change the energy;
    // node 3 is isolated and must have no effect.
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.5, 0.0]);
    states.insert(3, vec![0.0, 0.0]);

    // Single edge 1-2; node 3 is isolated.
    let edges = vec![TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    }];

    let (energy_before, _) = compute_total_energy(&states, &edges);

    // Node 1 touches edge 0, so the total must move.
    states.insert(1, vec![0.8, 0.0]);
    let (energy_after, _) = compute_total_energy(&states, &edges);
    assert_ne!(
        energy_before, energy_after,
        "Energy should change when incident node updates"
    );

    // Node 3 touches no edge, so the total must stay put.
    states.insert(3, vec![0.5, 0.5]);
    let (energy_isolated, _) = compute_total_energy(&states, &edges);
    assert!(
        (energy_after - energy_isolated).abs() < 1e-10,
        "Energy should not change when isolated node updates"
    );
}
|
||||
|
||||
#[test]
fn test_incremental_update_affected_edges() {
    // An update to a node affects exactly the edges incident to it.
    fn affected_edges(node_id: u64, edges: &[TestEdge]) -> Vec<usize> {
        let mut out = Vec::new();
        for (i, e) in edges.iter().enumerate() {
            if e.source == node_id || e.target == node_id {
                out.push(i);
            }
        }
        out
    }

    let chain_edge = |s: u64, t: u64| TestEdge {
        source: s,
        target: t,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    // Path graph 1-2-3-4.
    let edges = vec![chain_edge(1, 2), chain_edge(2, 3), chain_edge(3, 4)];

    // Node 2 is incident to edges 0 and 1.
    assert_eq!(affected_edges(2, &edges), vec![0, 1]);
    // Node 1 is incident to edge 0 only.
    assert_eq!(affected_edges(1, &edges), vec![0]);
    // Node 4 is incident to edge 2 only.
    assert_eq!(affected_edges(4, &edges), vec![2]);
}
|
||||
|
||||
#[test]
fn test_incremental_vs_full_recomputation() {
    // Incremental recomputation (reuse cached energies for unaffected
    // edges, recompute only the edges incident to the changed node) must
    // agree with a full recomputation.
    //
    // Fix over the previous version: the affected-edge list was computed
    // but never used, and the "incremental" pass simply re-summed every
    // edge, so incremental recomputation was never actually exercised.
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5, 0.3]);
    states.insert(2, vec![0.8, 0.6, 0.4]);
    states.insert(3, vec![0.6, 0.7, 0.5]);

    let make_edge = |s: u64, t: u64| TestEdge {
        source: s,
        target: t,
        weight: 1.0,
        rho_source: RestrictionMap::new(3, 3),
        rho_target: RestrictionMap::new(3, 3),
    };
    // Triangle 1-2, 2-3, 1-3; the 1-3 edge is NOT incident to node 2, so
    // its cached energy must be reusable after a node-2 update.
    let edges = vec![make_edge(1, 2), make_edge(2, 3), make_edge(1, 3)];

    // Baseline: full computation with cached per-edge energies.
    let (_, cached) = compute_total_energy(&states, &edges);

    // Update node 2, then recompute only the edges incident to it.
    states.insert(2, vec![0.7, 0.65, 0.45]);
    let affected: Vec<usize> = edges
        .iter()
        .enumerate()
        .filter(|(_, e)| e.source == 2 || e.target == 2)
        .map(|(i, _)| i)
        .collect();

    let mut incremental_sum = 0.0;
    for i in 0..edges.len() {
        if affected.contains(&i) {
            // Incident edge: recompute with the new state.
            if let Some(energy) = edges[i].compute_energy(&states) {
                incremental_sum += energy;
            }
        } else {
            // Untouched edge: reuse the cached value.
            incremental_sum += cached.get(&i).copied().unwrap_or(0.0);
        }
    }

    // Ground truth: full recomputation on the updated states.
    let (energy_full, _) = compute_total_energy(&states, &edges);

    assert!(
        (energy_full - incremental_sum).abs() < 1e-10,
        "Incremental and full should match"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// RESIDUAL COMPUTATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_residual_symmetry() {
    // With identical restriction maps on both endpoints, reversing an
    // edge's orientation negates its residual: r_(u,v) = -r_(v,u).
    //
    // Fix: removed an unused `let rho = RestrictionMap::new(2, 2);`
    // binding that was never referenced (dead code / compiler warning).
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5]);
    states.insert(2, vec![0.8, 0.6]);

    let edge_uv = TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };

    let edge_vu = TestEdge {
        source: 2,
        target: 1,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };

    let r_uv = edge_uv.compute_residual(&states).unwrap();
    let r_vu = edge_vu.compute_residual(&states).unwrap();

    // Check that r_uv = -r_vu componentwise.
    for (a, b) in r_uv.iter().zip(&r_vu) {
        assert!(
            (a + b).abs() < 1e-10,
            "Residuals should be negations of each other"
        );
    }
}
|
||||
|
||||
#[test]
fn test_residual_dimension() {
    // The residual lives in the restriction map's output space, so its
    // length equals the map's output dimension (4D states projected to 2D).
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5, 0.3, 0.2]);
    states.insert(2, vec![0.8, 0.6, 0.4, 0.3]);

    let edge = TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 4), // 4D -> 2D
        rho_target: RestrictionMap::new(2, 4),
    };

    let residual = edge.compute_residual(&states).unwrap();

    assert_eq!(
        residual.len(),
        edge.rho_source.output_dim(),
        "Residual dimension should match restriction map output"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// HOTSPOT IDENTIFICATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_hotspot_identification() {
    // The top-k energy edges ("hotspots") are found by ranking per-edge
    // energies in descending order.
    fn find_hotspots(edge_energies: &HashMap<usize, f32>, k: usize) -> Vec<(usize, f32)> {
        let mut ranked: Vec<(usize, f32)> =
            edge_energies.iter().map(|(i, e)| (*i, *e)).collect();
        ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
        ranked.truncate(k);
        ranked
    }

    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.1, 0.0]); // far from node 1
    states.insert(3, vec![0.05, 0.0]); // close to node 2

    let make_edge = |s: u64, t: u64| TestEdge {
        source: s,
        target: t,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    let edges = vec![make_edge(1, 2), make_edge(2, 3)];

    let (_, edge_energies) = compute_total_energy(&states, &edges);

    let hotspots = find_hotspots(&edge_energies, 1);

    // Edge 0 (1-2) has the larger residual, hence the larger energy.
    assert_eq!(hotspots[0].0, 0, "Edge 1-2 should be the hotspot");
    assert!(
        edge_energies.get(&0).unwrap() > edge_energies.get(&1).unwrap(),
        "Edge 1-2 should have higher energy than edge 2-3"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// SCOPE-BASED ENERGY TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_energy_by_scope() {
    // Per-scope aggregation: each edge's energy is attributed to the scope
    // (namespace) of its source node.
    fn energy_by_scope(
        edges: &[TestEdge],
        states: &HashMap<u64, Vec<f32>>,
        node_scopes: &HashMap<u64, String>,
    ) -> HashMap<String, f32> {
        let mut scope_energy: HashMap<String, f32> = HashMap::new();
        for edge in edges {
            if let Some(energy) = edge.compute_energy(states) {
                // Unscoped nodes fall into the "" (default) bucket.
                let scope = node_scopes.get(&edge.source).cloned().unwrap_or_default();
                *scope_energy.entry(scope).or_insert(0.0) += energy;
            }
        }
        scope_energy
    }

    let mut states = HashMap::new();
    let mut node_scopes: HashMap<u64, String> = HashMap::new();

    // Two "finance" nodes and two "medical" nodes.
    for (id, state, scope) in [
        (1u64, vec![1.0, 0.5], "finance"),
        (2, vec![0.8, 0.6], "finance"),
        (3, vec![0.5, 0.3], "medical"),
        (4, vec![0.2, 0.1], "medical"),
    ] {
        states.insert(id, state);
        node_scopes.insert(id, scope.to_string());
    }

    let make_edge = |s: u64, t: u64| TestEdge {
        source: s,
        target: t,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    // One intra-finance edge and one intra-medical edge.
    let edges = vec![make_edge(1, 2), make_edge(3, 4)];

    let by_scope = energy_by_scope(&edges, &states, &node_scopes);

    assert!(by_scope.contains_key("finance"));
    assert!(by_scope.contains_key("medical"));
    assert!(by_scope.get("finance").unwrap() > &0.0);
}
|
||||
|
||||
// ============================================================================
|
||||
// FINGERPRINT AND CACHING TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_cache_invalidation_on_state_change() {
    // Cached energy must be served while the fingerprint matches, and
    // recomputed when the fingerprint changes or the cache is invalidated.
    //
    // Fix: `invalidate()` was defined but never called (dead code), so the
    // explicit-invalidation path was untested; it is now exercised below.

    /// Fingerprint-keyed energy cache: a cached value is valid only while
    /// the stored fingerprint matches the current one.
    struct CachedEnergy {
        value: Option<f32>,
        fingerprint: u64,
    }

    impl CachedEnergy {
        fn new() -> Self {
            Self {
                value: None,
                fingerprint: 0,
            }
        }

        /// Return the cached value if fresh, otherwise recompute and store.
        fn get_or_compute(
            &mut self,
            current_fingerprint: u64,
            compute_fn: impl FnOnce() -> f32,
        ) -> f32 {
            if self.fingerprint == current_fingerprint {
                if let Some(v) = self.value {
                    return v;
                }
            }

            let value = compute_fn();
            self.value = Some(value);
            self.fingerprint = current_fingerprint;
            value
        }

        /// Drop the cached value; the next lookup recomputes even when the
        /// fingerprint is unchanged.
        fn invalidate(&mut self) {
            self.value = None;
        }
    }

    let mut cache = CachedEnergy::new();
    let mut compute_count = 0;

    // First access computes.
    let v1 = cache.get_or_compute(1, || {
        compute_count += 1;
        10.0
    });
    assert_eq!(v1, 10.0);
    assert_eq!(compute_count, 1);

    // Same fingerprint: served from cache.
    let v2 = cache.get_or_compute(1, || {
        compute_count += 1;
        10.0
    });
    assert_eq!(v2, 10.0);
    assert_eq!(compute_count, 1); // Not recomputed

    // Fingerprint changed - should recompute.
    let v3 = cache.get_or_compute(2, || {
        compute_count += 1;
        20.0
    });
    assert_eq!(v3, 20.0);
    assert_eq!(compute_count, 2);

    // Explicit invalidation forces a recompute at the same fingerprint.
    cache.invalidate();
    let v4 = cache.get_or_compute(2, || {
        compute_count += 1;
        20.0
    });
    assert_eq!(v4, 20.0);
    assert_eq!(compute_count, 3);
}
|
||||
|
||||
// ============================================================================
|
||||
// PARALLEL COMPUTATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_parallel_energy_computation() {
    // Splitting the edge list across threads and summing partial energies
    // must match the sequential total.
    //
    // Fix: removed the unused `use std::sync::atomic::{AtomicUsize,
    // Ordering};` — the code accumulates through a Mutex, not atomics.
    use std::sync::{Arc, Mutex};
    use std::thread;

    // A chain of 100 nodes with slightly increasing first component.
    let mut states = HashMap::new();
    for i in 0..100u64 {
        states.insert(i, vec![i as f32 / 100.0, 0.5]);
    }

    let edges: Vec<TestEdge> = (0..99u64)
        .map(|i| TestEdge {
            source: i,
            target: i + 1,
            weight: 1.0,
            rho_source: RestrictionMap::new(2, 2),
            rho_target: RestrictionMap::new(2, 2),
        })
        .collect();

    // Simulate parallel computation over 4 contiguous chunks.
    let states = Arc::new(states);
    let edges = Arc::new(edges);
    let total = Arc::new(Mutex::new(0.0f32));
    let num_threads = 4;
    let edges_per_thread = edges.len() / num_threads;

    let handles: Vec<_> = (0..num_threads)
        .map(|t| {
            let states = Arc::clone(&states);
            let edges = Arc::clone(&edges);
            let total = Arc::clone(&total);

            thread::spawn(move || {
                // Last thread also takes the remainder chunk.
                let start = t * edges_per_thread;
                let end = if t == num_threads - 1 {
                    edges.len()
                } else {
                    (t + 1) * edges_per_thread
                };

                let mut local_sum = 0.0;
                for edge in &edges[start..end] {
                    if let Some(energy) = edge.compute_energy(&states) {
                        local_sum += energy;
                    }
                }

                // Merge the partial sum under the lock.
                *total.lock().unwrap() += local_sum;
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }

    let parallel_total = *total.lock().unwrap();

    // Verify against sequential computation.
    let (sequential_total, _) = compute_total_energy(&states, &edges);

    assert!(
        (parallel_total - sequential_total).abs() < 1e-6,
        "Parallel and sequential computation should match"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// SPECTRAL DRIFT DETECTION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_spectral_drift_detection() {
    // Drift fires when the eigenvalue distribution moves by more than a
    // threshold under a Wasserstein-like (sorted mean-L1) distance.

    /// Mean absolute difference between two sorted eigenvalue lists;
    /// `f32::MAX` when the lists have different lengths.
    fn eigenvalue_distance(a: &[f32], b: &[f32]) -> f32 {
        if a.len() != b.len() {
            return f32::MAX;
        }

        let sorted = |xs: &[f32]| {
            let mut v = xs.to_vec();
            v.sort_by(|x, y| x.partial_cmp(y).unwrap());
            v
        };
        let (sa, sb) = (sorted(a), sorted(b));

        let l1: f32 = sa.iter().zip(&sb).map(|(x, y)| (x - y).abs()).sum();
        l1 / a.len() as f32
    }

    let baseline = vec![0.1, 0.3, 0.5, 0.8, 1.0];
    // Small perturbation - no drift expected.
    let nearby = vec![0.11, 0.31, 0.49, 0.79, 1.01];
    // Large shift - drift expected.
    let shifted = vec![0.5, 0.6, 0.7, 0.9, 2.0];

    let dist_small = eigenvalue_distance(&baseline, &nearby);
    let dist_large = eigenvalue_distance(&baseline, &shifted);

    let drift_threshold = 0.1;

    assert!(
        dist_small < drift_threshold,
        "Small change should not trigger drift"
    );
    assert!(
        dist_large > drift_threshold,
        "Large change should trigger drift"
    );
}
|
||||
715
vendor/ruvector/crates/prime-radiant/tests/integration/gate_tests.rs
vendored
Normal file
715
vendor/ruvector/crates/prime-radiant/tests/integration/gate_tests.rs
vendored
Normal file
@@ -0,0 +1,715 @@
|
||||
//! Integration tests for Coherence Gate and Compute Ladder
|
||||
//!
|
||||
//! Tests the Execution bounded context, verifying:
|
||||
//! - Gate decisions based on energy thresholds
|
||||
//! - Compute ladder escalation (O(1) -> O(n) -> O(n^o(1)))
|
||||
//! - Persistence detection for blocking decisions
|
||||
//! - Throttling behavior under high energy
|
||||
//! - Multi-lane processing
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// ============================================================================
|
||||
// TEST TYPES
|
||||
// ============================================================================
|
||||
|
||||
/// Outcome of a coherence-gate evaluation.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum GateDecision {
    /// Green: the action may proceed unrestricted.
    Allow,
    /// Amber: the action proceeds, slowed down by `factor`.
    Throttle { factor: u32 },
    /// Red: the action is rejected outright.
    Block,
}
|
||||
|
||||
/// Compute lane in the escalation ladder, ordered cheapest-first.
///
/// The derived `Ord` follows declaration order, which the gate relies on
/// when comparing lanes against `max_escalation_lane`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum ComputeLane {
    /// O(1) - Local tile check, immediate response
    Local,
    /// O(k) - k-hop neighborhood check
    Neighborhood { k: usize },
    /// O(n) - Full graph traversal
    Global,
    /// O(n^o(1)) - Subpolynomial spectral analysis
    Spectral,
}
|
||||
|
||||
/// Threshold configuration
|
||||
#[derive(Clone, Debug)]
|
||||
struct ThresholdConfig {
|
||||
/// Energy below this -> Allow
|
||||
green_threshold: f32,
|
||||
/// Energy below this (but above green) -> Throttle
|
||||
amber_threshold: f32,
|
||||
/// Energy above this -> Block
|
||||
red_threshold: f32,
|
||||
/// Enable compute ladder escalation
|
||||
escalation_enabled: bool,
|
||||
/// Maximum escalation lane
|
||||
max_escalation_lane: ComputeLane,
|
||||
}
|
||||
|
||||
impl Default for ThresholdConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
green_threshold: 0.1,
|
||||
amber_threshold: 0.5,
|
||||
red_threshold: 1.0,
|
||||
escalation_enabled: true,
|
||||
max_escalation_lane: ComputeLane::Spectral,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Coherence gate engine
|
||||
struct CoherenceGate {
|
||||
config: ThresholdConfig,
|
||||
current_lane: ComputeLane,
|
||||
decision_history: VecDeque<GateDecision>,
|
||||
persistence_window: usize,
|
||||
}
|
||||
|
||||
impl CoherenceGate {
|
||||
fn new(config: ThresholdConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
current_lane: ComputeLane::Local,
|
||||
decision_history: VecDeque::new(),
|
||||
persistence_window: 5,
|
||||
}
|
||||
}
|
||||
|
||||
/// Make a gate decision based on current energy
|
||||
fn decide(&mut self, energy: f32) -> GateDecision {
|
||||
let decision = if energy < self.config.green_threshold {
|
||||
GateDecision::Allow
|
||||
} else if energy < self.config.amber_threshold {
|
||||
// Calculate throttle factor based on energy
|
||||
let ratio = (energy - self.config.green_threshold)
|
||||
/ (self.config.amber_threshold - self.config.green_threshold);
|
||||
let factor = (1.0 + ratio * 9.0) as u32; // 1x to 10x throttle
|
||||
GateDecision::Throttle { factor }
|
||||
} else {
|
||||
GateDecision::Block
|
||||
};
|
||||
|
||||
// Track history for persistence detection
|
||||
self.decision_history.push_back(decision);
|
||||
if self.decision_history.len() > self.persistence_window {
|
||||
self.decision_history.pop_front();
|
||||
}
|
||||
|
||||
decision
|
||||
}
|
||||
|
||||
/// Check if blocking is persistent
|
||||
fn is_persistent_block(&self) -> bool {
|
||||
if self.decision_history.len() < self.persistence_window {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.decision_history
|
||||
.iter()
|
||||
.all(|d| matches!(d, GateDecision::Block))
|
||||
}
|
||||
|
||||
/// Escalate to higher compute lane
|
||||
fn escalate(&mut self) -> Option<ComputeLane> {
|
||||
if !self.config.escalation_enabled {
|
||||
return None;
|
||||
}
|
||||
|
||||
let next_lane = match self.current_lane {
|
||||
ComputeLane::Local => Some(ComputeLane::Neighborhood { k: 2 }),
|
||||
ComputeLane::Neighborhood { k } if k < 5 => {
|
||||
Some(ComputeLane::Neighborhood { k: k + 1 })
|
||||
}
|
||||
ComputeLane::Neighborhood { .. } => Some(ComputeLane::Global),
|
||||
ComputeLane::Global => Some(ComputeLane::Spectral),
|
||||
ComputeLane::Spectral => None, // Already at max
|
||||
};
|
||||
|
||||
if let Some(lane) = next_lane {
|
||||
if lane <= self.config.max_escalation_lane {
|
||||
self.current_lane = lane;
|
||||
return Some(lane);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// De-escalate to lower compute lane
|
||||
fn deescalate(&mut self) -> Option<ComputeLane> {
|
||||
let prev_lane = match self.current_lane {
|
||||
ComputeLane::Local => None,
|
||||
ComputeLane::Neighborhood { k } if k > 2 => {
|
||||
Some(ComputeLane::Neighborhood { k: k - 1 })
|
||||
}
|
||||
ComputeLane::Neighborhood { .. } => Some(ComputeLane::Local),
|
||||
ComputeLane::Global => Some(ComputeLane::Neighborhood { k: 5 }),
|
||||
ComputeLane::Spectral => Some(ComputeLane::Global),
|
||||
};
|
||||
|
||||
if let Some(lane) = prev_lane {
|
||||
self.current_lane = lane;
|
||||
return Some(lane);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Get current compute lane
|
||||
fn current_lane(&self) -> ComputeLane {
|
||||
self.current_lane
|
||||
}
|
||||
|
||||
/// Clear decision history
|
||||
fn reset_history(&mut self) {
|
||||
self.decision_history.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// BASIC GATE DECISION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_gate_allows_low_energy() {
    // Energy well under the default green threshold (0.1) -> Allow.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    assert_eq!(gate.decide(0.05), GateDecision::Allow);
}
|
||||
|
||||
#[test]
fn test_gate_throttles_medium_energy() {
    // Energy inside the green..amber band -> Throttle, factor in 1..=10.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    match gate.decide(0.3) {
        GateDecision::Throttle { factor } => {
            assert!((1..=10).contains(&factor));
        }
        other => panic!("Expected Throttle decision, got {:?}", other),
    }
}
|
||||
|
||||
#[test]
fn test_gate_blocks_high_energy() {
    // Energy at or above the default amber threshold -> Block.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    assert_eq!(gate.decide(0.8), GateDecision::Block);
}
|
||||
|
||||
#[test]
fn test_gate_blocks_above_red_threshold() {
    // With a lowered red threshold, 0.6 still lands past the throttle band
    // and the gate blocks.
    let config = ThresholdConfig {
        red_threshold: 0.5,
        ..Default::default()
    };
    let mut gate = CoherenceGate::new(config);

    assert_eq!(gate.decide(0.6), GateDecision::Block);
}
|
||||
|
||||
#[test]
fn test_throttle_factor_increases_with_energy() {
    // The throttle factor is monotonic in energy inside the amber band.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    let low = gate.decide(0.15);
    let high = gate.decide(0.45);

    if let (GateDecision::Throttle { factor: f1 }, GateDecision::Throttle { factor: f2 }) =
        (low, high)
    {
        assert!(
            f2 > f1,
            "Higher energy should produce higher throttle factor"
        );
    } else {
        panic!("Expected both to be Throttle decisions");
    }
}
|
||||
|
||||
// ============================================================================
// THRESHOLD BOUNDARY TESTS
// ============================================================================

/// Energy strictly below the green threshold is allowed.
#[test]
fn test_boundary_just_below_green() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.1,
        ..Default::default()
    });

    let decision = gate.decide(0.099);
    assert_eq!(decision, GateDecision::Allow);
}

/// Energy exactly at the green threshold is no longer allowed: the Allow
/// band uses a strict `<` comparison, so the boundary value is throttled.
#[test]
fn test_boundary_at_green() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.1,
        ..Default::default()
    });

    // At the threshold the strict `<` check fails, so this is Throttle.
    let decision = gate.decide(0.1);
    assert!(matches!(decision, GateDecision::Throttle { .. }));
}

/// Energy strictly below the amber threshold stays in the throttle band.
#[test]
fn test_boundary_just_below_amber() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.1,
        amber_threshold: 0.5,
        ..Default::default()
    });

    let decision = gate.decide(0.499);
    assert!(matches!(decision, GateDecision::Throttle { .. }));
}

/// Energy exactly at the amber threshold escalates to Block (strict `<`).
#[test]
fn test_boundary_at_amber() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.1,
        amber_threshold: 0.5,
        ..Default::default()
    });

    let decision = gate.decide(0.5);
    assert_eq!(decision, GateDecision::Block);
}
|
||||
|
||||
// ============================================================================
// COMPUTE LADDER ESCALATION TESTS
// ============================================================================

/// A freshly constructed gate starts on the cheapest compute lane.
#[test]
fn test_initial_lane_is_local() {
    let gate = CoherenceGate::new(ThresholdConfig::default());
    assert_eq!(gate.current_lane(), ComputeLane::Local);
}

/// The first escalation step moves Local -> Neighborhood { k: 2 }.
#[test]
fn test_escalation_from_local_to_neighborhood() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    let next = gate.escalate();

    assert_eq!(next, Some(ComputeLane::Neighborhood { k: 2 }));
    assert_eq!(gate.current_lane(), ComputeLane::Neighborhood { k: 2 });
}

/// Escalation walks the neighborhood lanes k = 2..=5, then reaches Global.
#[test]
fn test_escalation_through_neighborhood_k() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Each escalate() call bumps k by one through the neighborhood lanes.
    for k in 2..=5 {
        gate.escalate();
        assert_eq!(gate.current_lane(), ComputeLane::Neighborhood { k });
    }

    // Past k = 5 the ladder continues to the Global lane.
    gate.escalate();
    assert_eq!(gate.current_lane(), ComputeLane::Global);
}

/// Escalating with no cap eventually reaches the Spectral lane.
#[test]
fn test_escalation_to_spectral() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Drive the ladder until it refuses to climb further.
    while gate.escalate().is_some() {}

    assert_eq!(gate.current_lane(), ComputeLane::Spectral);
}
|
||||
|
||||
/// The configured max_escalation_lane caps the ladder.
#[test]
fn test_escalation_respects_max_lane() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        max_escalation_lane: ComputeLane::Global,
        ..Default::default()
    });

    // Drive the ladder as far as it will go.
    while gate.escalate().is_some() {}

    // The cap stops us at Global; Spectral is never reached.
    assert_eq!(gate.current_lane(), ComputeLane::Global);
}

/// With escalation disabled, escalate() is a no-op returning None.
#[test]
fn test_escalation_disabled() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        escalation_enabled: false,
        ..Default::default()
    });

    assert_eq!(gate.escalate(), None);
    assert_eq!(gate.current_lane(), ComputeLane::Local);
}

/// De-escalating from Spectral steps back down to Global.
#[test]
fn test_deescalation_from_spectral() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Climb all the way up first.
    while gate.escalate().is_some() {}
    assert_eq!(gate.current_lane(), ComputeLane::Spectral);

    // One step back down.
    let lane = gate.deescalate();
    assert_eq!(lane, Some(ComputeLane::Global));
    assert_eq!(gate.current_lane(), ComputeLane::Global);
}

/// Repeated de-escalation always bottoms out at Local.
#[test]
fn test_deescalation_to_local() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Climb a couple of rungs.
    gate.escalate();
    gate.escalate();
    assert_eq!(gate.current_lane(), ComputeLane::Neighborhood { k: 3 });

    // Walk back down to the bottom.
    while gate.deescalate().is_some() {}

    assert_eq!(gate.current_lane(), ComputeLane::Local);
}

/// De-escalating below the bottom lane returns None and changes nothing.
#[test]
fn test_deescalation_from_local_returns_none() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    assert_eq!(gate.deescalate(), None);
    assert_eq!(gate.current_lane(), ComputeLane::Local);
}
|
||||
|
||||
// ============================================================================
// PERSISTENCE DETECTION TESTS
// ============================================================================

/// A new gate has an empty decision history and reports no persistence.
#[test]
fn test_no_persistence_initially() {
    let gate = CoherenceGate::new(ThresholdConfig::default());
    assert!(!gate.is_persistent_block());
}

/// A full window of consecutive Block decisions trips the detector.
#[test]
fn test_persistence_detected_after_window() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    for _ in 0..5 {
        gate.decide(0.9); // every decision is Block
    }

    assert!(gate.is_persistent_block());
}

/// A single Allow interleaved with Blocks breaks the streak.
#[test]
fn test_no_persistence_with_mixed_decisions() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Block, Allow, Block, Block, Block — not an unbroken run.
    for energy in [0.9, 0.05, 0.9, 0.9, 0.9] {
        gate.decide(energy);
    }

    assert!(!gate.is_persistent_block());
}

/// The persistence window slides: old Allows fall out once enough
/// consecutive Blocks arrive.
#[test]
fn test_persistence_window_sliding() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Start with low-energy Allows.
    for _ in 0..3 {
        gate.decide(0.05);
    }
    assert!(!gate.is_persistent_block());

    // Then a full run of Blocks pushes the Allows out of the window.
    for _ in 0..5 {
        gate.decide(0.9);
    }
    assert!(gate.is_persistent_block());
}

/// reset_history() clears the window and with it any persistence flag.
#[test]
fn test_reset_clears_persistence() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Build up a persistent-block state.
    for _ in 0..5 {
        gate.decide(0.9);
    }
    assert!(gate.is_persistent_block());

    gate.reset_history();

    assert!(!gate.is_persistent_block());
}
|
||||
|
||||
// ============================================================================
// MULTI-LANE PROCESSING TESTS
// ============================================================================

/// ComputeLane's ordering ranks lanes by computational cost.
#[test]
fn test_lane_complexity_ordering() {
    assert!(ComputeLane::Local < ComputeLane::Neighborhood { k: 2 });
    assert!(ComputeLane::Neighborhood { k: 2 } < ComputeLane::Neighborhood { k: 3 });
    assert!(ComputeLane::Neighborhood { k: 5 } < ComputeLane::Global);
    assert!(ComputeLane::Global < ComputeLane::Spectral);
}

/// Policy simulation: a Block decision triggers one escalation step.
#[test]
fn test_automatic_escalation_on_block() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // High energy is blocked; the simulated policy then escalates.
    if matches!(gate.decide(0.8), GateDecision::Block) {
        assert!(gate.escalate().is_some(), "Should escalate on block");
    }

    // After one escalation we are above the bottom lane.
    assert!(gate.current_lane() > ComputeLane::Local);
}

/// Policy simulation: an Allow decision lets the gate step back down.
#[test]
fn test_automatic_deescalation_on_allow() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Climb two rungs first.
    gate.escalate();
    gate.escalate();
    assert!(gate.current_lane() > ComputeLane::Local);

    // Low energy is allowed.
    assert_eq!(gate.decide(0.01), GateDecision::Allow);

    // The simulated policy steps back down after the Allow.
    gate.deescalate();
    assert!(gate.current_lane() < ComputeLane::Neighborhood { k: 3 });
}
|
||||
|
||||
// ============================================================================
// CUSTOM THRESHOLD TESTS
// ============================================================================

/// Tight custom thresholds shift the Allow/Throttle/Block bands accordingly.
#[test]
fn test_custom_thresholds() {
    let config = ThresholdConfig {
        green_threshold: 0.05,
        amber_threshold: 0.15,
        red_threshold: 0.25,
        escalation_enabled: true,
        max_escalation_lane: ComputeLane::Global,
    };

    let mut gate = CoherenceGate::new(config);

    // Very low energy -> Allow
    assert_eq!(gate.decide(0.03), GateDecision::Allow);

    // Low-medium energy -> Throttle
    assert!(matches!(gate.decide(0.10), GateDecision::Throttle { .. }));

    // Medium energy -> Block (with these tight thresholds)
    assert_eq!(gate.decide(0.20), GateDecision::Block);
}

/// Degenerate configuration: all thresholds collapsed to zero.
#[test]
fn test_zero_thresholds() {
    let config = ThresholdConfig {
        green_threshold: 0.0,
        amber_threshold: 0.0,
        red_threshold: 0.0,
        ..Default::default()
    };

    let mut gate = CoherenceGate::new(config);

    // Any positive energy should block.
    assert_eq!(gate.decide(0.001), GateDecision::Block);
    assert_eq!(gate.decide(1.0), GateDecision::Block);

    // Zero energy sits exactly on every band boundary. The Allow and
    // Throttle bands use strict `<` comparisons (see the boundary tests),
    // so with all thresholds at 0 an energy of 0.0 falls through to Block.
    assert_eq!(gate.decide(0.0), GateDecision::Block);
}
|
||||
|
||||
// ============================================================================
// CONCURRENT GATE ACCESS TESTS
// ============================================================================

/// Four threads share one gate behind a mutex; every thread must obtain
/// a decision without deadlock or panic.
#[test]
fn test_gate_thread_safety_simulation() {
    use std::sync::{Arc, Mutex};
    use std::thread;

    // The gate is not internally synchronized, so wrap it in a mutex.
    let shared = Arc::new(Mutex::new(CoherenceGate::new(ThresholdConfig::default())));

    let workers: Vec<_> = (0..4)
        .map(|i| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                let energy = 0.1 * (i as f32);
                let decision = shared.lock().unwrap().decide(energy);
                (i, decision)
            })
        })
        .collect();

    let outcomes: Vec<_> = workers.into_iter().map(|w| w.join().unwrap()).collect();

    // Verify each thread got a decision.
    assert_eq!(outcomes.len(), 4);
}
|
||||
|
||||
// ============================================================================
// ENERGY SPIKE HANDLING TESTS
// ============================================================================

/// A sudden high-energy reading is blocked on the very next decision.
#[test]
fn test_energy_spike_causes_immediate_block() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Steady-state low energy: everything is allowed.
    for _ in 0..3 {
        assert_eq!(gate.decide(0.05), GateDecision::Allow);
    }

    // The spike is blocked immediately, with no hysteresis.
    assert_eq!(gate.decide(0.9), GateDecision::Block);
}

/// One isolated spike never trips the persistent-block detector, and
/// normal operation resumes afterwards.
#[test]
fn test_recovery_after_spike() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Single blocked spike: not enough history to count as persistent.
    gate.decide(0.9);
    assert!(!gate.is_persistent_block());

    // Low-energy recovery decisions flush the spike out of the window.
    for _ in 0..5 {
        gate.decide(0.05);
    }
    assert!(!gate.is_persistent_block());

    // Normal operation has resumed.
    assert_eq!(gate.decide(0.05), GateDecision::Allow);
}
|
||||
|
||||
// ============================================================================
// LANE LATENCY SIMULATION TESTS
// ============================================================================

/// Simulated per-lane latencies must be strictly increasing with lane cost.
#[test]
fn test_lane_latency_simulation() {
    // Model latency for each lane.
    fn lane_latency(lane: ComputeLane) -> Duration {
        match lane {
            ComputeLane::Local => Duration::from_micros(10),
            ComputeLane::Neighborhood { k } => Duration::from_micros(100 * k as u64),
            ComputeLane::Global => Duration::from_millis(10),
            ComputeLane::Spectral => Duration::from_millis(100),
        }
    }

    let lanes = [
        ComputeLane::Local,
        ComputeLane::Neighborhood { k: 2 },
        ComputeLane::Neighborhood { k: 5 },
        ComputeLane::Global,
        ComputeLane::Spectral,
    ];

    let latencies: Vec<_> = lanes.iter().map(|l| lane_latency(*l)).collect();

    // Every step up the ladder must cost strictly more.
    for pair in latencies.windows(2) {
        assert!(pair[1] > pair[0], "Higher lanes should have higher latency");
    }
}
|
||||
|
||||
// ============================================================================
// REAL-TIME BUDGET TESTS
// ============================================================================

/// A burst of 1000 Local-lane decisions must fit comfortably inside a
/// (generously scaled) 1 ms real-time budget.
#[test]
fn test_local_lane_meets_budget() {
    let budget = Duration::from_millis(1);

    let start = Instant::now();

    // Simulate local computation: decisions only, no graph work.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    for _ in 0..1000 {
        gate.decide(0.05);
    }

    let elapsed = start.elapsed();

    // 1000 decisions should still be fast.
    assert!(
        elapsed < budget * 100, // Very generous for test environment
        "Local computation took too long: {:?}",
        elapsed
    );
}
|
||||
1017
vendor/ruvector/crates/prime-radiant/tests/integration/governance_tests.rs
vendored
Normal file
1017
vendor/ruvector/crates/prime-radiant/tests/integration/governance_tests.rs
vendored
Normal file
File diff suppressed because it is too large
Load Diff
528
vendor/ruvector/crates/prime-radiant/tests/integration/graph_tests.rs
vendored
Normal file
528
vendor/ruvector/crates/prime-radiant/tests/integration/graph_tests.rs
vendored
Normal file
@@ -0,0 +1,528 @@
|
||||
//! Integration tests for SheafGraph CRUD operations and dimension validation
|
||||
//!
|
||||
//! Tests the Knowledge Substrate bounded context, verifying:
|
||||
//! - Node creation, update, and deletion
|
||||
//! - Edge creation with restriction maps
|
||||
//! - Dimension compatibility validation
|
||||
//! - Subgraph extraction
|
||||
//! - Fingerprint-based change detection
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Test helper: Create a simple identity restriction map
fn identity_restriction(dim: usize) -> Vec<Vec<f32>> {
    let mut matrix = vec![vec![0.0; dim]; dim];
    for (i, row) in matrix.iter_mut().enumerate() {
        row[i] = 1.0;
    }
    matrix
}

/// Test helper: Create a projection restriction map (projects to first k dimensions)
fn projection_restriction(input_dim: usize, output_dim: usize) -> Vec<Vec<f32>> {
    let mut matrix = vec![vec![0.0; input_dim]; output_dim];
    for (i, row) in matrix.iter_mut().enumerate() {
        // Rows beyond the input dimension stay all-zero.
        if i < input_dim {
            row[i] = 1.0;
        }
    }
    matrix
}
|
||||
|
||||
// ============================================================================
// SHEAF NODE TESTS
// ============================================================================

/// A node can be created from any valid state vector.
#[test]
fn test_node_creation_with_valid_state() {
    let state = vec![1.0_f32, 0.5, 0.3, 0.2];
    let dimension = state.len();

    // The state is preserved exactly.
    assert_eq!(state.len(), dimension);
    assert!((state[0] - 1.0).abs() < f32::EPSILON);
}

/// State updates must keep the node's dimension fixed.
#[test]
fn test_node_state_update_preserves_dimension() {
    let before = vec![1.0_f32, 0.5, 0.3];
    let after = vec![0.8_f32, 0.6, 0.4];

    assert_eq!(before.len(), after.len());
}

/// A replacement state with a different dimension must be rejected.
#[test]
fn test_node_state_update_rejects_dimension_mismatch() {
    let current = vec![1.0_f32, 0.5, 0.3];
    let mismatched = vec![0.8_f32, 0.6]; // Only 2 dimensions

    assert_ne!(current.len(), mismatched.len());
}

/// Node metadata is a free-form string map for domain-specific fields.
#[test]
fn test_node_metadata_stores_custom_fields() {
    let mut metadata: HashMap<String, String> = HashMap::new();
    for (key, value) in [("source", "sensor_1"), ("confidence", "0.95")] {
        metadata.insert(key.to_string(), value.to_string());
    }

    assert_eq!(metadata.get("source"), Some(&"sensor_1".to_string()));
    assert_eq!(metadata.len(), 2);
}
|
||||
|
||||
// ============================================================================
// SHEAF EDGE TESTS
// ============================================================================

/// Shared helper for these tests: matrix-vector product of a restriction
/// map and a node state.
fn apply_restriction(rho: &[Vec<f32>], state: &[f32]) -> Vec<f32> {
    rho.iter()
        .map(|row| row.iter().zip(state).map(|(a, b)| a * b).sum())
        .collect()
}

/// An identity restriction leaves the state untouched.
#[test]
fn test_edge_creation_with_identity_restriction() {
    let dim = 4;
    let rho = identity_restriction(dim);
    let state = vec![1.0_f32, 2.0, 3.0, 4.0];

    assert_eq!(apply_restriction(&rho, &state), state);
}

/// A projection restriction truncates the state to its leading components.
#[test]
fn test_edge_creation_with_projection_restriction() {
    let rho = projection_restriction(4, 2);
    let state = vec![1.0_f32, 2.0, 3.0, 4.0];

    let projected = apply_restriction(&rho, &state);

    assert_eq!(projected.len(), 2);
    assert_eq!(projected, vec![1.0, 2.0]);
}

/// Residual energy scales linearly with the edge weight.
#[test]
fn test_edge_weight_affects_energy() {
    let residual = [0.1_f32, 0.1, 0.1];
    let norm_sq: f32 = residual.iter().map(|x| x * x).sum();

    let low_weight_energy = 1.0 * norm_sq;
    let high_weight_energy = 10.0 * norm_sq;

    assert!(high_weight_energy > low_weight_energy);
    assert!((high_weight_energy / low_weight_energy - 10.0).abs() < f32::EPSILON);
}

/// Restriction map shape must match the node and edge stalk dimensions.
#[test]
fn test_edge_restriction_dimension_validation() {
    let source_dim = 4;
    let edge_dim = 2;

    // Valid map: source_dim inputs -> edge_dim outputs.
    let valid_rho = projection_restriction(source_dim, edge_dim);
    assert_eq!(valid_rho.len(), edge_dim);
    assert_eq!(valid_rho[0].len(), source_dim);

    // Applying it to a 4D state yields a 2D result.
    let state = vec![1.0_f32, 2.0, 3.0, 4.0];
    assert_eq!(apply_restriction(&valid_rho, &state).len(), edge_dim);
}
|
||||
|
||||
// ============================================================================
// SHEAF GRAPH CRUD TESTS
// ============================================================================

/// Inserting nodes grows the node set.
#[test]
fn test_graph_add_node() {
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5, 0.3]);
    assert_eq!(nodes.len(), 1);

    nodes.insert(2, vec![0.8, 0.6, 0.4]);
    assert_eq!(nodes.len(), 2);
}

/// Edges may only connect endpoints that actually exist in the graph.
#[test]
fn test_graph_add_edge_validates_nodes_exist() {
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);

    // Both nodes exist - an edge between them would be allowed.
    assert!(nodes.contains_key(&1));
    assert!(nodes.contains_key(&2));

    // Non-existent node - an edge to it must not be allowed.
    assert!(!nodes.contains_key(&999));
}

/// Deleting a node removes every edge incident to it.
#[test]
fn test_graph_remove_node_cascades_to_edges() {
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    let mut edges: HashMap<(u64, u64), f32> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);
    nodes.insert(3, vec![0.7, 0.4]);

    edges.insert((1, 2), 1.0);
    edges.insert((2, 3), 1.0);
    edges.insert((1, 3), 1.0);

    // Remove node 1 and cascade the deletion to its incident edges.
    nodes.remove(&1);
    edges.retain(|(src, tgt), _| *src != 1 && *tgt != 1);

    assert_eq!(nodes.len(), 2);
    assert_eq!(edges.len(), 1); // Only (2,3) remains
    assert!(edges.contains_key(&(2, 3)));
}

/// Re-inserting under the same key overwrites the node's state.
#[test]
fn test_graph_update_node_state() {
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5, 0.3]);

    // Update in place.
    nodes.insert(1, vec![0.9, 0.6, 0.4]);

    let state = nodes.get(&1).unwrap();
    assert!((state[0] - 0.9).abs() < f32::EPSILON);
}
|
||||
|
||||
// ============================================================================
// SUBGRAPH EXTRACTION TESTS
// ============================================================================

/// k-hop BFS around a center node collects exactly the nodes within k hops.
#[test]
fn test_subgraph_extraction_bfs() {
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    let mut adjacency: HashMap<u64, Vec<u64>> = HashMap::new();

    // Chain topology: 1 - 2 - 3 - 4 - 5
    for i in 1..=5u64 {
        nodes.insert(i, vec![i as f32; 3]);
        let mut neighbors = Vec::new();
        if i > 1 {
            neighbors.push(i - 1);
        }
        if i < 5 {
            neighbors.push(i + 1);
        }
        adjacency.insert(i, neighbors);
    }

    // Breadth-first expansion, one ring of neighbors per iteration.
    fn extract_khop(center: u64, k: usize, adjacency: &HashMap<u64, Vec<u64>>) -> Vec<u64> {
        let mut visited = vec![center];
        let mut frontier = vec![center];

        for _ in 0..k {
            let mut next = Vec::new();
            for node in &frontier {
                if let Some(neighbors) = adjacency.get(node) {
                    for &n in neighbors {
                        if !visited.contains(&n) {
                            visited.push(n);
                            next.push(n);
                        }
                    }
                }
            }
            frontier = next;
        }
        visited
    }

    let one_hop = extract_khop(3, 1, &adjacency);
    assert!(one_hop.contains(&3)); // Center
    assert!(one_hop.contains(&2)); // 1-hop neighbor
    assert!(one_hop.contains(&4)); // 1-hop neighbor
    assert!(!one_hop.contains(&1)); // 2-hops away
    assert!(!one_hop.contains(&5)); // 2-hops away

    let two_hop = extract_khop(3, 2, &adjacency);
    assert_eq!(two_hop.len(), 5); // All nodes within 2 hops
}
|
||||
|
||||
// ============================================================================
// NAMESPACE AND SCOPE TESTS
// ============================================================================

/// Node ids registered under one namespace never leak into another.
#[test]
fn test_namespace_isolation() {
    let mut namespaces: HashMap<String, Vec<u64>> = HashMap::new();

    for (ns, id) in [("finance", 1), ("finance", 2), ("medical", 3)] {
        namespaces.entry(ns.to_string()).or_default().push(id);
    }

    let finance_nodes = namespaces.get("finance").unwrap();
    let medical_nodes = namespaces.get("medical").unwrap();

    assert_eq!(finance_nodes.len(), 2);
    assert_eq!(medical_nodes.len(), 1);

    // The two namespaces share no node ids.
    assert!(finance_nodes.iter().all(|id| !medical_nodes.contains(id)));
}
|
||||
|
||||
// ============================================================================
// FINGERPRINT TESTS
// ============================================================================

/// Any structural change (node or edge) must alter the graph fingerprint.
#[test]
fn test_fingerprint_changes_on_modification() {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Deterministic digest over sorted node ids, their state bits, and edges.
    fn compute_fingerprint(nodes: &HashMap<u64, Vec<f32>>, edges: &[(u64, u64)]) -> u64 {
        let mut hasher = DefaultHasher::new();

        let mut ids: Vec<u64> = nodes.keys().copied().collect();
        ids.sort_unstable();
        for id in ids {
            id.hash(&mut hasher);
            // Hash raw bit patterns so f32 values contribute exactly.
            for component in &nodes[&id] {
                component.to_bits().hash(&mut hasher);
            }
        }

        for &(src, tgt) in edges {
            src.hash(&mut hasher);
            tgt.hash(&mut hasher);
        }

        hasher.finish()
    }

    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);

    let edges_before = vec![(1, 2)];
    let fp_initial = compute_fingerprint(&nodes, &edges_before);

    // Adding a node changes the fingerprint.
    nodes.insert(3, vec![0.7, 0.4]);
    let fp_with_node = compute_fingerprint(&nodes, &edges_before);
    assert_ne!(fp_initial, fp_with_node);

    // Adding an edge changes it again.
    let edges_after = vec![(1, 2), (2, 3)];
    let fp_with_edge = compute_fingerprint(&nodes, &edges_after);
    assert_ne!(fp_with_node, fp_with_edge);
}

/// The fingerprint is a pure function of the graph: identical input,
/// identical digest.
#[test]
fn test_fingerprint_stable_without_modification() {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn compute_fingerprint(nodes: &HashMap<u64, Vec<f32>>) -> u64 {
        let mut hasher = DefaultHasher::new();
        let mut ids: Vec<u64> = nodes.keys().copied().collect();
        ids.sort_unstable();
        for id in ids {
            id.hash(&mut hasher);
        }
        hasher.finish()
    }

    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);

    assert_eq!(compute_fingerprint(&nodes), compute_fingerprint(&nodes));
}
|
||||
|
||||
// ============================================================================
// DIMENSION VALIDATION TESTS
// ============================================================================

/// A restriction map validates its shape on construction and rejects
/// inputs whose length does not match its column count.
#[test]
fn test_restriction_map_dimension_compatibility() {
    struct RestrictionMap {
        matrix: Vec<Vec<f32>>,
    }

    impl RestrictionMap {
        /// Wrap a matrix, requiring it to be non-empty and rectangular.
        fn new(matrix: Vec<Vec<f32>>) -> Result<Self, &'static str> {
            if matrix.is_empty() {
                return Err("Matrix cannot be empty");
            }
            let width = matrix[0].len();
            if matrix.iter().any(|row| row.len() != width) {
                return Err("All rows must have same length");
            }
            Ok(Self { matrix })
        }

        /// Column count = expected input dimension.
        fn input_dim(&self) -> usize {
            self.matrix[0].len()
        }

        /// Row count = produced output dimension.
        fn output_dim(&self) -> usize {
            self.matrix.len()
        }

        /// Matrix-vector product; fails on dimension mismatch.
        fn apply(&self, input: &[f32]) -> Result<Vec<f32>, &'static str> {
            if input.len() != self.input_dim() {
                return Err("Input dimension mismatch");
            }
            let out = self
                .matrix
                .iter()
                .map(|row| row.iter().zip(input).map(|(a, b)| a * b).sum())
                .collect();
            Ok(out)
        }
    }

    let rho = RestrictionMap::new(projection_restriction(4, 2)).unwrap();

    // Correctly sized input succeeds and projects down to 2 dimensions.
    let ok = rho.apply(&[1.0, 2.0, 3.0, 4.0]);
    assert!(ok.is_ok());
    assert_eq!(ok.unwrap().len(), 2);

    // Undersized input is rejected.
    let err = rho.apply(&[1.0, 2.0]);
    assert!(err.is_err());
}

/// Both restriction maps on an edge must land in the same edge stalk space.
#[test]
fn test_edge_creation_validates_stalk_dimensions() {
    let source_dim = 4;
    let target_dim = 3;
    let edge_stalk_dim = 2;

    let rho_source = projection_restriction(source_dim, edge_stalk_dim);
    let rho_target = projection_restriction(target_dim, edge_stalk_dim);

    // Identical output dimension on both sides of the edge.
    assert_eq!(rho_source.len(), edge_stalk_dim);
    assert_eq!(rho_target.len(), edge_stalk_dim);

    // The source map accepts source_dim input.
    assert_eq!(rho_source[0].len(), source_dim);

    // The target map accepts target_dim input.
    assert_eq!(rho_target[0].len(), target_dim);
}
|
||||
|
||||
// ============================================================================
// CONCURRENT ACCESS TESTS
// ============================================================================

/// Read-only node data behind an Arc can be shared across threads, and
/// every reader observes the same totals.
#[test]
fn test_concurrent_node_reads() {
    use std::sync::Arc;
    use std::thread;

    // Build an immutable 100-node map and share it without a lock.
    let mut map = HashMap::new();
    for i in 0..100u64 {
        map.insert(i, vec![i as f32; 4]);
    }
    let nodes: Arc<HashMap<u64, Vec<f32>>> = Arc::new(map);

    let readers: Vec<_> = (0..4)
        .map(|_| {
            let nodes = Arc::clone(&nodes);
            thread::spawn(move || {
                let mut total = 0.0;
                for i in 0..100 {
                    if let Some(state) = nodes.get(&i) {
                        total += state[0];
                    }
                }
                total
            })
        })
        .collect();

    // All threads should compute the same sum.
    let expected: f32 = (0..100).map(|i| i as f32).sum();
    for handle in readers {
        let total: f32 = handle.join().unwrap();
        assert!((total - expected).abs() < f32::EPSILON);
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// LARGE GRAPH TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_large_graph_creation() {
    // Building a 1000-node graph with 16-dimensional states should succeed
    // and support random access by node id.
    let num_nodes = 1000usize;
    let dim = 16usize;

    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::with_capacity(num_nodes);
    for i in 0..num_nodes {
        // Deterministic state: component j of node i is (i*dim + j) / 1000.
        let state: Vec<f32> = (0..dim)
            .map(|j| (i * dim + j) as f32 / 1000.0)
            .collect();
        nodes.insert(i as u64, state);
    }

    assert_eq!(nodes.len(), num_nodes);

    // Spot-check a node in the middle of the id range.
    let mid = nodes.get(&500).unwrap();
    assert_eq!(mid.len(), dim);
}
|
||||
|
||||
#[test]
|
||||
fn test_sparse_graph_edge_ratio() {
|
||||
// Sparse graphs should have edges << nodes^2
|
||||
let num_nodes = 100;
|
||||
let avg_degree = 4;
|
||||
|
||||
let num_edges = (num_nodes * avg_degree) / 2; // Undirected
|
||||
let max_edges = num_nodes * (num_nodes - 1) / 2;
|
||||
let sparsity = num_edges as f64 / max_edges as f64;
|
||||
|
||||
assert!(sparsity < 0.1); // Less than 10% of possible edges
|
||||
}
|
||||
13
vendor/ruvector/crates/prime-radiant/tests/integration/mod.rs
vendored
Normal file
13
vendor/ruvector/crates/prime-radiant/tests/integration/mod.rs
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
//! Integration tests for Prime-Radiant Coherence Engine
//!
//! This module contains integration tests organized by bounded context:
//!
//! - `graph_tests`: SheafGraph CRUD operations and dimension validation
//! - `coherence_tests`: Energy computation and incremental updates
//! - `governance_tests`: Policy bundles and witness chain integrity
//! - `gate_tests`: Compute ladder escalation and persistence detection

mod coherence_tests;
mod gate_tests;
mod governance_tests;
mod graph_tests;
|
||||
Reference in New Issue
Block a user