Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
772
vendor/ruvector/crates/prime-radiant/tests/chaos_tests.rs
vendored
Normal file
772
vendor/ruvector/crates/prime-radiant/tests/chaos_tests.rs
vendored
Normal file
@@ -0,0 +1,772 @@
|
||||
//! Chaos Tests for Coherence Engine
|
||||
//!
|
||||
//! Tests system behavior under adversarial and random conditions:
|
||||
//! - Random energy spikes
|
||||
//! - Throttling behavior under load
|
||||
//! - Recovery from extreme states
|
||||
//! - Concurrent modifications
|
||||
//! - Edge case handling
|
||||
|
||||
use rand::prelude::*;
|
||||
use rand_chacha::ChaCha8Rng;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// ============================================================================
|
||||
// TEST INFRASTRUCTURE
|
||||
// ============================================================================
|
||||
|
||||
/// Coherence gate with throttling.
///
/// Classifies incoming energy readings into allow / throttle / block zones
/// and maintains a rolling throttle level plus per-outcome counters.
#[derive(Clone)]
struct ThrottledGate {
    // Energies strictly below this are always allowed (throttle relaxes).
    green_threshold: f32,
    // Energies in [green, amber) are throttled; at or above amber, blocked.
    amber_threshold: f32,
    // NOTE(review): never read by `decide` — blocking starts at the amber
    // boundary. Confirm whether a separate red zone was intended.
    red_threshold: f32,
    current_throttle: f32, // 0.0 = no throttle, 1.0 = max throttle
    // Running counters for each decision outcome.
    blocked_count: u64,
    throttled_count: u64,
    allowed_count: u64,
}
|
||||
|
||||
impl ThrottledGate {
|
||||
fn new(green: f32, amber: f32, red: f32) -> Self {
|
||||
Self {
|
||||
green_threshold: green,
|
||||
amber_threshold: amber,
|
||||
red_threshold: red,
|
||||
current_throttle: 0.0,
|
||||
blocked_count: 0,
|
||||
throttled_count: 0,
|
||||
allowed_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn decide(&mut self, energy: f32) -> Decision {
|
||||
if energy < self.green_threshold {
|
||||
self.current_throttle = (self.current_throttle - 0.1).max(0.0);
|
||||
self.allowed_count += 1;
|
||||
Decision::Allow
|
||||
} else if energy < self.amber_threshold {
|
||||
let throttle_factor =
|
||||
(energy - self.green_threshold) / (self.amber_threshold - self.green_threshold);
|
||||
self.current_throttle = (self.current_throttle + throttle_factor * 0.1).min(1.0);
|
||||
self.throttled_count += 1;
|
||||
Decision::Throttle {
|
||||
factor: throttle_factor,
|
||||
}
|
||||
} else {
|
||||
self.current_throttle = 1.0;
|
||||
self.blocked_count += 1;
|
||||
Decision::Block
|
||||
}
|
||||
}
|
||||
|
||||
fn should_process(&self, rng: &mut impl Rng) -> bool {
|
||||
if self.current_throttle <= 0.0 {
|
||||
true
|
||||
} else {
|
||||
rng.gen::<f32>() > self.current_throttle
|
||||
}
|
||||
}
|
||||
|
||||
fn stats(&self) -> GateStats {
|
||||
let total = self.allowed_count + self.throttled_count + self.blocked_count;
|
||||
GateStats {
|
||||
total_decisions: total,
|
||||
allowed: self.allowed_count,
|
||||
throttled: self.throttled_count,
|
||||
blocked: self.blocked_count,
|
||||
allow_rate: if total > 0 {
|
||||
self.allowed_count as f64 / total as f64
|
||||
} else {
|
||||
1.0
|
||||
},
|
||||
block_rate: if total > 0 {
|
||||
self.blocked_count as f64 / total as f64
|
||||
} else {
|
||||
0.0
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of a single `ThrottledGate::decide` call.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Decision {
    /// Energy below the green threshold — proceed unimpeded.
    Allow,
    /// Energy in the amber band; `factor` says how deep into the band
    /// the reading sits (0.0 at the green edge, approaching 1.0 at amber).
    Throttle { factor: f32 },
    /// Energy at or above the amber threshold — rejected.
    Block,
}
|
||||
|
||||
/// Aggregated counters and derived rates snapshotted from a `ThrottledGate`.
#[derive(Debug)]
struct GateStats {
    // Sum of the three outcome counters.
    total_decisions: u64,
    allowed: u64,
    throttled: u64,
    blocked: u64,
    // allowed / total; reported as 1.0 when no decisions have been made.
    allow_rate: f64,
    // blocked / total; reported as 0.0 when no decisions have been made.
    block_rate: f64,
}
|
||||
|
||||
/// Simple coherence state for chaos testing.
///
/// A small weighted graph: node id → state vector, (src, tgt) → edge weight.
struct ChaosState {
    nodes: HashMap<u64, Vec<f32>>,
    edges: HashMap<(u64, u64), f32>,
    // Tally of mutating operations, bumped with Relaxed ordering.
    // NOTE(review): never read anywhere in these tests — kept as a tally only.
    operation_count: AtomicU64,
}
|
||||
|
||||
impl ChaosState {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
nodes: HashMap::new(),
|
||||
edges: HashMap::new(),
|
||||
operation_count: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_node(&mut self, id: u64, state: Vec<f32>) {
|
||||
self.nodes.insert(id, state);
|
||||
self.operation_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
fn add_edge(&mut self, src: u64, tgt: u64, weight: f32) {
|
||||
if self.nodes.contains_key(&src) && self.nodes.contains_key(&tgt) {
|
||||
self.edges.insert((src, tgt), weight);
|
||||
self.operation_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_energy(&self) -> f32 {
|
||||
let mut total = 0.0;
|
||||
for ((src, tgt), weight) in &self.edges {
|
||||
if let (Some(s), Some(t)) = (self.nodes.get(src), self.nodes.get(tgt)) {
|
||||
let dim = s.len().min(t.len());
|
||||
let residual: f32 = s
|
||||
.iter()
|
||||
.take(dim)
|
||||
.zip(t.iter().take(dim))
|
||||
.map(|(a, b)| (a - b).powi(2))
|
||||
.sum();
|
||||
total += weight * residual;
|
||||
}
|
||||
}
|
||||
total
|
||||
}
|
||||
|
||||
fn perturb_node(&mut self, id: u64, rng: &mut impl Rng) {
|
||||
if let Some(state) = self.nodes.get_mut(&id) {
|
||||
for val in state.iter_mut() {
|
||||
*val += rng.gen_range(-0.1..0.1);
|
||||
}
|
||||
self.operation_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: RANDOM ENERGY SPIKES
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_random_energy_spikes() {
    let mut rng = ChaCha8Rng::seed_from_u64(42);
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Random baseline energy in [0, 0.2) with a 10% chance of a spike of up
    // to 2.0 — well past the amber threshold, so spikes should be blocked.
    // (The original also collected the energies and decisions into vectors
    // that were never read; those dead locals are removed here.)
    for _ in 0..1000 {
        let base = rng.gen_range(0.0..0.2);
        let spike = if rng.gen_bool(0.1) {
            rng.gen_range(0.0..2.0)
        } else {
            0.0
        };
        gate.decide(base + spike);
    }

    let stats = gate.stats();

    // With a 10% spike rate we expect a mix of outcomes: some blocks from
    // the spikes, and plenty of allows from the 90% low-energy operations.
    assert!(stats.blocked > 0, "Should have blocked some spikes");
    assert!(
        stats.allowed > 0,
        "Should have allowed low-energy operations"
    );
    assert!(
        stats.allow_rate > 0.3,
        "Should have allowed at least 30% of operations (got {})",
        stats.allow_rate
    );
}
|
||||
|
||||
#[test]
fn test_sustained_spike_triggers_persistent_block() {
    let mut rng = ChaCha8Rng::seed_from_u64(123);
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Warm up with green-zone energies; the throttle should stay near zero.
    for _ in 0..50 {
        gate.decide(rng.gen_range(0.0..0.1));
    }
    assert!(
        gate.current_throttle < 0.1,
        "Should have low throttle initially"
    );

    // A sustained run of high energy must drive the throttle up hard.
    for _ in 0..20 {
        gate.decide(0.8);
    }
    assert!(
        gate.current_throttle > 0.5,
        "Should have high throttle after sustained spikes"
    );

    // Recovery under calm traffic should move the throttle back down.
    let before_recovery = gate.current_throttle;
    for _ in 0..10 {
        gate.decide(0.05);
    }
    assert!(
        gate.current_throttle < before_recovery,
        "Throttle should decrease after normal operations"
    );
}
|
||||
|
||||
#[test]
fn test_spike_patterns() {
    // (The original bound an RNG it never used and named an unused loop
    // variable `burst`; both compiler warnings are fixed here.)
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Pattern 1: strict low/high alternation — 50% of decisions are
    // high-energy (0.8, at/above the amber threshold).
    for i in 0..100 {
        let energy = if i % 2 == 0 { 0.05 } else { 0.8 };
        gate.decide(energy);
    }
    let stats1 = gate.stats();

    // Reset for the second pattern.
    gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Pattern 2: bursts of 8 low-energy then 2 high-energy decisions —
    // 20% high-energy, clustered rather than alternating.
    for _ in 0..10 {
        for _ in 0..8 {
            gate.decide(0.05);
        }
        for _ in 0..2 {
            gate.decide(0.9);
        }
    }
    let stats2 = gate.stats();

    // Key invariant: every high-energy value is at/above amber, so both
    // patterns must record blocks, and both process exactly 100 decisions.
    // (The original comment claimed both patterns were 20% high-energy;
    // pattern 1 is actually 50%.)
    assert!(stats1.blocked > 0, "Pattern 1 should have blocks");
    assert!(stats2.blocked > 0, "Pattern 2 should have blocks");
    assert_eq!(stats1.total_decisions, 100);
    assert_eq!(stats2.total_decisions, 100);
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: THROTTLING UNDER LOAD
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_throttling_fairness() {
    let mut rng = ChaCha8Rng::seed_from_u64(789);
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Drive the gate into a partially throttled state with amber energies.
    for _ in 0..10 {
        gate.decide(0.3);
    }

    // Sample the probabilistic admission decision many times.
    let samples = 1000;
    let admitted = (0..samples)
        .filter(|_| gate.should_process(&mut rng))
        .count();
    let process_rate = admitted as f64 / samples as f64;

    // Admission probability should track 1 - throttle.
    let expected_rate = 1.0 - gate.current_throttle as f64;
    assert!(
        (process_rate - expected_rate).abs() < 0.1,
        "Process rate {} should be close to expected {}",
        process_rate,
        expected_rate
    );
}
|
||||
|
||||
#[test]
fn test_throttling_response_time() {
    // Time 100 decisions at a fixed energy level.
    fn time_decisions(gate: &mut ThrottledGate, energy: f32) -> Duration {
        let start = Instant::now();
        for _ in 0..100 {
            gate.decide(energy);
        }
        start.elapsed()
    }

    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);
    let low_energy_time = time_decisions(&mut gate, 0.05);

    // Fresh gate so the second measurement starts from the same state.
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);
    let high_energy_time = time_decisions(&mut gate, 0.8);

    // Decision cost must not depend materially on which zone the energy
    // falls in; allow a generous 10x to keep the check non-flaky.
    let ratio = high_energy_time.as_nanos() as f64 / low_energy_time.as_nanos() as f64;
    assert!(
        ratio < 10.0,
        "High energy decisions shouldn't be much slower (ratio: {})",
        ratio
    );
}
|
||||
|
||||
#[test]
fn test_progressive_throttling() {
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Ramp energy linearly from 0.0 to 0.99, recording the throttle level
    // after every decision.
    let throttle_history: Vec<f32> = (0..100)
        .map(|step| {
            gate.decide(step as f32 / 100.0);
            gate.current_throttle
        })
        .collect();

    // Count 10-wide windows whose end exceeds their start: the throttle
    // should trend upward as the energy ramps.
    let increasing_segments = throttle_history
        .windows(10)
        .filter(|w| w[w.len() - 1] > w[0])
        .count();

    assert!(
        increasing_segments > 5,
        "Throttle should generally increase with energy"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: CONCURRENT MODIFICATIONS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_concurrent_state_modifications() {
    let shared = Arc::new(Mutex::new(ChaosState::new()));

    // Seed a 100-node chain graph before spawning any writers.
    {
        let mut guard = shared.lock().unwrap();
        for id in 0..100 {
            guard.add_node(id, vec![id as f32 / 100.0; 4]);
        }
        for id in 0..99 {
            guard.add_edge(id, id + 1, 1.0);
        }
    }

    // Four writer threads, each applying 100 random perturbations under
    // the mutex; each thread gets its own deterministic RNG.
    let workers: Vec<_> = (0..4)
        .map(|seed| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                let mut rng = ChaCha8Rng::seed_from_u64(seed);
                for _ in 0..100 {
                    let target = rng.gen_range(0..100);
                    shared.lock().unwrap().perturb_node(target, &mut rng);
                }
            })
        })
        .collect();
    for worker in workers {
        worker.join().unwrap();
    }

    // Perturbation changes values, never structure — counts must survive,
    // and the energy must still be computable.
    let guard = shared.lock().unwrap();
    assert_eq!(guard.nodes.len(), 100);
    assert_eq!(guard.edges.len(), 99);
    assert!(guard.compute_energy().is_finite(), "Energy should be finite");
}
|
||||
|
||||
#[test]
fn test_concurrent_energy_computation() {
    let shared = Arc::new(Mutex::new(ChaosState::new()));

    // Seed a 50-node chain graph.
    {
        let mut guard = shared.lock().unwrap();
        for id in 0..50 {
            guard.add_node(id, vec![id as f32 / 50.0; 4]);
        }
        for id in 0..49 {
            guard.add_edge(id, id + 1, 1.0);
        }
    }

    // Eight reader threads each compute the energy once.
    let readers: Vec<_> = (0..8)
        .map(|_| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || shared.lock().unwrap().compute_energy())
        })
        .collect();
    let energies: Vec<f32> = readers.into_iter().map(|h| h.join().unwrap()).collect();

    // The state is never mutated here, so every reading must agree.
    let reference = energies[0];
    for energy in &energies {
        assert!(
            (energy - reference).abs() < 1e-6,
            "Concurrent computations should give same result"
        );
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: EXTREME VALUES
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_extreme_energy_values() {
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // (input, expected) pairs covering tiny, huge, zero, and negative
    // energies. Negative values are below the green threshold, so they are
    // allowed — unusual input, but the gate handles it.
    let cases = [
        (1e-10, Decision::Allow),
        (1e10, Decision::Block),
        (0.0, Decision::Allow),
        (-0.1, Decision::Allow),
    ];
    for (energy, expected) in cases {
        assert_eq!(gate.decide(energy), expected, "energy {}", energy);
    }
}
|
||||
|
||||
#[test]
fn test_extreme_state_values() {
    let mut state = ChaosState::new();

    // Two nodes whose components are very large and opposite in sign, so
    // the squared residuals are enormous but still representable in f32.
    let huge = 1e10_f32;
    state.add_node(1, vec![huge, -huge, huge, -huge]);
    state.add_node(2, vec![-huge, huge, -huge, huge]);
    state.add_edge(1, 2, 1.0);

    let energy = state.compute_energy();
    assert!(energy.is_finite(), "Energy should handle large values");
    assert!(energy > 0.0, "Energy should be positive");
}
|
||||
|
||||
#[test]
fn test_many_small_perturbations() {
    let mut rng = ChaCha8Rng::seed_from_u64(999);
    let mut state = ChaosState::new();

    // Baseline: two identical node states, so the initial energy is exactly 0.
    state.add_node(1, vec![0.5, 0.5, 0.5, 0.5]);
    state.add_node(2, vec![0.5, 0.5, 0.5, 0.5]);
    state.add_edge(1, 2, 1.0);

    let initial_energy = state.compute_energy();
    assert_eq!(
        initial_energy, 0.0,
        "Identical endpoint states should give zero energy"
    );

    // Apply many small perturbations to both endpoints (a random walk).
    for _ in 0..1000 {
        state.perturb_node(1, &mut rng);
        state.perturb_node(2, &mut rng);
    }

    let final_energy = state.compute_energy();

    // The walk must not blow up to non-finite values, and a squared-residual
    // energy can never be negative. (The original final assertion was
    // `final > initial*0.1 || final < initial*10`, which — with the zero
    // baseline, and as an `||` of complementary bounds — was always true
    // and therefore tested nothing.)
    assert!(final_energy.is_finite());
    assert!(final_energy >= 0.0, "Squared-residual energy is non-negative");
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: RECOVERY SCENARIOS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_recovery_from_blocked_state() {
    // (The original seeded an RNG it never used; the dead binding — and its
    // compiler warning — is removed here.)
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Drive the gate into a fully blocked state with sustained high energy.
    for _ in 0..20 {
        gate.decide(0.9);
    }
    assert!(
        gate.current_throttle > 0.9,
        "Should be in high throttle state"
    );

    // Feed calm traffic until the throttle relaxes, with an iteration cap
    // so the loop cannot hang if recovery is broken.
    let mut recovery_steps = 0;
    while gate.current_throttle > 0.1 && recovery_steps < 200 {
        gate.decide(0.05);
        recovery_steps += 1;
    }

    assert!(
        recovery_steps < 200,
        "Should recover within reasonable time"
    );
    assert!(
        gate.current_throttle < 0.2,
        "Should have low throttle after recovery"
    );
}
|
||||
|
||||
#[test]
fn test_oscillation_dampening() {
    // (The original named its cycle loop variable `cycle` without using it;
    // the unused-variable warning is fixed here.)
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Alternate 5 high-energy and 5 low-energy decisions per cycle and
    // record the throttle level at the end of each cycle.
    let mut cycle_end_throttles = Vec::new();
    for _ in 0..10 {
        for _ in 0..5 {
            gate.decide(0.8);
        }
        for _ in 0..5 {
            gate.decide(0.05);
        }
        cycle_end_throttles.push(gate.current_throttle);
    }

    // End-of-cycle throttle should settle into a bounded band rather than
    // swinging across the full [0, 1] range. (Soft check — the exact band
    // depends on the gate's increment/decrement constants.)
    let max = cycle_end_throttles
        .iter()
        .cloned()
        .fold(f32::NEG_INFINITY, f32::max);
    let min = cycle_end_throttles
        .iter()
        .cloned()
        .fold(f32::INFINITY, f32::min);
    assert!(max - min < 1.0, "Throttle oscillation should be bounded");
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: RANDOM GRAPH MODIFICATIONS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_random_graph_operations() {
    let mut rng = ChaCha8Rng::seed_from_u64(222);
    let mut state = ChaosState::new();

    // 1000 random structural operations: add-node, add-edge, or perturb.
    for _ in 0..1000 {
        match rng.gen_range(0..3) {
            0 => {
                // Insert (or replace) a node with a random-dimension state.
                let id = rng.gen_range(0..100);
                let dim = rng.gen_range(2..8);
                let values: Vec<f32> = (0..dim).map(|_| rng.gen_range(-1.0..1.0)).collect();
                state.add_node(id, values);
            }
            1 => {
                // Insert an edge between two distinct random endpoints;
                // add_edge itself drops pairs with a missing endpoint.
                let src = rng.gen_range(0..100);
                let tgt = rng.gen_range(0..100);
                if src != tgt {
                    state.add_edge(src, tgt, rng.gen_range(0.1..2.0));
                }
            }
            2 => {
                // Perturb a (possibly absent) node — a no-op when absent.
                let id = rng.gen_range(0..100);
                state.perturb_node(id, &mut rng);
            }
            _ => {}
        }
    }

    // Ids are drawn from 0..100, so at most 100 distinct nodes can exist.
    assert!(state.nodes.len() <= 100);
    assert!(state.compute_energy().is_finite());
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: STRESS TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_rapid_fire_decisions() {
    let mut rng = ChaCha8Rng::seed_from_u64(333);
    let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);

    // Hammer the gate with random energies for 100ms, counting decisions.
    let budget = Duration::from_millis(100);
    let started = Instant::now();
    let mut decision_count: u64 = 0;
    while started.elapsed() < budget {
        gate.decide(rng.gen_range(0.0..0.6));
        decision_count += 1;
    }

    // The decision path is cheap, so throughput should be high.
    assert!(decision_count > 1000, "Should process many decisions quickly");
    // Every decision must be accounted for in the counters.
    assert_eq!(gate.stats().total_decisions, decision_count);
}
|
||||
|
||||
#[test]
fn test_memory_stability() {
    let mut rng = ChaCha8Rng::seed_from_u64(444);
    let mut state = ChaosState::new();

    // Repeated add/modify cycles to check the state stays well-formed.
    for cycle in 0..100 {
        // Add phase: ten new nodes per cycle with random 4-dim states.
        // Each component is drawn independently — the original used
        // `vec![rng.gen::<f32>(); 4]`, which samples ONCE and repeats the
        // same value across all four components.
        for i in 0..10 {
            let id = cycle * 10 + i;
            let components: Vec<f32> = (0..4).map(|_| rng.gen()).collect();
            state.add_node(id, components);
        }

        // Modify phase: perturb random nodes among those added so far.
        for _ in 0..50 {
            let id = rng.gen_range(0..(cycle + 1) * 10);
            state.perturb_node(id, &mut rng);
        }

        // Energy must stay finite after every cycle.
        let energy = state.compute_energy();
        assert!(
            energy.is_finite(),
            "Energy should be finite at cycle {}",
            cycle
        );
    }

    assert!(state.nodes.len() > 0);
}
|
||||
|
||||
// ============================================================================
|
||||
// CHAOS: DETERMINISTIC CHAOS (SEEDED RANDOM)
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_seeded_chaos_reproducible() {
    // Runs one seeded chaos scenario and returns
    // (final_energy, allowed, throttled, blocked) for comparison.
    // NOTE: the exact RNG consumption order is the subject under test, so
    // the body deliberately mirrors the original call sequence.
    fn run_chaos(seed: u64) -> (f32, u64, u64, u64) {
        let mut rng = ChaCha8Rng::seed_from_u64(seed);
        let mut gate = ThrottledGate::new(0.1, 0.5, 1.0);
        let mut state = ChaosState::new();

        // Add nodes with random states.
        // NOTE(review): `vec![rng.gen::<f32>(); 4]` draws ONE sample and
        // repeats it across all 4 components — confirm whether per-component
        // randomness was intended. Reproducibility is unaffected either way.
        for i in 0..100 {
            state.add_node(i, vec![rng.gen::<f32>(); 4]);
        }

        // Add edges to create energy (without edges, compute_energy is
        // always 0). Self-loops are skipped; duplicates may overwrite.
        // (The original named this loop variable `i` without using it.)
        for _ in 0..50 {
            let src = rng.gen_range(0..100);
            let tgt = rng.gen_range(0..100);
            if src != tgt {
                state.add_edge(src, tgt, rng.gen_range(0.1..1.0));
            }
        }

        // Drive the gate from the (scaled) global energy while perturbing
        // random nodes; all randomness flows from the single seeded RNG.
        for _ in 0..500 {
            let energy = state.compute_energy();
            gate.decide(energy / 100.0);
            let node_id = rng.gen_range(0..100);
            state.perturb_node(node_id, &mut rng);
        }

        let stats = gate.stats();
        (
            state.compute_energy(),
            stats.allowed,
            stats.throttled,
            stats.blocked,
        )
    }

    let result1 = run_chaos(12345);
    let result2 = run_chaos(12345);

    // Same seed should produce same results (approximate comparison for the
    // float, in case HashMap iteration order perturbs summation order).
    assert!(
        (result1.0 - result2.0).abs() < 0.01,
        "Same seed should produce same energy: {} vs {}",
        result1.0,
        result2.0
    );
    assert_eq!(
        result1.1, result2.1,
        "Same seed should produce same allowed count"
    );
    assert_eq!(
        result1.2, result2.2,
        "Same seed should produce same throttled count"
    );
    assert_eq!(
        result1.3, result2.3,
        "Same seed should produce same blocked count"
    );

    // A very different seed should drive a visibly different trajectory.
    let result3 = run_chaos(99999);
    assert!(
        (result1.0 - result3.0).abs() > 0.001 || result1.1 != result3.1 || result1.2 != result3.2,
        "Different seeds should produce different results: seed1={:?}, seed2={:?}",
        result1,
        result3
    );
}
|
||||
520
vendor/ruvector/crates/prime-radiant/tests/gpu_coherence_tests.rs
vendored
Normal file
520
vendor/ruvector/crates/prime-radiant/tests/gpu_coherence_tests.rs
vendored
Normal file
@@ -0,0 +1,520 @@
|
||||
//! GPU Coherence Engine Tests
|
||||
//!
|
||||
//! Comprehensive tests verifying GPU computation results match CPU results
|
||||
//! within floating-point tolerance. These tests ensure correctness of:
|
||||
//!
|
||||
//! - GPU buffer management and data transfer
|
||||
//! - Parallel residual computation
|
||||
//! - Energy aggregation with tree reduction
|
||||
//! - CPU fallback mechanism
|
||||
//!
|
||||
//! Run with: cargo test --features gpu
|
||||
|
||||
#![cfg(feature = "gpu")]
|
||||
|
||||
use prime_radiant::gpu::{
|
||||
BufferUsage, GpuBuffer, GpuBufferManager, GpuCoherenceEngine, GpuConfig, GpuEdge, GpuError,
|
||||
GpuParams, GpuRestrictionMap, GpuResult,
|
||||
};
|
||||
use prime_radiant::substrate::{
|
||||
EdgeId, NodeId, SheafEdge, SheafEdgeBuilder, SheafGraph, SheafNode, SheafNodeBuilder,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Floating point tolerance for GPU vs CPU comparison
|
||||
const TOLERANCE: f32 = 1e-5;
|
||||
|
||||
/// Create a simple test graph with 3 nodes forming a triangle
|
||||
fn create_triangle_graph() -> SheafGraph {
|
||||
let graph = SheafGraph::new();
|
||||
|
||||
// Create three nodes with states
|
||||
let node1 = SheafNodeBuilder::new()
|
||||
.state_from_slice(&[1.0, 0.0, 0.0])
|
||||
.namespace("test")
|
||||
.build();
|
||||
let node2 = SheafNodeBuilder::new()
|
||||
.state_from_slice(&[0.0, 1.0, 0.0])
|
||||
.namespace("test")
|
||||
.build();
|
||||
let node3 = SheafNodeBuilder::new()
|
||||
.state_from_slice(&[0.0, 0.0, 1.0])
|
||||
.namespace("test")
|
||||
.build();
|
||||
|
||||
let id1 = graph.add_node(node1);
|
||||
let id2 = graph.add_node(node2);
|
||||
let id3 = graph.add_node(node3);
|
||||
|
||||
// Create edges with identity restrictions
|
||||
let edge12 = SheafEdgeBuilder::new(id1, id2)
|
||||
.identity_restrictions(3)
|
||||
.weight(1.0)
|
||||
.namespace("test")
|
||||
.build();
|
||||
let edge23 = SheafEdgeBuilder::new(id2, id3)
|
||||
.identity_restrictions(3)
|
||||
.weight(1.0)
|
||||
.namespace("test")
|
||||
.build();
|
||||
let edge31 = SheafEdgeBuilder::new(id3, id1)
|
||||
.identity_restrictions(3)
|
||||
.weight(1.0)
|
||||
.namespace("test")
|
||||
.build();
|
||||
|
||||
graph.add_edge(edge12).unwrap();
|
||||
graph.add_edge(edge23).unwrap();
|
||||
graph.add_edge(edge31).unwrap();
|
||||
|
||||
graph
|
||||
}
|
||||
|
||||
/// Create a coherent graph where all nodes have identical states
|
||||
fn create_coherent_graph() -> SheafGraph {
|
||||
let graph = SheafGraph::new();
|
||||
|
||||
// All nodes have the same state
|
||||
let state = [1.0, 1.0, 1.0];
|
||||
|
||||
let node1 = SheafNodeBuilder::new().state_from_slice(&state).build();
|
||||
let node2 = SheafNodeBuilder::new().state_from_slice(&state).build();
|
||||
|
||||
let id1 = graph.add_node(node1);
|
||||
let id2 = graph.add_node(node2);
|
||||
|
||||
let edge = SheafEdgeBuilder::new(id1, id2)
|
||||
.identity_restrictions(3)
|
||||
.weight(1.0)
|
||||
.build();
|
||||
|
||||
graph.add_edge(edge).unwrap();
|
||||
graph
|
||||
}
|
||||
|
||||
/// Create a larger graph for performance testing
|
||||
fn create_large_graph(num_nodes: usize, edges_per_node: usize) -> SheafGraph {
|
||||
let graph = SheafGraph::new();
|
||||
let state_dim = 64;
|
||||
|
||||
// Create nodes with random states
|
||||
let mut node_ids = Vec::with_capacity(num_nodes);
|
||||
for i in 0..num_nodes {
|
||||
let state: Vec<f32> = (0..state_dim)
|
||||
.map(|j| ((i * state_dim + j) as f32 * 0.01).sin())
|
||||
.collect();
|
||||
|
||||
let node = SheafNodeBuilder::new().state_from_slice(&state).build();
|
||||
|
||||
node_ids.push(graph.add_node(node));
|
||||
}
|
||||
|
||||
// Create edges
|
||||
for i in 0..num_nodes {
|
||||
for j in 1..=edges_per_node {
|
||||
let target_idx = (i + j) % num_nodes;
|
||||
if i != target_idx {
|
||||
let edge = SheafEdgeBuilder::new(node_ids[i], node_ids[target_idx])
|
||||
.identity_restrictions(state_dim)
|
||||
.weight(1.0)
|
||||
.build();
|
||||
|
||||
// Ignore duplicate edges
|
||||
let _ = graph.add_edge(edge);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
graph
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// GPU Configuration Tests
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_gpu_config_default() {
    let config = GpuConfig::default();

    // CPU fallback on, unit beta, and a positive dispatch timeout.
    assert!(config.enable_fallback);
    assert_eq!(config.beta, 1.0);
    assert!(config.timeout_ms > 0);
    // Lane thresholds must be strictly increasing.
    let (t0, t1, t2) = (
        config.threshold_lane0,
        config.threshold_lane1,
        config.threshold_lane2,
    );
    assert!(t0 < t1);
    assert!(t1 < t2);
}
|
||||
|
||||
#[test]
fn test_gpu_config_custom() {
    // Override a subset of fields; everything else comes from Default via
    // struct-update syntax.
    let custom = GpuConfig {
        enable_fallback: false,
        beta: 2.0,
        threshold_lane0: 0.05,
        threshold_lane1: 0.5,
        threshold_lane2: 5.0,
        ..Default::default()
    };

    // The overridden fields must be reflected back verbatim.
    assert!(!custom.enable_fallback);
    assert_eq!(custom.beta, 2.0);
    assert_eq!(custom.threshold_lane0, 0.05);
}
|
||||
|
||||
// ============================================================================
|
||||
// GPU Buffer Tests
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_gpu_params_alignment() {
    use std::mem::{align_of, size_of};
    // The host-side struct layout is a contract with the GPU shader:
    // exactly 32 bytes, 4-byte aligned.
    assert_eq!(size_of::<GpuParams>(), 32);
    assert_eq!(align_of::<GpuParams>(), 4);
}
|
||||
|
||||
#[test]
fn test_gpu_edge_alignment() {
    use std::mem::{align_of, size_of};
    // Edge records must pack to 32 bytes with 4-byte alignment.
    assert_eq!(size_of::<GpuEdge>(), 32);
    assert_eq!(align_of::<GpuEdge>(), 4);
}
|
||||
|
||||
#[test]
fn test_gpu_restriction_map_alignment() {
    use std::mem::{align_of, size_of};
    // Restriction-map records must pack to 32 bytes, 4-byte aligned.
    assert_eq!(size_of::<GpuRestrictionMap>(), 32);
    assert_eq!(align_of::<GpuRestrictionMap>(), 4);
}
|
||||
|
||||
// ============================================================================
|
||||
// CPU vs GPU Comparison Tests
|
||||
// ============================================================================
|
||||
|
||||
/// Test that GPU energy matches CPU energy for triangle graph
|
||||
#[tokio::test]
|
||||
async fn test_gpu_cpu_energy_match_triangle() {
|
||||
let graph = create_triangle_graph();
|
||||
|
||||
// Compute CPU energy
|
||||
let cpu_energy = graph.compute_energy();
|
||||
|
||||
// Try GPU computation
|
||||
let config = GpuConfig::default();
|
||||
match GpuCoherenceEngine::try_new(config).await {
|
||||
Some(mut engine) => {
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
let gpu_energy = engine.compute_energy().await.unwrap();
|
||||
|
||||
// Compare total energies
|
||||
let diff = (cpu_energy.total_energy - gpu_energy.total_energy).abs();
|
||||
assert!(
|
||||
diff < TOLERANCE,
|
||||
"Energy mismatch: CPU={}, GPU={}, diff={}",
|
||||
cpu_energy.total_energy,
|
||||
gpu_energy.total_energy,
|
||||
diff
|
||||
);
|
||||
|
||||
// Verify GPU was actually used
|
||||
assert!(gpu_energy.used_gpu);
|
||||
}
|
||||
None => {
|
||||
// GPU not available, skip test
|
||||
eprintln!("GPU not available, skipping GPU comparison test");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Test that coherent graph has near-zero energy on GPU
|
||||
#[tokio::test]
|
||||
async fn test_gpu_coherent_graph() {
|
||||
let graph = create_coherent_graph();
|
||||
|
||||
// CPU energy should be near zero
|
||||
let cpu_energy = graph.compute_energy();
|
||||
assert!(
|
||||
cpu_energy.total_energy < 1e-10,
|
||||
"CPU energy for coherent graph should be near zero: {}",
|
||||
cpu_energy.total_energy
|
||||
);
|
||||
|
||||
// Try GPU computation
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
let gpu_energy = engine.compute_energy().await.unwrap();
|
||||
|
||||
assert!(
|
||||
gpu_energy.total_energy < 1e-5,
|
||||
"GPU energy for coherent graph should be near zero: {}",
|
||||
gpu_energy.total_energy
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Test per-edge energy computation
|
||||
#[tokio::test]
|
||||
async fn test_gpu_per_edge_energies() {
|
||||
let graph = create_triangle_graph();
|
||||
|
||||
// Compute CPU energy
|
||||
let cpu_energy = graph.compute_energy();
|
||||
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
let gpu_energy = engine.compute_energy().await.unwrap();
|
||||
|
||||
// Same number of edge energies
|
||||
assert_eq!(
|
||||
cpu_energy.edge_energies.len(),
|
||||
gpu_energy.edge_energies.len(),
|
||||
"Edge count mismatch"
|
||||
);
|
||||
|
||||
// Each edge energy should match (order may differ)
|
||||
let cpu_sum: f32 = cpu_energy.edge_energies.values().sum();
|
||||
let gpu_sum: f32 = gpu_energy.edge_energies.iter().sum();
|
||||
|
||||
let diff = (cpu_sum - gpu_sum).abs();
|
||||
assert!(
|
||||
diff < TOLERANCE,
|
||||
"Sum of edge energies mismatch: CPU={}, GPU={}, diff={}",
|
||||
cpu_sum,
|
||||
gpu_sum,
|
||||
diff
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Test with larger graph
|
||||
#[tokio::test]
|
||||
async fn test_gpu_large_graph() {
|
||||
let graph = create_large_graph(100, 5);
|
||||
|
||||
let cpu_energy = graph.compute_energy();
|
||||
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
let gpu_energy = engine.compute_energy().await.unwrap();
|
||||
|
||||
// Allow slightly larger tolerance for large graphs due to floating point accumulation
|
||||
let diff = (cpu_energy.total_energy - gpu_energy.total_energy).abs();
|
||||
let relative_diff = diff / cpu_energy.total_energy.max(1.0);
|
||||
|
||||
assert!(
|
||||
relative_diff < 0.01, // 1% relative error
|
||||
"Large graph energy mismatch: CPU={}, GPU={}, relative_diff={:.2}%",
|
||||
cpu_energy.total_energy,
|
||||
gpu_energy.total_energy,
|
||||
relative_diff * 100.0
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Error Handling Tests
|
||||
// ============================================================================
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_gpu_empty_graph_error() {
|
||||
let graph = SheafGraph::new();
|
||||
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
let result = engine.upload_graph(&graph);
|
||||
assert!(result.is_err());
|
||||
match result {
|
||||
Err(GpuError::EmptyGraph) => {}
|
||||
Err(e) => panic!("Expected EmptyGraph error, got: {:?}", e),
|
||||
Ok(_) => panic!("Expected error for empty graph"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Classify which GPU errors should trigger a fallback to the CPU path.
#[test]
fn test_gpu_error_fallback_detection() {
    // Errors caused by missing/unsupported hardware route to the CPU fallback.
    let fallback_errors = [
        GpuError::NoAdapter,
        GpuError::NoDevice("test".into()),
        GpuError::DeviceCreation("test".into()),
        GpuError::AdapterRequest("test".into()),
        GpuError::UnsupportedFeature("test".into()),
    ];
    for err in fallback_errors {
        assert!(err.should_fallback());
    }

    // Transient or data-shape errors must not silently fall back.
    let non_fallback_errors = [
        GpuError::Timeout(100),
        GpuError::EmptyGraph,
        GpuError::BufferRead("test".into()),
    ];
    for err in non_fallback_errors {
        assert!(!err.should_fallback());
    }
}
|
||||
|
||||
/// Classify which GPU errors are recoverable (retryable) vs. terminal.
#[test]
fn test_gpu_error_recoverable() {
    // Transient failures can be retried.
    for err in [
        GpuError::Timeout(100),
        GpuError::BufferRead("test".into()),
        GpuError::ExecutionFailed("test".into()),
    ] {
        assert!(err.is_recoverable());
    }

    // Missing hardware or invalid input will not fix itself on retry.
    for err in [GpuError::NoAdapter, GpuError::EmptyGraph] {
        assert!(!err.is_recoverable());
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// GPU Capabilities Tests
|
||||
// ============================================================================
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_gpu_capabilities() {
|
||||
let config = GpuConfig::default();
|
||||
if let Some(engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
let caps = engine.capabilities();
|
||||
|
||||
// Should have valid device info
|
||||
assert!(!caps.device_name.is_empty());
|
||||
assert!(!caps.backend.is_empty());
|
||||
|
||||
// Should have reasonable limits
|
||||
assert!(caps.max_buffer_size > 0);
|
||||
assert!(caps.max_workgroup_size > 0);
|
||||
assert!(caps.max_workgroups[0] > 0);
|
||||
|
||||
// Should be marked as supported
|
||||
assert!(caps.supported);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Synchronous API Tests
|
||||
// ============================================================================
|
||||
|
||||
/// The blocking (non-async) wrapper API must drive the engine end to end.
#[test]
fn test_sync_api() {
    use prime_radiant::gpu::sync;

    let engine = sync::try_create_engine(GpuConfig::default());
    if let Some(mut engine) = engine {
        let graph = create_triangle_graph();
        engine.upload_graph(&graph).unwrap();

        let energy = sync::compute_energy(&mut engine).unwrap();

        // Triangle graph is incoherent (positive energy), and the result
        // must have come from the GPU path.
        assert!(energy.total_energy > 0.0);
        assert!(energy.used_gpu);
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// Resource Management Tests
|
||||
// ============================================================================
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_gpu_resource_release() {
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
let graph = create_triangle_graph();
|
||||
|
||||
// Upload and compute
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
let _ = engine.compute_energy().await.unwrap();
|
||||
|
||||
// Release resources
|
||||
engine.release();
|
||||
|
||||
// Re-upload should work
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
let energy = engine.compute_energy().await.unwrap();
|
||||
assert!(energy.total_energy > 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_gpu_multiple_computations() {
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
let graph = create_triangle_graph();
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
|
||||
// Multiple computations should give consistent results
|
||||
let energy1 = engine.compute_energy().await.unwrap();
|
||||
let energy2 = engine.compute_energy().await.unwrap();
|
||||
let energy3 = engine.compute_energy().await.unwrap();
|
||||
|
||||
assert!(
|
||||
(energy1.total_energy - energy2.total_energy).abs() < TOLERANCE,
|
||||
"Inconsistent results between computations"
|
||||
);
|
||||
assert!(
|
||||
(energy2.total_energy - energy3.total_energy).abs() < TOLERANCE,
|
||||
"Inconsistent results between computations"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Performance Tests (disabled by default)
|
||||
// ============================================================================
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore] // Run with: cargo test --features gpu -- --ignored
|
||||
async fn test_gpu_performance_1k_nodes() {
|
||||
let graph = create_large_graph(1000, 10);
|
||||
let edge_count = graph.edge_count();
|
||||
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
|
||||
// Warm up
|
||||
let _ = engine.compute_energy().await.unwrap();
|
||||
|
||||
// Benchmark
|
||||
let start = std::time::Instant::now();
|
||||
let energy = engine.compute_energy().await.unwrap();
|
||||
let gpu_time = start.elapsed();
|
||||
|
||||
// Compare with CPU
|
||||
let start = std::time::Instant::now();
|
||||
let cpu_energy = graph.compute_energy();
|
||||
let cpu_time = start.elapsed();
|
||||
|
||||
println!("Performance test ({} edges):", edge_count);
|
||||
println!(
|
||||
" GPU: {}us ({} edges/ms)",
|
||||
energy.compute_time_us,
|
||||
edge_count as u64 * 1000 / energy.compute_time_us.max(1)
|
||||
);
|
||||
println!(" CPU: {}us", cpu_time.as_micros());
|
||||
println!(
|
||||
" Speedup: {:.2}x",
|
||||
cpu_time.as_micros() as f64 / gpu_time.as_micros() as f64
|
||||
);
|
||||
|
||||
// Verify correctness
|
||||
let diff = (cpu_energy.total_energy - energy.total_energy).abs();
|
||||
let relative_diff = diff / cpu_energy.total_energy.max(1.0);
|
||||
assert!(relative_diff < 0.01, "Performance test: energy mismatch");
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn test_gpu_performance_10k_nodes() {
|
||||
let graph = create_large_graph(10000, 10);
|
||||
let edge_count = graph.edge_count();
|
||||
|
||||
let config = GpuConfig::default();
|
||||
if let Some(mut engine) = GpuCoherenceEngine::try_new(config).await {
|
||||
engine.upload_graph(&graph).unwrap();
|
||||
|
||||
// Warm up
|
||||
let _ = engine.compute_energy().await.unwrap();
|
||||
|
||||
// Benchmark
|
||||
let energy = engine.compute_energy().await.unwrap();
|
||||
|
||||
println!(
|
||||
"Large scale test ({} edges): {}us, {} edges/ms",
|
||||
edge_count,
|
||||
energy.compute_time_us,
|
||||
edge_count as u64 * 1000 / energy.compute_time_us.max(1)
|
||||
);
|
||||
|
||||
assert!(energy.total_energy > 0.0);
|
||||
}
|
||||
}
|
||||
772
vendor/ruvector/crates/prime-radiant/tests/integration/coherence_tests.rs
vendored
Normal file
772
vendor/ruvector/crates/prime-radiant/tests/integration/coherence_tests.rs
vendored
Normal file
@@ -0,0 +1,772 @@
|
||||
//! Integration tests for Coherence Computation
|
||||
//!
|
||||
//! Tests the Coherence Computation bounded context, verifying:
|
||||
//! - Full energy computation from graph state
|
||||
//! - Incremental updates when nodes change
|
||||
//! - Spectral drift detection
|
||||
//! - Hotspot identification
|
||||
//! - Caching and fingerprint-based staleness
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
// ============================================================================
|
||||
// TEST INFRASTRUCTURE
|
||||
// ============================================================================
|
||||
|
||||
/// Simple restriction map for testing
///
/// An affine map `y = A x + b` where `A` is an identity-like matrix,
/// truncated or zero-padded to `rows x cols`, and `b` is all zeros.
struct RestrictionMap {
    matrix: Vec<Vec<f32>>,
    bias: Vec<f32>,
}

impl RestrictionMap {
    /// Build a `rows x cols` identity-like map (1.0 on the diagonal up to
    /// `min(rows, cols)`, 0.0 elsewhere) with zero bias.
    fn new(rows: usize, cols: usize) -> Self {
        let mut matrix = vec![vec![0.0f32; cols]; rows];
        for d in 0..rows.min(cols) {
            matrix[d][d] = 1.0;
        }
        Self {
            matrix,
            bias: vec![0.0; rows],
        }
    }

    /// Apply the map: each output component is the dot product of a matrix
    /// row with `input`, plus the corresponding bias entry.
    fn apply(&self, input: &[f32]) -> Vec<f32> {
        let mut out = Vec::with_capacity(self.matrix.len());
        for (row, b) in self.matrix.iter().zip(self.bias.iter()) {
            let dot: f32 = row.iter().zip(input.iter()).map(|(a, x)| a * x).sum();
            out.push(dot + b);
        }
        out
    }

    /// Number of output dimensions (rows of the matrix).
    fn output_dim(&self) -> usize {
        self.matrix.len()
    }
}
|
||||
|
||||
/// Simple edge for testing
|
||||
struct TestEdge {
|
||||
source: u64,
|
||||
target: u64,
|
||||
weight: f32,
|
||||
rho_source: RestrictionMap,
|
||||
rho_target: RestrictionMap,
|
||||
}
|
||||
|
||||
impl TestEdge {
|
||||
fn compute_residual(&self, states: &HashMap<u64, Vec<f32>>) -> Option<Vec<f32>> {
|
||||
let source_state = states.get(&self.source)?;
|
||||
let target_state = states.get(&self.target)?;
|
||||
|
||||
let projected_source = self.rho_source.apply(source_state);
|
||||
let projected_target = self.rho_target.apply(target_state);
|
||||
|
||||
Some(
|
||||
projected_source
|
||||
.iter()
|
||||
.zip(&projected_target)
|
||||
.map(|(a, b)| a - b)
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
fn compute_energy(&self, states: &HashMap<u64, Vec<f32>>) -> Option<f32> {
|
||||
let residual = self.compute_residual(states)?;
|
||||
let norm_sq: f32 = residual.iter().map(|x| x * x).sum();
|
||||
Some(self.weight * norm_sq)
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple coherence energy computation
|
||||
fn compute_total_energy(
|
||||
states: &HashMap<u64, Vec<f32>>,
|
||||
edges: &[TestEdge],
|
||||
) -> (f32, HashMap<usize, f32>) {
|
||||
let mut total = 0.0;
|
||||
let mut edge_energies = HashMap::new();
|
||||
|
||||
for (i, edge) in edges.iter().enumerate() {
|
||||
if let Some(energy) = edge.compute_energy(states) {
|
||||
total += energy;
|
||||
edge_energies.insert(i, energy);
|
||||
}
|
||||
}
|
||||
|
||||
(total, edge_energies)
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// ENERGY COMPUTATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// A consistent section (all nodes agree) must have zero coherence energy.
#[test]
fn test_energy_computation_consistent_section() {
    // Two nodes with identical states linked by identity restriction maps.
    let shared = vec![1.0, 0.5, 0.3];
    let states: HashMap<u64, Vec<f32>> =
        HashMap::from([(1, shared.clone()), (2, shared)]);

    let edges = vec![TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(3, 3),
        rho_target: RestrictionMap::new(3, 3),
    }];

    let (total, _) = compute_total_energy(&states, &edges);

    // Zero residual on the only edge -> (near-)zero total energy.
    assert!(total < 1e-10, "Expected near-zero energy, got {}", total);
}
|
||||
|
||||
/// Disagreeing node states must produce energy equal to weight * ||r||^2.
#[test]
fn test_energy_computation_inconsistent_section() {
    let s1 = vec![1.0f32, 0.5, 0.3];
    let s2 = vec![0.5f32, 0.8, 0.1];

    // Expected energy with weight 1 and identity maps: sum of squared diffs
    // (residual [0.5, -0.3, 0.2] -> 0.25 + 0.09 + 0.04 = 0.38).
    let expected: f32 = s1.iter().zip(&s2).map(|(a, b)| (a - b) * (a - b)).sum();

    let mut states = HashMap::new();
    states.insert(1, s1);
    states.insert(2, s2);

    let edges = vec![TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(3, 3),
        rho_target: RestrictionMap::new(3, 3),
    }];

    let (total, _) = compute_total_energy(&states, &edges);

    assert!(
        (total - expected).abs() < 1e-6,
        "Expected energy {}, got {}",
        expected,
        total
    );
}
|
||||
|
||||
/// Edge energy is linear in the edge weight (w * ||r||^2).
#[test]
fn test_energy_computation_weighted_edges() {
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.0, 0.0]);

    // Builds a single 1->2 identity edge carrying the given weight.
    let edge_with_weight = |w: f32| {
        vec![TestEdge {
            source: 1,
            target: 2,
            weight: w,
            rho_source: RestrictionMap::new(2, 2),
            rho_target: RestrictionMap::new(2, 2),
        }]
    };

    let (energy_w1, _) = compute_total_energy(&states, &edge_with_weight(1.0));
    let (energy_w10, _) = compute_total_energy(&states, &edge_with_weight(10.0));

    // Same residual, 10x the weight -> exactly 10x the energy.
    assert!(
        (energy_w10 / energy_w1 - 10.0).abs() < 1e-6,
        "Expected 10x energy scaling"
    );
}
|
||||
|
||||
/// Energy is a weighted sum of squares, so it is >= 0 for any random input.
#[test]
fn test_energy_is_nonnegative() {
    use rand::Rng;
    let mut rng = rand::thread_rng();

    for _ in 0..100 {
        // Random 4D states for both endpoints.
        let mut states = HashMap::new();
        for node in 1..=2u64 {
            let state: Vec<f32> = (0..4).map(|_| rng.gen_range(-10.0..10.0)).collect();
            states.insert(node, state);
        }

        // Random (non-negative) weight on a single identity edge.
        let edges = vec![TestEdge {
            source: 1,
            target: 2,
            weight: rng.gen_range(0.0..10.0),
            rho_source: RestrictionMap::new(4, 4),
            rho_target: RestrictionMap::new(4, 4),
        }];

        let (total, _) = compute_total_energy(&states, &edges);

        assert!(total >= 0.0, "Energy must be non-negative, got {}", total);
    }
}
|
||||
|
||||
/// Total energy must be the sum of the individual edge energies.
#[test]
fn test_energy_with_multiple_edges() {
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.5, 0.0]);
    states.insert(3, vec![0.0, 0.0]);

    // Unit-weight identity edge between two given nodes.
    let identity_edge = |source: u64, target: u64| TestEdge {
        source,
        target,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    let edges = vec![identity_edge(1, 2), identity_edge(2, 3)];

    let (total, edge_energies) = compute_total_energy(&states, &edges);

    // Additivity: total equals the sum of per-edge contributions.
    let sum_of_parts: f32 = edge_energies.values().sum();
    assert!(
        (total - sum_of_parts).abs() < 1e-10,
        "Total should equal sum of parts"
    );

    // Each edge has residual [0.5, 0.0] -> energy 0.25, so total = 0.5.
    assert!((total - 0.5).abs() < 1e-6);
}
|
||||
|
||||
// ============================================================================
|
||||
// INCREMENTAL UPDATE TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// Only edges incident to a changed node can change the total energy.
#[test]
fn test_incremental_update_single_node() {
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.5, 0.0]);
    states.insert(3, vec![0.0, 0.0]); // isolated: no incident edges

    // Single edge 1-2; node 3 is deliberately left unconnected.
    let edges = vec![TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    }];

    let (energy_before, _) = compute_total_energy(&states, &edges);

    // Perturbing node 1 affects edge 1-2, so the energy must move.
    states.insert(1, vec![0.8, 0.0]);
    let (energy_after, _) = compute_total_energy(&states, &edges);
    assert_ne!(
        energy_before, energy_after,
        "Energy should change when incident node updates"
    );

    // Perturbing isolated node 3 touches no edge; energy stays put.
    states.insert(3, vec![0.5, 0.5]);
    let (energy_isolated, _) = compute_total_energy(&states, &edges);
    assert!(
        (energy_after - energy_isolated).abs() < 1e-10,
        "Energy should not change when isolated node updates"
    );
}
|
||||
|
||||
/// The edges that need recomputation after a node update are exactly those
/// incident to the updated node.
#[test]
fn test_incremental_update_affected_edges() {
    // Collect the indices of edges that touch `node_id`.
    fn incident_edge_indices(node_id: u64, edges: &[TestEdge]) -> Vec<usize> {
        let mut hits = Vec::new();
        for (idx, edge) in edges.iter().enumerate() {
            if edge.source == node_id || edge.target == node_id {
                hits.push(idx);
            }
        }
        hits
    }

    // Chain graph 1-2-3-4.
    let chain_edge = |source: u64, target: u64| TestEdge {
        source,
        target,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    let edges = vec![chain_edge(1, 2), chain_edge(2, 3), chain_edge(3, 4)];

    // Interior node 2 touches edges 0 and 1.
    assert_eq!(incident_edge_indices(2, &edges), vec![0, 1]);

    // Endpoint nodes touch exactly one edge each.
    assert_eq!(incident_edge_indices(1, &edges), vec![0]);
    assert_eq!(incident_edge_indices(4, &edges), vec![2]);
}
|
||||
|
||||
/// Incremental recomputation (refreshing only the edges incident to a changed
/// node against a cached baseline) must agree with a full recomputation.
///
/// The previous version computed the affected-edge set but never used it: its
/// "incremental" sum simply re-evaluated every edge, so the incremental path
/// was never actually exercised. This version mutates a node, refreshes only
/// incident edges in the cache, and compares against a from-scratch result.
#[test]
fn test_incremental_vs_full_recomputation() {
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5, 0.3]);
    states.insert(2, vec![0.8, 0.6, 0.4]);
    states.insert(3, vec![0.6, 0.7, 0.5]);

    let edges = vec![
        TestEdge {
            source: 1,
            target: 2,
            weight: 1.0,
            rho_source: RestrictionMap::new(3, 3),
            rho_target: RestrictionMap::new(3, 3),
        },
        TestEdge {
            source: 2,
            target: 3,
            weight: 1.0,
            rho_source: RestrictionMap::new(3, 3),
            rho_target: RestrictionMap::new(3, 3),
        },
    ];

    // Baseline full computation yields a per-edge energy cache.
    let (_, mut cached_edge_energies) = compute_total_energy(&states, &edges);

    // Mutate node 2; only edges incident to node 2 become stale.
    states.insert(2, vec![0.5, 0.9, 0.2]);

    // Incremental path: recompute stale edges only, reuse the cache otherwise.
    for (i, edge) in edges.iter().enumerate() {
        if edge.source == 2 || edge.target == 2 {
            if let Some(energy) = edge.compute_energy(&states) {
                cached_edge_energies.insert(i, energy);
            }
        }
    }
    let incremental_sum: f32 = cached_edge_energies.values().sum();

    // Full recomputation from the updated states must match.
    let (energy_full, _) = compute_total_energy(&states, &edges);

    assert!(
        (energy_full - incremental_sum).abs() < 1e-10,
        "Incremental and full should match"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// RESIDUAL COMPUTATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// r_e for edge (u,v) should be the negation of r_e for edge (v,u) when both
/// directions use the same (identity) restriction maps.
///
/// Cleanup: removed the dead local `let rho = RestrictionMap::new(2, 2);`
/// that was never used.
#[test]
fn test_residual_symmetry() {
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5]);
    states.insert(2, vec![0.8, 0.6]);

    let edge_uv = TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };

    let edge_vu = TestEdge {
        source: 2,
        target: 1,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };

    let r_uv = edge_uv.compute_residual(&states).unwrap();
    let r_vu = edge_vu.compute_residual(&states).unwrap();

    // Antisymmetry: r_uv + r_vu == 0 componentwise.
    for (a, b) in r_uv.iter().zip(&r_vu) {
        assert!(
            (a + b).abs() < 1e-10,
            "Residuals should be negations of each other"
        );
    }
}
|
||||
|
||||
/// The residual's dimension must equal the restriction map's output dimension.
#[test]
fn test_residual_dimension() {
    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.5, 0.3, 0.2]);
    states.insert(2, vec![0.8, 0.6, 0.4, 0.3]);

    // Both restriction maps project 4D node states down to 2D.
    let edge = TestEdge {
        source: 1,
        target: 2,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 4),
        rho_target: RestrictionMap::new(2, 4),
    };

    let residual = edge.compute_residual(&states).unwrap();

    assert_eq!(
        residual.len(),
        edge.rho_source.output_dim(),
        "Residual dimension should match restriction map output"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// HOTSPOT IDENTIFICATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// Hotspots are the edges carrying the highest energy.
#[test]
fn test_hotspot_identification() {
    // Rank edges by energy (descending) and keep the top k.
    fn top_k_edges(edge_energies: &HashMap<usize, f32>, k: usize) -> Vec<(usize, f32)> {
        let mut ranked: Vec<(usize, f32)> =
            edge_energies.iter().map(|(i, e)| (*i, *e)).collect();
        ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
        ranked.truncate(k);
        ranked
    }

    let mut states = HashMap::new();
    states.insert(1, vec![1.0, 0.0]);
    states.insert(2, vec![0.1, 0.0]); // far from node 1 -> large residual
    states.insert(3, vec![0.05, 0.0]); // close to node 2 -> small residual

    let identity_edge = |source: u64, target: u64| TestEdge {
        source,
        target,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    let edges = vec![identity_edge(1, 2), identity_edge(2, 3)];

    let (_, edge_energies) = compute_total_energy(&states, &edges);

    let hotspots = top_k_edges(&edge_energies, 1);

    // Edge 0 (1-2) carries the larger residual and should rank first.
    assert_eq!(hotspots[0].0, 0, "Edge 1-2 should be the hotspot");
    assert!(
        edge_energies.get(&0).unwrap() > edge_energies.get(&1).unwrap(),
        "Edge 1-2 should have higher energy than edge 2-3"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// SCOPE-BASED ENERGY TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// Edge energies can be aggregated per scope (namespace) of the source node.
#[test]
fn test_energy_by_scope() {
    let mut states = HashMap::new();
    let mut node_scopes: HashMap<u64, String> = HashMap::new();

    // Two "finance" nodes and two "medical" nodes, one edge inside each scope.
    for (node, state, scope) in [
        (1u64, vec![1.0f32, 0.5], "finance"),
        (2, vec![0.8, 0.6], "finance"),
        (3, vec![0.5, 0.3], "medical"),
        (4, vec![0.2, 0.1], "medical"),
    ] {
        states.insert(node, state);
        node_scopes.insert(node, scope.to_string());
    }

    let identity_edge = |source: u64, target: u64| TestEdge {
        source,
        target,
        weight: 1.0,
        rho_source: RestrictionMap::new(2, 2),
        rho_target: RestrictionMap::new(2, 2),
    };
    let edges = vec![identity_edge(1, 2), identity_edge(3, 4)];

    // Sum each edge's energy into the bucket of its source node's scope.
    fn energy_by_scope(
        edges: &[TestEdge],
        states: &HashMap<u64, Vec<f32>>,
        node_scopes: &HashMap<u64, String>,
    ) -> HashMap<String, f32> {
        let mut scope_energy: HashMap<String, f32> = HashMap::new();
        for edge in edges {
            if let Some(energy) = edge.compute_energy(states) {
                let scope = node_scopes.get(&edge.source).cloned().unwrap_or_default();
                *scope_energy.entry(scope).or_insert(0.0) += energy;
            }
        }
        scope_energy
    }

    let by_scope = energy_by_scope(&edges, &states, &node_scopes);

    assert!(by_scope.contains_key("finance"));
    assert!(by_scope.contains_key("medical"));
    assert!(by_scope.get("finance").unwrap() > &0.0);
}
|
||||
|
||||
// ============================================================================
|
||||
// FINGERPRINT AND CACHING TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// Cached energy must be reused while the fingerprint is unchanged,
/// recomputed when the fingerprint changes, and recomputed after an explicit
/// invalidation even with an unchanged fingerprint.
///
/// Fix: `CachedEnergy::invalidate` was defined but never exercised (dead
/// code); a fourth phase now covers the explicit-invalidation path.
#[test]
fn test_cache_invalidation_on_state_change() {
    /// Single-value cache keyed by a state fingerprint.
    struct CachedEnergy {
        value: Option<f32>, // cached energy, None when empty or invalidated
        fingerprint: u64,   // fingerprint the cached value was computed for
    }

    impl CachedEnergy {
        fn new() -> Self {
            Self {
                value: None,
                fingerprint: 0,
            }
        }

        /// Return the cached value when the fingerprint matches and a value
        /// is present; otherwise compute, store, and return a fresh value.
        fn get_or_compute(
            &mut self,
            current_fingerprint: u64,
            compute_fn: impl FnOnce() -> f32,
        ) -> f32 {
            if self.fingerprint == current_fingerprint {
                if let Some(v) = self.value {
                    return v;
                }
            }

            let value = compute_fn();
            self.value = Some(value);
            self.fingerprint = current_fingerprint;
            value
        }

        /// Drop the cached value; the next lookup recomputes even if the
        /// fingerprint has not changed.
        fn invalidate(&mut self) {
            self.value = None;
        }
    }

    let mut cache = CachedEnergy::new();
    let mut compute_count = 0;

    // First computation populates the cache.
    let v1 = cache.get_or_compute(1, || {
        compute_count += 1;
        10.0
    });
    assert_eq!(v1, 10.0);
    assert_eq!(compute_count, 1);

    // Cached retrieval (same fingerprint) must not recompute.
    let v2 = cache.get_or_compute(1, || {
        compute_count += 1;
        10.0
    });
    assert_eq!(v2, 10.0);
    assert_eq!(compute_count, 1);

    // Fingerprint changed - must recompute.
    let v3 = cache.get_or_compute(2, || {
        compute_count += 1;
        20.0
    });
    assert_eq!(v3, 20.0);
    assert_eq!(compute_count, 2);

    // Explicit invalidation - must recompute even with the same fingerprint.
    cache.invalidate();
    let v4 = cache.get_or_compute(2, || {
        compute_count += 1;
        20.0
    });
    assert_eq!(v4, 20.0);
    assert_eq!(compute_count, 3);
}
|
||||
|
||||
// ============================================================================
|
||||
// PARALLEL COMPUTATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
/// Energy computation should be parallelizable across edges: splitting the
/// edge list over worker threads and summing partial energies must agree
/// with the sequential result.
///
/// Fix: removed the unused `use std::sync::atomic::{AtomicUsize, Ordering};`
/// which triggered an unused-import warning.
#[test]
fn test_parallel_energy_computation() {
    use std::sync::Arc;
    use std::thread;

    // Chain graph: 100 nodes, 99 unit-weight identity edges.
    let mut states = HashMap::new();
    for i in 0..100 {
        states.insert(i as u64, vec![i as f32 / 100.0, 0.5]);
    }

    let mut edges = Vec::new();
    for i in 0..99 {
        edges.push(TestEdge {
            source: i,
            target: i + 1,
            weight: 1.0,
            rho_source: RestrictionMap::new(2, 2),
            rho_target: RestrictionMap::new(2, 2),
        });
    }

    // Share read-only data via Arc; accumulate into a mutex-guarded total.
    let states = Arc::new(states);
    let edges = Arc::new(edges);
    let total = Arc::new(std::sync::Mutex::new(0.0f32));
    let num_threads = 4;
    let edges_per_thread = edges.len() / num_threads;

    let handles: Vec<_> = (0..num_threads)
        .map(|t| {
            let states = Arc::clone(&states);
            let edges = Arc::clone(&edges);
            let total = Arc::clone(&total);

            thread::spawn(move || {
                // Last thread absorbs the division remainder.
                let start = t * edges_per_thread;
                let end = if t == num_threads - 1 {
                    edges.len()
                } else {
                    (t + 1) * edges_per_thread
                };

                let mut local_sum = 0.0;
                for i in start..end {
                    if let Some(energy) = edges[i].compute_energy(&states) {
                        local_sum += energy;
                    }
                }

                // One lock acquisition per thread, not per edge.
                let mut total = total.lock().unwrap();
                *total += local_sum;
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }

    let parallel_total = *total.lock().unwrap();

    // Verify against the sequential computation.
    let (sequential_total, _) = compute_total_energy(&states, &edges);

    assert!(
        (parallel_total - sequential_total).abs() < 1e-6,
        "Parallel and sequential computation should match"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// SPECTRAL DRIFT DETECTION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_spectral_drift_detection() {
    // A significant shift in the eigenvalue distribution should register as
    // spectral drift, while a small perturbation should not.

    /// Snapshot of a spectrum at one point in time.
    struct EigenvalueSnapshot {
        eigenvalues: Vec<f32>,
    }

    /// 1-D Wasserstein-style distance: sort both spectra and average the
    /// element-wise absolute differences. Mismatched lengths are treated as
    /// infinitely far apart.
    fn eigenvalue_distance(a: &[f32], b: &[f32]) -> f32 {
        if a.len() != b.len() {
            return f32::MAX;
        }

        let mut lhs = a.to_vec();
        let mut rhs = b.to_vec();
        lhs.sort_by(|x, y| x.partial_cmp(y).unwrap());
        rhs.sort_by(|x, y| x.partial_cmp(y).unwrap());

        let total: f32 = lhs.iter().zip(&rhs).map(|(x, y)| (x - y).abs()).sum();
        total / a.len() as f32
    }

    let baseline = EigenvalueSnapshot {
        eigenvalues: vec![0.1, 0.3, 0.5, 0.8, 1.0],
    };

    // Small perturbation - should stay under the drift threshold.
    let perturbed = EigenvalueSnapshot {
        eigenvalues: vec![0.11, 0.31, 0.49, 0.79, 1.01],
    };

    // Large shift - should exceed the drift threshold.
    let shifted = EigenvalueSnapshot {
        eigenvalues: vec![0.5, 0.6, 0.7, 0.9, 2.0],
    };

    let drift_threshold = 0.1;
    let dist_small = eigenvalue_distance(&baseline.eigenvalues, &perturbed.eigenvalues);
    let dist_large = eigenvalue_distance(&baseline.eigenvalues, &shifted.eigenvalues);

    assert!(
        dist_small < drift_threshold,
        "Small change should not trigger drift"
    );
    assert!(
        dist_large > drift_threshold,
        "Large change should trigger drift"
    );
}
|
||||
715
vendor/ruvector/crates/prime-radiant/tests/integration/gate_tests.rs
vendored
Normal file
715
vendor/ruvector/crates/prime-radiant/tests/integration/gate_tests.rs
vendored
Normal file
@@ -0,0 +1,715 @@
|
||||
//! Integration tests for Coherence Gate and Compute Ladder
|
||||
//!
|
||||
//! Tests the Execution bounded context, verifying:
|
||||
//! - Gate decisions based on energy thresholds
|
||||
//! - Compute ladder escalation (O(1) -> O(n) -> O(n^o(1)))
|
||||
//! - Persistence detection for blocking decisions
|
||||
//! - Throttling behavior under high energy
|
||||
//! - Multi-lane processing
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// ============================================================================
|
||||
// TEST TYPES
|
||||
// ============================================================================
|
||||
|
||||
/// Outcome of a coherence-gate evaluation for a single action.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum GateDecision {
    /// Green light - proceed without restriction.
    Allow,
    /// Amber light - slow the action down; `factor` is the slowdown
    /// multiplier derived from how deep into the amber band the energy sits.
    Throttle { factor: u32 },
    /// Red light - block the action outright.
    Block,
}
|
||||
|
||||
/// Compute lane for escalation.
///
/// NOTE: the derived `Ord` follows declaration order
/// (Local < Neighborhood < Global < Spectral) and `Neighborhood` lanes
/// compare by `k`; the `max_escalation_lane` cap check relies on this.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum ComputeLane {
    /// O(1) - Local tile check, immediate response
    Local,
    /// O(k) - k-hop neighborhood check
    Neighborhood { k: usize },
    /// O(n) - Full graph traversal
    Global,
    /// O(n^o(1)) - Subpolynomial spectral analysis
    Spectral,
}
|
||||
|
||||
/// Threshold configuration for the coherence gate.
#[derive(Clone, Debug)]
struct ThresholdConfig {
    /// Energy strictly below this -> Allow
    green_threshold: f32,
    /// Energy strictly below this (but at/above green) -> Throttle
    amber_threshold: f32,
    /// Energy above this -> Block.
    /// NOTE(review): `decide` as written blocks on anything at/above
    /// `amber_threshold` and never reads this field — confirm intent.
    red_threshold: f32,
    /// Enable compute ladder escalation
    escalation_enabled: bool,
    /// Maximum escalation lane (escalation stops at this rung)
    max_escalation_lane: ComputeLane,
}
|
||||
|
||||
impl Default for ThresholdConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
green_threshold: 0.1,
|
||||
amber_threshold: 0.5,
|
||||
red_threshold: 1.0,
|
||||
escalation_enabled: true,
|
||||
max_escalation_lane: ComputeLane::Spectral,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Coherence gate engine: classifies energies into Allow/Throttle/Block,
/// keeps a sliding decision history for persistence detection, and walks
/// the compute ladder up and down.
struct CoherenceGate {
    // Threshold configuration consulted by `decide` and `escalate`.
    config: ThresholdConfig,
    // Rung of the compute ladder currently in use.
    current_lane: ComputeLane,
    // Most recent decisions, capped at `persistence_window` entries.
    decision_history: VecDeque<GateDecision>,
    // Number of consecutive decisions inspected by `is_persistent_block`.
    persistence_window: usize,
}
|
||||
|
||||
impl CoherenceGate {
|
||||
fn new(config: ThresholdConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
current_lane: ComputeLane::Local,
|
||||
decision_history: VecDeque::new(),
|
||||
persistence_window: 5,
|
||||
}
|
||||
}
|
||||
|
||||
/// Make a gate decision based on current energy
|
||||
fn decide(&mut self, energy: f32) -> GateDecision {
|
||||
let decision = if energy < self.config.green_threshold {
|
||||
GateDecision::Allow
|
||||
} else if energy < self.config.amber_threshold {
|
||||
// Calculate throttle factor based on energy
|
||||
let ratio = (energy - self.config.green_threshold)
|
||||
/ (self.config.amber_threshold - self.config.green_threshold);
|
||||
let factor = (1.0 + ratio * 9.0) as u32; // 1x to 10x throttle
|
||||
GateDecision::Throttle { factor }
|
||||
} else {
|
||||
GateDecision::Block
|
||||
};
|
||||
|
||||
// Track history for persistence detection
|
||||
self.decision_history.push_back(decision);
|
||||
if self.decision_history.len() > self.persistence_window {
|
||||
self.decision_history.pop_front();
|
||||
}
|
||||
|
||||
decision
|
||||
}
|
||||
|
||||
/// Check if blocking is persistent
|
||||
fn is_persistent_block(&self) -> bool {
|
||||
if self.decision_history.len() < self.persistence_window {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.decision_history
|
||||
.iter()
|
||||
.all(|d| matches!(d, GateDecision::Block))
|
||||
}
|
||||
|
||||
/// Escalate to higher compute lane
|
||||
fn escalate(&mut self) -> Option<ComputeLane> {
|
||||
if !self.config.escalation_enabled {
|
||||
return None;
|
||||
}
|
||||
|
||||
let next_lane = match self.current_lane {
|
||||
ComputeLane::Local => Some(ComputeLane::Neighborhood { k: 2 }),
|
||||
ComputeLane::Neighborhood { k } if k < 5 => {
|
||||
Some(ComputeLane::Neighborhood { k: k + 1 })
|
||||
}
|
||||
ComputeLane::Neighborhood { .. } => Some(ComputeLane::Global),
|
||||
ComputeLane::Global => Some(ComputeLane::Spectral),
|
||||
ComputeLane::Spectral => None, // Already at max
|
||||
};
|
||||
|
||||
if let Some(lane) = next_lane {
|
||||
if lane <= self.config.max_escalation_lane {
|
||||
self.current_lane = lane;
|
||||
return Some(lane);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// De-escalate to lower compute lane
|
||||
fn deescalate(&mut self) -> Option<ComputeLane> {
|
||||
let prev_lane = match self.current_lane {
|
||||
ComputeLane::Local => None,
|
||||
ComputeLane::Neighborhood { k } if k > 2 => {
|
||||
Some(ComputeLane::Neighborhood { k: k - 1 })
|
||||
}
|
||||
ComputeLane::Neighborhood { .. } => Some(ComputeLane::Local),
|
||||
ComputeLane::Global => Some(ComputeLane::Neighborhood { k: 5 }),
|
||||
ComputeLane::Spectral => Some(ComputeLane::Global),
|
||||
};
|
||||
|
||||
if let Some(lane) = prev_lane {
|
||||
self.current_lane = lane;
|
||||
return Some(lane);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Get current compute lane
|
||||
fn current_lane(&self) -> ComputeLane {
|
||||
self.current_lane
|
||||
}
|
||||
|
||||
/// Clear decision history
|
||||
fn reset_history(&mut self) {
|
||||
self.decision_history.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// BASIC GATE DECISION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_gate_allows_low_energy() {
    // Energy well under the default green threshold (0.1) must pass.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    assert_eq!(gate.decide(0.05), GateDecision::Allow);
}
|
||||
|
||||
#[test]
fn test_gate_throttles_medium_energy() {
    // 0.3 sits between green (0.1) and amber (0.5): expect a throttle whose
    // factor lands in the documented 1..=10 range.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    let decision = gate.decide(0.3);

    if let GateDecision::Throttle { factor } = decision {
        assert!(factor >= 1);
        assert!(factor <= 10);
    } else {
        panic!("Expected Throttle decision, got {:?}", decision);
    }
}
|
||||
|
||||
#[test]
fn test_gate_blocks_high_energy() {
    // 0.8 is above the default amber threshold, so the gate must block.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    assert_eq!(gate.decide(0.8), GateDecision::Block);
}
|
||||
|
||||
#[test]
fn test_gate_blocks_above_red_threshold() {
    // With red lowered to 0.5, an energy of 0.6 must be blocked.
    let config = ThresholdConfig {
        red_threshold: 0.5,
        ..Default::default()
    };
    let mut gate = CoherenceGate::new(config);

    assert_eq!(gate.decide(0.6), GateDecision::Block);
}
|
||||
|
||||
#[test]
fn test_throttle_factor_increases_with_energy() {
    // Within the amber band, a hotter signal must earn a harsher throttle.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    let cooler = gate.decide(0.15);
    let hotter = gate.decide(0.45);

    match (cooler, hotter) {
        (GateDecision::Throttle { factor: f1 }, GateDecision::Throttle { factor: f2 }) => {
            assert!(
                f2 > f1,
                "Higher energy should produce higher throttle factor"
            );
        }
        _ => panic!("Expected both to be Throttle decisions"),
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// THRESHOLD BOUNDARY TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_boundary_just_below_green() {
    // 0.099 < green (0.1), so the strict comparison still yields Allow.
    let config = ThresholdConfig {
        green_threshold: 0.1,
        ..Default::default()
    };
    let mut gate = CoherenceGate::new(config);

    assert_eq!(gate.decide(0.099), GateDecision::Allow);
}
|
||||
|
||||
#[test]
fn test_boundary_at_green() {
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.1,
        ..Default::default()
    });

    // Exactly at the green threshold the strict `<` comparison fails, so
    // 0.1 falls through into the Throttle band rather than Allow.
    let decision = gate.decide(0.1);
    assert!(matches!(decision, GateDecision::Throttle { .. }));
}
|
||||
|
||||
#[test]
fn test_boundary_just_below_amber() {
    // 0.499 is still strictly under amber (0.5): last value in the
    // throttle band.
    let config = ThresholdConfig {
        green_threshold: 0.1,
        amber_threshold: 0.5,
        ..Default::default()
    };
    let mut gate = CoherenceGate::new(config);

    assert!(matches!(gate.decide(0.499), GateDecision::Throttle { .. }));
}
|
||||
|
||||
#[test]
fn test_boundary_at_amber() {
    // Exactly at amber the strict `<` fails, so 0.5 is blocked.
    let config = ThresholdConfig {
        green_threshold: 0.1,
        amber_threshold: 0.5,
        ..Default::default()
    };
    let mut gate = CoherenceGate::new(config);

    assert_eq!(gate.decide(0.5), GateDecision::Block);
}
|
||||
|
||||
// ============================================================================
|
||||
// COMPUTE LADDER ESCALATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_initial_lane_is_local() {
    // A fresh gate starts on the cheapest rung of the ladder.
    let gate = CoherenceGate::new(ThresholdConfig::default());
    assert_eq!(gate.current_lane(), ComputeLane::Local);
}
|
||||
|
||||
#[test]
fn test_escalation_from_local_to_neighborhood() {
    // The first escalation hops from Local to the k=2 neighborhood lane.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    let next = gate.escalate();

    assert_eq!(next, Some(ComputeLane::Neighborhood { k: 2 }));
    assert_eq!(gate.current_lane(), ComputeLane::Neighborhood { k: 2 });
}
|
||||
|
||||
#[test]
fn test_escalation_through_neighborhood_k() {
    // The ladder walks Local -> k=2 -> k=3 -> k=4 -> k=5 -> Global.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    for k in 2..=5 {
        gate.escalate();
        assert_eq!(gate.current_lane(), ComputeLane::Neighborhood { k });
    }

    // One more step leaves the neighborhood lanes for Global.
    gate.escalate();
    assert_eq!(gate.current_lane(), ComputeLane::Global);
}
|
||||
|
||||
#[test]
fn test_escalation_to_spectral() {
    // With no cap, repeated escalation must terminate at the Spectral lane.
    // Idiom fix: `while let Some(_) = …` with an empty body is the clippy
    // `redundant_pattern_matching` pattern; use `.is_some()` instead.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // `escalate` returns None once it can climb no further, bounding the loop.
    while gate.escalate().is_some() {}

    assert_eq!(gate.current_lane(), ComputeLane::Spectral);
}
|
||||
|
||||
#[test]
fn test_escalation_respects_max_lane() {
    // Escalation must stop at the configured cap (Global), never reaching
    // Spectral. Idiom fix: `.is_some()` loop replaces the empty-bodied
    // `while let Some(_)` (clippy `redundant_pattern_matching`).
    let mut gate = CoherenceGate::new(ThresholdConfig {
        max_escalation_lane: ComputeLane::Global,
        ..Default::default()
    });

    // Climb until the cap refuses further escalation.
    while gate.escalate().is_some() {}

    assert_eq!(gate.current_lane(), ComputeLane::Global);
}
|
||||
|
||||
#[test]
fn test_escalation_disabled() {
    // With escalation switched off, the gate must refuse to leave Local.
    let config = ThresholdConfig {
        escalation_enabled: false,
        ..Default::default()
    };
    let mut gate = CoherenceGate::new(config);

    assert_eq!(gate.escalate(), None);
    assert_eq!(gate.current_lane(), ComputeLane::Local);
}
|
||||
|
||||
#[test]
fn test_deescalation_from_spectral() {
    // From the top of the ladder, one de-escalation lands on Global.
    // Idiom fix: `.is_some()` loop replaces the empty-bodied
    // `while let Some(_)` (clippy `redundant_pattern_matching`).
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Climb all the way to Spectral first.
    while gate.escalate().is_some() {}
    assert_eq!(gate.current_lane(), ComputeLane::Spectral);

    // Step back down one rung.
    let lane = gate.deescalate();
    assert_eq!(lane, Some(ComputeLane::Global));
    assert_eq!(gate.current_lane(), ComputeLane::Global);
}
|
||||
|
||||
#[test]
fn test_deescalation_to_local() {
    // Repeated de-escalation always bottoms out at Local.
    // Idiom fix: `.is_some()` loop replaces the empty-bodied
    // `while let Some(_)` (clippy `redundant_pattern_matching`).
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Climb two rungs: Local -> k=2 -> k=3.
    gate.escalate();
    gate.escalate();
    assert_eq!(gate.current_lane(), ComputeLane::Neighborhood { k: 3 });

    // Descend until `deescalate` reports the floor.
    while gate.deescalate().is_some() {}

    assert_eq!(gate.current_lane(), ComputeLane::Local);
}
|
||||
|
||||
#[test]
fn test_deescalation_from_local_returns_none() {
    // Local is the floor of the ladder; there is nowhere lower to go.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    assert_eq!(gate.deescalate(), None);
    assert_eq!(gate.current_lane(), ComputeLane::Local);
}
|
||||
|
||||
// ============================================================================
|
||||
// PERSISTENCE DETECTION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_no_persistence_initially() {
    // An empty history can never count as a persistent block.
    let gate = CoherenceGate::new(ThresholdConfig::default());
    assert!(!gate.is_persistent_block());
}
|
||||
|
||||
#[test]
fn test_persistence_detected_after_window() {
    // Five consecutive blocking decisions fill the window and trip the
    // persistence detector.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    for _ in 0..5 {
        gate.decide(0.9); // well above amber => Block
    }

    assert!(gate.is_persistent_block());
}
|
||||
|
||||
#[test]
fn test_no_persistence_with_mixed_decisions() {
    // A single Allow inside the window breaks the all-Block requirement.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    for &energy in &[0.9, 0.05, 0.9, 0.9, 0.9] {
        gate.decide(energy);
    }

    assert!(!gate.is_persistent_block());
}
|
||||
|
||||
#[test]
fn test_persistence_window_sliding() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Three Allows to start: nowhere near a persistent block.
    for _ in 0..3 {
        gate.decide(0.05);
    }
    assert!(!gate.is_persistent_block());

    // Five Blocks push the earlier Allows out of the 5-entry window.
    for _ in 0..5 {
        gate.decide(0.9);
    }
    assert!(gate.is_persistent_block());
}
|
||||
|
||||
#[test]
fn test_reset_clears_persistence() {
    // A history reset must drop persistent-block status.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    for _ in 0..5 {
        gate.decide(0.9);
    }
    assert!(gate.is_persistent_block());

    gate.reset_history();
    assert!(!gate.is_persistent_block());
}
|
||||
|
||||
// ============================================================================
|
||||
// MULTI-LANE PROCESSING TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_lane_complexity_ordering() {
    // The derived Ord on ComputeLane must follow declaration order, with
    // Neighborhood lanes ordered by k.
    assert!(ComputeLane::Local < ComputeLane::Neighborhood { k: 2 });
    assert!(ComputeLane::Neighborhood { k: 2 } < ComputeLane::Neighborhood { k: 3 });
    assert!(ComputeLane::Neighborhood { k: 5 } < ComputeLane::Global);
    assert!(ComputeLane::Global < ComputeLane::Spectral);
}
|
||||
|
||||
#[test]
fn test_automatic_escalation_on_block() {
    // Policy under test: when a decision blocks, climb one rung.
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    let decision = gate.decide(0.8);
    if matches!(decision, GateDecision::Block) {
        assert!(gate.escalate().is_some(), "Should escalate on block");
    }

    // 0.8 >= amber, so the block (and thus the escalation) happened.
    assert!(gate.current_lane() > ComputeLane::Local);
}
|
||||
|
||||
#[test]
fn test_automatic_deescalation_on_allow() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Climb two rungs: Local -> k=2 -> k=3.
    gate.escalate();
    gate.escalate();
    assert!(gate.current_lane() > ComputeLane::Local);

    // A calm signal is allowed...
    assert_eq!(gate.decide(0.01), GateDecision::Allow);

    // ...and the policy steps back down one rung.
    gate.deescalate();
    assert!(gate.current_lane() < ComputeLane::Neighborhood { k: 3 });
}
|
||||
|
||||
// ============================================================================
|
||||
// CUSTOM THRESHOLD TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_custom_thresholds() {
    // Tight custom bands: allow < 0.05, throttle < 0.15, block otherwise.
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.05,
        amber_threshold: 0.15,
        red_threshold: 0.25,
        escalation_enabled: true,
        max_escalation_lane: ComputeLane::Global,
    });

    // Very low energy -> Allow.
    assert_eq!(gate.decide(0.03), GateDecision::Allow);

    // Inside the amber band -> Throttle.
    assert!(matches!(gate.decide(0.10), GateDecision::Throttle { .. }));

    // At/above amber with these tight thresholds -> Block.
    assert_eq!(gate.decide(0.20), GateDecision::Block);
}
|
||||
|
||||
#[test]
fn test_zero_thresholds() {
    // Degenerate configuration: every band collapsed to zero. Because the
    // comparisons are strict `<`, any energy >= 0 lands in Block.
    let mut gate = CoherenceGate::new(ThresholdConfig {
        green_threshold: 0.0,
        amber_threshold: 0.0,
        red_threshold: 0.0,
        ..Default::default()
    });

    assert_eq!(gate.decide(0.001), GateDecision::Block);
    assert_eq!(gate.decide(1.0), GateDecision::Block);
}
|
||||
|
||||
// ============================================================================
|
||||
// CONCURRENT GATE ACCESS TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_gate_thread_safety_simulation() {
    // Share the gate behind Arc<Mutex<_>> and let four threads each take
    // one decision at a distinct energy level.
    use std::sync::{Arc, Mutex};
    use std::thread;

    let gate = Arc::new(Mutex::new(CoherenceGate::new(ThresholdConfig::default())));

    let handles: Vec<_> = (0..4)
        .map(|i| {
            let shared = Arc::clone(&gate);
            thread::spawn(move || {
                let energy = 0.1 * (i as f32);
                let decision = shared.lock().unwrap().decide(energy);
                (i, decision)
            })
        })
        .collect();

    let results: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect();

    // Every thread must have produced a decision.
    assert_eq!(results.len(), 4);
}
|
||||
|
||||
// ============================================================================
|
||||
// ENERGY SPIKE HANDLING TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_energy_spike_causes_immediate_block() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // Steady low-energy traffic sails through...
    for _ in 0..3 {
        assert_eq!(gate.decide(0.05), GateDecision::Allow);
    }

    // ...but a sudden spike is blocked on the very next decision.
    assert_eq!(gate.decide(0.9), GateDecision::Block);
}
|
||||
|
||||
#[test]
fn test_recovery_after_spike() {
    let mut gate = CoherenceGate::new(ThresholdConfig::default());

    // One isolated block never fills the persistence window.
    gate.decide(0.9);
    assert!(!gate.is_persistent_block());

    // A run of calm decisions flushes the spike out of the window.
    for _ in 0..5 {
        gate.decide(0.05);
    }
    assert!(!gate.is_persistent_block());

    // Subsequent traffic is allowed as normal.
    assert_eq!(gate.decide(0.05), GateDecision::Allow);
}
|
||||
|
||||
// ============================================================================
|
||||
// LANE LATENCY SIMULATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_lane_latency_simulation() {
    /// Nominal cost model for each rung of the ladder.
    fn lane_latency(lane: ComputeLane) -> Duration {
        match lane {
            ComputeLane::Local => Duration::from_micros(10),
            ComputeLane::Neighborhood { k } => Duration::from_micros(100 * k as u64),
            ComputeLane::Global => Duration::from_millis(10),
            ComputeLane::Spectral => Duration::from_millis(100),
        }
    }

    let ladder = [
        ComputeLane::Local,
        ComputeLane::Neighborhood { k: 2 },
        ComputeLane::Neighborhood { k: 5 },
        ComputeLane::Global,
        ComputeLane::Spectral,
    ];

    // The modeled cost must be strictly increasing up the ladder.
    let costs: Vec<_> = ladder.iter().map(|&lane| lane_latency(lane)).collect();
    for pair in costs.windows(2) {
        assert!(
            pair[0] < pair[1],
            "Higher lanes should have higher latency"
        );
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// REAL-TIME BUDGET TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_local_lane_meets_budget() {
    // The local lane's nominal budget is 1 ms per check; 1000 decisions
    // should finish well inside a very generous 100x allowance.
    let budget = Duration::from_millis(1);

    let start = Instant::now();

    let mut gate = CoherenceGate::new(ThresholdConfig::default());
    for _ in 0..1000 {
        gate.decide(0.05);
    }

    let elapsed = start.elapsed();

    assert!(
        elapsed < budget * 100, // Very generous for test environment
        "Local computation took too long: {:?}",
        elapsed
    );
}
|
||||
1017
vendor/ruvector/crates/prime-radiant/tests/integration/governance_tests.rs
vendored
Normal file
1017
vendor/ruvector/crates/prime-radiant/tests/integration/governance_tests.rs
vendored
Normal file
File diff suppressed because it is too large
Load Diff
528
vendor/ruvector/crates/prime-radiant/tests/integration/graph_tests.rs
vendored
Normal file
528
vendor/ruvector/crates/prime-radiant/tests/integration/graph_tests.rs
vendored
Normal file
@@ -0,0 +1,528 @@
|
||||
//! Integration tests for SheafGraph CRUD operations and dimension validation
|
||||
//!
|
||||
//! Tests the Knowledge Substrate bounded context, verifying:
|
||||
//! - Node creation, update, and deletion
|
||||
//! - Edge creation with restriction maps
|
||||
//! - Dimension compatibility validation
|
||||
//! - Subgraph extraction
|
||||
//! - Fingerprint-based change detection
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Test helper: the `dim` x `dim` identity matrix as nested Vecs, used as a
/// restriction map that leaves states unchanged.
fn identity_restriction(dim: usize) -> Vec<Vec<f32>> {
    let mut matrix = vec![vec![0.0; dim]; dim];
    for (i, row) in matrix.iter_mut().enumerate() {
        row[i] = 1.0;
    }
    matrix
}
|
||||
|
||||
/// Test helper: an `output_dim` x `input_dim` matrix selecting the first
/// `output_dim` coordinates; rows whose index exceeds `input_dim` stay all
/// zero.
fn projection_restriction(input_dim: usize, output_dim: usize) -> Vec<Vec<f32>> {
    let mut matrix = vec![vec![0.0; input_dim]; output_dim];
    for (i, row) in matrix.iter_mut().enumerate() {
        if i < input_dim {
            row[i] = 1.0;
        }
    }
    matrix
}
|
||||
|
||||
// ============================================================================
|
||||
// SHEAF NODE TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_node_creation_with_valid_state() {
    // A node is creatable from a state vector whose recorded dimension
    // matches the vector length, with the contents preserved.
    let state = vec![1.0_f32, 0.5, 0.3, 0.2];
    let dimension = state.len();

    assert_eq!(state.len(), dimension);
    assert!((state[0] - 1.0).abs() < f32::EPSILON);
}
|
||||
|
||||
#[test]
fn test_node_state_update_preserves_dimension() {
    // Replacing a node's state must keep the dimension constant.
    let before = vec![1.0, 0.5, 0.3];
    let after = vec![0.8, 0.6, 0.4];

    assert_eq!(before.len(), after.len());
}
|
||||
|
||||
#[test]
fn test_node_state_update_rejects_dimension_mismatch() {
    // A 2-dimensional replacement for a 3-dimensional state must be
    // detectable as a mismatch.
    let current = vec![1.0, 0.5, 0.3];
    let candidate = vec![0.8, 0.6]; // wrong dimension

    assert_ne!(current.len(), candidate.len());
}
|
||||
|
||||
#[test]
fn test_node_metadata_stores_custom_fields() {
    // Nodes carry free-form string metadata for domain-specific information.
    let metadata: HashMap<String, String> = [("source", "sensor_1"), ("confidence", "0.95")]
        .iter()
        .map(|&(k, v)| (k.to_string(), v.to_string()))
        .collect();

    assert_eq!(metadata.get("source"), Some(&"sensor_1".to_string()));
    assert_eq!(metadata.len(), 2);
}
|
||||
|
||||
// ============================================================================
|
||||
// SHEAF EDGE TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_edge_creation_with_identity_restriction() {
    // Applying an identity restriction must leave the state untouched.
    let dim = 4;
    let rho = identity_restriction(dim);
    let state = vec![1.0, 2.0, 3.0, 4.0];

    // Matrix-vector product row by row.
    let mapped: Vec<f32> = rho
        .iter()
        .map(|row| row.iter().zip(&state).map(|(m, s)| m * s).sum())
        .collect();

    assert_eq!(mapped, state);
}
|
||||
|
||||
#[test]
fn test_edge_creation_with_projection_restriction() {
    // A 4 -> 2 projection keeps exactly the first two coordinates.
    let rho = projection_restriction(4, 2);
    let state = vec![1.0, 2.0, 3.0, 4.0];

    // Matrix-vector product row by row.
    let mapped: Vec<f32> = rho
        .iter()
        .map(|row| row.iter().zip(&state).map(|(m, s)| m * s).sum())
        .collect();

    assert_eq!(mapped.len(), 2);
    assert_eq!(mapped, vec![1.0, 2.0]);
}
|
||||
|
||||
#[test]
fn test_edge_weight_affects_energy() {
    // Edge energy scales linearly with the weight applied to ||residual||^2.
    let residual = [0.1_f32, 0.1, 0.1];
    let norm_sq: f32 = residual.iter().map(|x| x * x).sum();

    let low_weight_energy = 1.0 * norm_sq;
    let high_weight_energy = 10.0 * norm_sq;

    assert!(high_weight_energy > low_weight_energy);
    assert!((high_weight_energy / low_weight_energy - 10.0).abs() < f32::EPSILON);
}
|
||||
|
||||
#[test]
fn test_edge_restriction_dimension_validation() {
    // A source->edge restriction must have edge_dim rows of source_dim
    // columns, and must map a source_dim state to an edge_dim one.
    let source_dim = 4;
    let edge_dim = 2;

    let rho = projection_restriction(source_dim, edge_dim);
    assert_eq!(rho.len(), edge_dim);
    assert_eq!(rho[0].len(), source_dim);

    // Feed a 4D state through and confirm a 2D output.
    let state = vec![1.0, 2.0, 3.0, 4.0];
    let mapped: Vec<f32> = rho
        .iter()
        .map(|row| row.iter().zip(&state).map(|(m, s)| m * s).sum())
        .collect();
    assert_eq!(mapped.len(), edge_dim);
}
|
||||
|
||||
// ============================================================================
|
||||
// SHEAF GRAPH CRUD TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_graph_add_node() {
    // Each insert under a fresh id grows the node table by one.
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5, 0.3]);
    assert_eq!(nodes.len(), 1);

    nodes.insert(2, vec![0.8, 0.6, 0.4]);
    assert_eq!(nodes.len(), 2);
}
|
||||
|
||||
#[test]
fn test_graph_add_edge_validates_nodes_exist() {
    // Edges can only be created between existing nodes: endpoint existence
    // must be checked before admitting an edge.
    //
    // Fix: removed the never-used local `edges` map (dead binding that only
    // produced an unused-variable warning).
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);

    // Both endpoints exist - an edge (1, 2) would be admissible.
    assert!(nodes.contains_key(&1));
    assert!(nodes.contains_key(&2));

    // A missing endpoint means the edge must be rejected.
    assert!(!nodes.contains_key(&999));
}
|
||||
|
||||
#[test]
fn test_graph_remove_node_cascades_to_edges() {
    // Deleting a node must also delete every edge incident to it.
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    let mut edges: HashMap<(u64, u64), f32> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);
    nodes.insert(3, vec![0.7, 0.4]);

    for key in [(1, 2), (2, 3), (1, 3)] {
        edges.insert(key, 1.0);
    }

    // Remove node 1 and purge all edges touching it.
    nodes.remove(&1);
    edges.retain(|&(src, tgt), _| src != 1 && tgt != 1);

    assert_eq!(nodes.len(), 2);
    assert_eq!(edges.len(), 1); // only (2, 3) survives
    assert!(edges.contains_key(&(2, 3)));
}
|
||||
|
||||
#[test]
fn test_graph_update_node_state() {
    // Re-inserting under an existing key overwrites the stored state.
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();

    nodes.insert(1, vec![1.0, 0.5, 0.3]);
    // Overwrite with the updated state vector.
    nodes.insert(1, vec![0.9, 0.6, 0.4]);

    let updated = nodes.get(&1).expect("node 1 must still exist");
    assert!((updated[0] - 0.9).abs() < f32::EPSILON);
}
|
||||
|
||||
// ============================================================================
|
||||
// SUBGRAPH EXTRACTION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_subgraph_extraction_bfs() {
    // k-hop neighborhood extraction around the center of a path graph.
    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    let mut adjacency: HashMap<u64, Vec<u64>> = HashMap::new();

    // Path graph: 1 - 2 - 3 - 4 - 5
    for id in 1..=5 {
        nodes.insert(id, vec![id as f32; 3]);
    }
    adjacency.insert(1, vec![2]);
    adjacency.insert(2, vec![1, 3]);
    adjacency.insert(3, vec![2, 4]);
    adjacency.insert(4, vec![3, 5]);
    adjacency.insert(5, vec![4]);

    // Breadth-first expansion: after `k` rounds, `reached` holds every node
    // within k hops of `center`.
    fn extract_khop(center: u64, k: usize, adjacency: &HashMap<u64, Vec<u64>>) -> Vec<u64> {
        let mut reached = vec![center];
        let mut ring = vec![center];

        for _ in 0..k {
            let mut next_ring = Vec::new();
            for node in &ring {
                if let Some(neighbors) = adjacency.get(node) {
                    for &nb in neighbors {
                        if !reached.contains(&nb) {
                            reached.push(nb);
                            next_ring.push(nb);
                        }
                    }
                }
            }
            ring = next_ring;
        }
        reached
    }

    let one_hop = extract_khop(3, 1, &adjacency);
    assert!(one_hop.contains(&3)); // center itself
    assert!(one_hop.contains(&2)); // direct neighbor
    assert!(one_hop.contains(&4)); // direct neighbor
    assert!(!one_hop.contains(&1)); // two hops out
    assert!(!one_hop.contains(&5)); // two hops out

    let two_hop = extract_khop(3, 2, &adjacency);
    assert_eq!(two_hop.len(), 5); // the whole chain lies within 2 hops
}
|
||||
|
||||
// ============================================================================
|
||||
// NAMESPACE AND SCOPE TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_namespace_isolation() {
    // Node ids registered under one namespace must not leak into another.
    let mut namespaces: HashMap<String, Vec<u64>> = HashMap::new();

    for (ns, id) in [("finance", 1), ("finance", 2), ("medical", 3)] {
        namespaces.entry(ns.to_string()).or_default().push(id);
    }

    let finance = namespaces.get("finance").unwrap();
    let medical = namespaces.get("medical").unwrap();

    assert_eq!(finance.len(), 2);
    assert_eq!(medical.len(), 1);

    // The two namespaces must be disjoint.
    assert!(finance.iter().all(|id| !medical.contains(id)));
}
|
||||
|
||||
// ============================================================================
|
||||
// FINGERPRINT TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_fingerprint_changes_on_modification() {
    // Any structural change (node or edge) must perturb the graph fingerprint.
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn compute_fingerprint(nodes: &HashMap<u64, Vec<f32>>, edges: &[(u64, u64)]) -> u64 {
        let mut hasher = DefaultHasher::new();

        // Hash nodes in sorted key order so the digest does not depend on
        // HashMap iteration order.
        let mut keys: Vec<_> = nodes.keys().collect();
        keys.sort();
        for key in keys {
            key.hash(&mut hasher);
            // Hash raw f32 bit patterns so equal states hash equally.
            for component in nodes.get(key).unwrap() {
                component.to_bits().hash(&mut hasher);
            }
        }

        for &(src, tgt) in edges {
            src.hash(&mut hasher);
            tgt.hash(&mut hasher);
        }

        hasher.finish()
    }

    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);

    let edges = vec![(1, 2)];
    let baseline = compute_fingerprint(&nodes, &edges);

    // A new node changes the digest.
    nodes.insert(3, vec![0.7, 0.4]);
    let after_node = compute_fingerprint(&nodes, &edges);
    assert_ne!(baseline, after_node);

    // A new edge changes it again.
    let extended = vec![(1, 2), (2, 3)];
    let after_edge = compute_fingerprint(&nodes, &extended);
    assert_ne!(after_node, after_edge);
}
|
||||
|
||||
#[test]
fn test_fingerprint_stable_without_modification() {
    // The same graph must hash to the same fingerprint every time.
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn compute_fingerprint(nodes: &HashMap<u64, Vec<f32>>) -> u64 {
        let mut hasher = DefaultHasher::new();
        // Sort keys so HashMap iteration order cannot affect the digest.
        let mut keys: Vec<_> = nodes.keys().collect();
        keys.sort();
        keys.iter().for_each(|key| key.hash(&mut hasher));
        hasher.finish()
    }

    let mut nodes: HashMap<u64, Vec<f32>> = HashMap::new();
    nodes.insert(1, vec![1.0, 0.5]);
    nodes.insert(2, vec![0.8, 0.6]);

    // Two computations over unchanged data must agree.
    assert_eq!(compute_fingerprint(&nodes), compute_fingerprint(&nodes));
}
|
||||
|
||||
// ============================================================================
|
||||
// DIMENSION VALIDATION TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_restriction_map_dimension_compatibility() {
    // A restriction map is a dense matrix: row count is the output (edge
    // stalk) dimension, column count the input dimension. `apply` must
    // reject inputs whose length differs from the column count.
    struct RestrictionMap {
        matrix: Vec<Vec<f32>>,
    }

    impl RestrictionMap {
        // Validate shape on construction: non-empty, rectangular.
        fn new(matrix: Vec<Vec<f32>>) -> Result<Self, &'static str> {
            if matrix.is_empty() {
                return Err("Matrix cannot be empty");
            }
            let width = matrix[0].len();
            match matrix.iter().any(|row| row.len() != width) {
                true => Err("All rows must have same length"),
                false => Ok(Self { matrix }),
            }
        }

        // Column count of the (rectangular) matrix.
        fn input_dim(&self) -> usize {
            self.matrix[0].len()
        }

        // Row count of the matrix.
        fn output_dim(&self) -> usize {
            self.matrix.len()
        }

        // Matrix-vector product; errors on a shape mismatch.
        fn apply(&self, input: &[f32]) -> Result<Vec<f32>, &'static str> {
            if input.len() != self.input_dim() {
                return Err("Input dimension mismatch");
            }
            let mut projected = Vec::with_capacity(self.output_dim());
            for row in &self.matrix {
                projected.push(row.iter().zip(input).map(|(a, b)| a * b).sum());
            }
            Ok(projected)
        }
    }

    let rho = RestrictionMap::new(projection_restriction(4, 2)).unwrap();

    // A 4D input projects down to 2D.
    let projected = rho.apply(&[1.0, 2.0, 3.0, 4.0]);
    assert!(projected.is_ok());
    assert_eq!(projected.unwrap().len(), 2);

    // A 2D input is the wrong shape and must be rejected.
    assert!(rho.apply(&[1.0, 2.0]).is_err());
}
|
||||
|
||||
#[test]
fn test_edge_creation_validates_stalk_dimensions() {
    // Both endpoints of an edge project into the same edge stalk, so the two
    // restriction maps must share an output dimension while each accepting
    // its own node's dimension as input.
    let (source_dim, target_dim, edge_stalk_dim) = (4, 3, 2);

    let rho_source = projection_restriction(source_dim, edge_stalk_dim);
    let rho_target = projection_restriction(target_dim, edge_stalk_dim);

    // Shared output dimension.
    assert_eq!(rho_source.len(), edge_stalk_dim);
    assert_eq!(rho_target.len(), edge_stalk_dim);

    // Per-endpoint input dimensions.
    assert_eq!(rho_source[0].len(), source_dim);
    assert_eq!(rho_target[0].len(), target_dim);
}
|
||||
|
||||
// ============================================================================
|
||||
// CONCURRENT ACCESS TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_concurrent_node_reads() {
    // Read-only access through Arc must be safe from several threads, and
    // every reader must observe identical data.
    use std::sync::Arc;
    use std::thread;

    let mut map: HashMap<u64, Vec<f32>> = HashMap::new();
    for i in 0..100 {
        map.insert(i, vec![i as f32; 4]);
    }
    let nodes = Arc::new(map);

    let workers: Vec<_> = (0..4)
        .map(|_| {
            let shared = Arc::clone(&nodes);
            thread::spawn(move || {
                // Sum the first component of every node state.
                (0..100)
                    .filter_map(|i| shared.get(&i))
                    .map(|state| state[0])
                    .sum::<f32>()
            })
        })
        .collect();

    // Every thread must compute exactly the same sum.
    let expected: f32 = (0..100).map(|i| i as f32).sum();
    for handle in workers {
        let observed = handle.join().unwrap();
        assert!((observed - expected).abs() < f32::EPSILON);
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// LARGE GRAPH TESTS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_large_graph_creation() {
    // Building a 1000-node graph with 16D states should succeed and support
    // random access afterwards.
    let num_nodes = 1000;
    let dim = 16;

    let nodes: HashMap<u64, Vec<f32>> = (0..num_nodes)
        .map(|i| {
            let state: Vec<f32> = (0..dim).map(|j| (i * dim + j) as f32 / 1000.0).collect();
            (i as u64, state)
        })
        .collect();

    assert_eq!(nodes.len(), num_nodes);

    // Spot-check an arbitrary node in the middle.
    assert_eq!(nodes.get(&500).map(|s| s.len()), Some(dim));
}
|
||||
|
||||
#[test]
fn test_sparse_graph_edge_ratio() {
    // With average degree 4 on 100 nodes, the graph uses well under 10% of
    // all possible undirected edges.
    let num_nodes = 100;
    let avg_degree = 4;

    let num_edges = num_nodes * avg_degree / 2; // each undirected edge counted once
    let max_edges = num_nodes * (num_nodes - 1) / 2;

    let sparsity = num_edges as f64 / max_edges as f64;
    assert!(sparsity < 0.1);
}
|
||||
13
vendor/ruvector/crates/prime-radiant/tests/integration/mod.rs
vendored
Normal file
13
vendor/ruvector/crates/prime-radiant/tests/integration/mod.rs
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
//! Integration tests for Prime-Radiant Coherence Engine
|
||||
//!
|
||||
//! This module contains integration tests organized by bounded context:
|
||||
//!
|
||||
//! - `graph_tests`: SheafGraph CRUD operations and dimension validation
|
||||
//! - `coherence_tests`: Energy computation and incremental updates
|
||||
//! - `governance_tests`: Policy bundles and witness chain integrity
|
||||
//! - `gate_tests`: Compute ladder escalation and persistence detection
|
||||
|
||||
mod coherence_tests;
|
||||
mod gate_tests;
|
||||
mod governance_tests;
|
||||
mod graph_tests;
|
||||
681
vendor/ruvector/crates/prime-radiant/tests/property/coherence_properties.rs
vendored
Normal file
681
vendor/ruvector/crates/prime-radiant/tests/property/coherence_properties.rs
vendored
Normal file
@@ -0,0 +1,681 @@
|
||||
//! Property-based tests for coherence computation invariants
|
||||
//!
|
||||
//! Mathematical invariants tested:
|
||||
//! 1. Energy is always non-negative (E >= 0)
|
||||
//! 2. Consistent sections have zero energy (rho_u(x_u) = rho_v(x_v) => E = 0)
|
||||
//! 3. Residual symmetry (r_{u,v} = -r_{v,u})
|
||||
//! 4. Energy scales with weight (E(w*e) = w * E(e) for w >= 0)
|
||||
//! 5. Triangle inequality for distances derived from energy
|
||||
|
||||
use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
|
||||
use quickcheck_macros::quickcheck;
|
||||
use rand::Rng;
|
||||
|
||||
// ============================================================================
|
||||
// TEST TYPES WITH ARBITRARY IMPLEMENTATIONS
|
||||
// ============================================================================
|
||||
|
||||
/// A bounded float for testing (avoids infinities and NaN).
///
/// Wraps an `f32` kept in `[-1000.0, 1000.0]` by construction — the
/// `Arbitrary` impl below derives it from a random `i32`, which can never
/// yield NaN or Inf.
#[derive(Clone, Copy, Debug)]
struct BoundedFloat(f32);
|
||||
|
||||
impl Arbitrary for BoundedFloat {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
// Use i32 to generate a bounded integer, then convert to float
|
||||
// This avoids NaN and Inf that f32::arbitrary can produce
|
||||
let val: i32 = i32::arbitrary(g);
|
||||
let float_val = (val as f32 / (i32::MAX as f32 / 1000.0)).clamp(-1000.0, 1000.0);
|
||||
BoundedFloat(float_val)
|
||||
}
|
||||
|
||||
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
|
||||
Box::new(std::iter::empty())
|
||||
}
|
||||
}
|
||||
|
||||
/// A non-negative float for weights.
///
/// Wraps an `f32` in `[0.0, 1000.0]`; the `Arbitrary` impl below derives it
/// from a random `u32`, so it is never negative.
#[derive(Clone, Copy, Debug)]
struct NonNegativeFloat(f32);
|
||||
|
||||
impl Arbitrary for NonNegativeFloat {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
// Use u32 to generate a bounded non-negative integer, then convert to float
|
||||
let val: u32 = u32::arbitrary(g);
|
||||
let float_val = (val as f32 / (u32::MAX as f32 / 1000.0)).min(1000.0);
|
||||
NonNegativeFloat(float_val)
|
||||
}
|
||||
|
||||
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
|
||||
Box::new(std::iter::empty())
|
||||
}
|
||||
}
|
||||
|
||||
/// A positive float (> 0) for weights.
///
/// Wraps an `f32` in `[0.001, 1000.0]` (see the `Arbitrary` impl below).
#[derive(Clone, Copy, Debug)]
struct PositiveFloat(f32);
||||
|
||||
impl Arbitrary for PositiveFloat {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
// Use u32 to generate a bounded positive integer, then convert to float
|
||||
let val: u32 = u32::arbitrary(g);
|
||||
let float_val = (val as f32 / (u32::MAX as f32 / 1000.0))
|
||||
.max(0.001)
|
||||
.min(1000.0);
|
||||
PositiveFloat(float_val)
|
||||
}
|
||||
}
|
||||
|
||||
/// A state vector of fixed dimension.
///
/// Thin wrapper around a `Vec<f32>` used as a node state in the property
/// tests below; the vector's length defines the dimension.
#[derive(Clone, Debug)]
struct StateVector {
    // Components of the state; `values.len()` is the dimension.
    values: Vec<f32>,
}
|
||||
|
||||
impl StateVector {
|
||||
fn new(values: Vec<f32>) -> Self {
|
||||
Self { values }
|
||||
}
|
||||
|
||||
fn dim(&self) -> usize {
|
||||
self.values.len()
|
||||
}
|
||||
|
||||
fn zeros(dim: usize) -> Self {
|
||||
Self {
|
||||
values: vec![0.0; dim],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Arbitrary for StateVector {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let dim = usize::arbitrary(g) % 8 + 1; // 1-8 dimensions
|
||||
let values: Vec<f32> = (0..dim)
|
||||
.map(|_| {
|
||||
let bf = BoundedFloat::arbitrary(g);
|
||||
bf.0
|
||||
})
|
||||
.collect();
|
||||
StateVector::new(values)
|
||||
}
|
||||
|
||||
// Empty shrink to avoid stack overflow from recursive shrinking
|
||||
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
|
||||
Box::new(std::iter::empty())
|
||||
}
|
||||
}
|
||||
|
||||
/// Identity restriction map.
///
/// `apply` returns its input unchanged.
/// NOTE(review): `dim` is set by the `Arbitrary` impl but never read by
/// `apply` — presumably kept for symmetry with other map types; confirm
/// before removing.
#[derive(Clone, Debug)]
struct IdentityMap {
    // Nominal dimension chosen at generation time.
    dim: usize,
}
|
||||
|
||||
impl IdentityMap {
|
||||
fn apply(&self, input: &[f32]) -> Vec<f32> {
|
||||
input.to_vec()
|
||||
}
|
||||
}
|
||||
|
||||
impl Arbitrary for IdentityMap {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let dim = usize::arbitrary(g) % 8 + 1;
|
||||
Self { dim }
|
||||
}
|
||||
}
|
||||
|
||||
/// A simple restriction map (linear transform).
///
/// Represented as a dense row-major matrix: `matrix.len()` is the output
/// dimension and `matrix[0].len()` the input dimension.
#[derive(Clone, Debug)]
struct SimpleRestrictionMap {
    // Row-major coefficients; each inner Vec is one output row.
    matrix: Vec<Vec<f32>>,
}
|
||||
|
||||
impl SimpleRestrictionMap {
|
||||
fn identity(dim: usize) -> Self {
|
||||
let matrix = (0..dim)
|
||||
.map(|i| {
|
||||
let mut row = vec![0.0; dim];
|
||||
row[i] = 1.0;
|
||||
row
|
||||
})
|
||||
.collect();
|
||||
Self { matrix }
|
||||
}
|
||||
|
||||
fn apply(&self, input: &[f32]) -> Vec<f32> {
|
||||
self.matrix
|
||||
.iter()
|
||||
.map(|row| row.iter().zip(input).map(|(a, b)| a * b).sum())
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn output_dim(&self) -> usize {
|
||||
self.matrix.len()
|
||||
}
|
||||
|
||||
fn input_dim(&self) -> usize {
|
||||
if self.matrix.is_empty() {
|
||||
0
|
||||
} else {
|
||||
self.matrix[0].len()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Arbitrary for SimpleRestrictionMap {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let input_dim = usize::arbitrary(g) % 6 + 2; // 2-7 dimensions
|
||||
let output_dim = usize::arbitrary(g) % 6 + 2;
|
||||
|
||||
let matrix: Vec<Vec<f32>> = (0..output_dim)
|
||||
.map(|_| {
|
||||
(0..input_dim)
|
||||
.map(|_| {
|
||||
let bf = BoundedFloat::arbitrary(g);
|
||||
bf.0 / 100.0 // Scale down for stability
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
.collect();
|
||||
|
||||
Self { matrix }
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// HELPER FUNCTIONS
|
||||
// ============================================================================
|
||||
|
||||
/// Compute residual: rho_source(x_source) - rho_target(x_target)
|
||||
fn compute_residual(
|
||||
source_state: &[f32],
|
||||
target_state: &[f32],
|
||||
rho_source: &SimpleRestrictionMap,
|
||||
rho_target: &SimpleRestrictionMap,
|
||||
) -> Vec<f32> {
|
||||
let projected_source = rho_source.apply(source_state);
|
||||
let projected_target = rho_target.apply(target_state);
|
||||
|
||||
projected_source
|
||||
.iter()
|
||||
.zip(&projected_target)
|
||||
.map(|(a, b)| a - b)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Compute energy from residual and weight: `weight * ||residual||^2`.
fn compute_energy(residual: &[f32], weight: f32) -> f32 {
    residual.iter().fold(0.0, |acc, x| acc + x * x) * weight
}
|
||||
|
||||
/// Compute total energy for a graph
|
||||
fn compute_total_energy(states: &[(usize, Vec<f32>)], edges: &[(usize, usize, f32)]) -> f32 {
|
||||
let dim = if states.is_empty() {
|
||||
0
|
||||
} else {
|
||||
states[0].1.len()
|
||||
};
|
||||
let rho = SimpleRestrictionMap::identity(dim);
|
||||
|
||||
let mut total: f32 = 0.0;
|
||||
for &(src, tgt, weight) in edges {
|
||||
if let (Some((_, src_state)), Some((_, tgt_state))) = (
|
||||
states.iter().find(|(id, _)| *id == src),
|
||||
states.iter().find(|(id, _)| *id == tgt),
|
||||
) {
|
||||
let residual = compute_residual(src_state, tgt_state, &rho, &rho);
|
||||
total += compute_energy(&residual, weight);
|
||||
}
|
||||
}
|
||||
total
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: ENERGY IS NON-NEGATIVE
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_energy_nonnegative(
|
||||
source: StateVector,
|
||||
target: StateVector,
|
||||
weight: NonNegativeFloat,
|
||||
) -> TestResult {
|
||||
// Skip if dimensions don't match
|
||||
if source.dim() != target.dim() {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(source.dim());
|
||||
let residual = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
let energy = compute_energy(&residual, weight.0);
|
||||
|
||||
if energy >= 0.0 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::failed()
|
||||
}
|
||||
}
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_energy_nonnegative_arbitrary_restriction(
|
||||
source: StateVector,
|
||||
target: StateVector,
|
||||
weight: NonNegativeFloat,
|
||||
) -> TestResult {
|
||||
// Skip if dimensions are incompatible
|
||||
if source.dim() == 0 || target.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let common_dim = source.dim().min(target.dim()).min(4);
|
||||
let rho_src = SimpleRestrictionMap::identity(common_dim);
|
||||
let rho_tgt = SimpleRestrictionMap::identity(common_dim);
|
||||
|
||||
// Truncate to common dimension
|
||||
let src_truncated: Vec<f32> = source.values.iter().take(common_dim).copied().collect();
|
||||
let tgt_truncated: Vec<f32> = target.values.iter().take(common_dim).copied().collect();
|
||||
|
||||
let residual = compute_residual(&src_truncated, &tgt_truncated, &rho_src, &rho_tgt);
|
||||
let energy = compute_energy(&residual, weight.0);
|
||||
|
||||
if energy >= 0.0 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::failed()
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: CONSISTENT SECTIONS HAVE ZERO ENERGY
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_consistent_section_zero_energy(state: StateVector, weight: PositiveFloat) -> TestResult {
|
||||
if state.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
// Same state on both ends of edge (consistent section)
|
||||
let rho = SimpleRestrictionMap::identity(state.dim());
|
||||
let residual = compute_residual(&state.values, &state.values, &rho, &rho);
|
||||
let energy = compute_energy(&residual, weight.0);
|
||||
|
||||
// Energy should be zero (within floating point tolerance)
|
||||
if energy.abs() < 1e-6 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!("Expected zero energy, got {}", energy))
|
||||
}
|
||||
}
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_uniform_states_zero_energy(state: StateVector, n_nodes: u8) -> TestResult {
|
||||
let n = (n_nodes % 10 + 2) as usize; // 2-11 nodes
|
||||
if state.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
// Create a path graph with uniform states
|
||||
let states: Vec<(usize, Vec<f32>)> = (0..n).map(|i| (i, state.values.clone())).collect();
|
||||
|
||||
let edges: Vec<(usize, usize, f32)> = (0..n - 1).map(|i| (i, i + 1, 1.0)).collect();
|
||||
|
||||
let total_energy = compute_total_energy(&states, &edges);
|
||||
|
||||
if total_energy.abs() < 1e-6 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!("Expected zero energy, got {}", total_energy))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: RESIDUAL SYMMETRY
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_residual_symmetry(source: StateVector, target: StateVector) -> TestResult {
|
||||
if source.dim() != target.dim() || source.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(source.dim());
|
||||
|
||||
// r_{u,v} = rho(x_u) - rho(x_v)
|
||||
let r_uv = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
|
||||
// r_{v,u} = rho(x_v) - rho(x_u)
|
||||
let r_vu = compute_residual(&target.values, &source.values, &rho, &rho);
|
||||
|
||||
// Check r_uv = -r_vu
|
||||
for (a, b) in r_uv.iter().zip(&r_vu) {
|
||||
if (a + b).abs() > 1e-6 {
|
||||
return TestResult::error(format!("Symmetry violated: {} != -{}", a, b));
|
||||
}
|
||||
}
|
||||
|
||||
TestResult::passed()
|
||||
}
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_residual_energy_symmetric(
|
||||
source: StateVector,
|
||||
target: StateVector,
|
||||
weight: PositiveFloat,
|
||||
) -> TestResult {
|
||||
if source.dim() != target.dim() || source.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(source.dim());
|
||||
|
||||
let r_uv = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
let r_vu = compute_residual(&target.values, &source.values, &rho, &rho);
|
||||
|
||||
let e_uv = compute_energy(&r_uv, weight.0);
|
||||
let e_vu = compute_energy(&r_vu, weight.0);
|
||||
|
||||
// Energy should be the same regardless of direction
|
||||
if (e_uv - e_vu).abs() < 1e-6 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!("Energy not symmetric: {} vs {}", e_uv, e_vu))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: ENERGY SCALES WITH WEIGHT
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_energy_scales_with_weight(
|
||||
source: StateVector,
|
||||
target: StateVector,
|
||||
weight1: PositiveFloat,
|
||||
scale: PositiveFloat,
|
||||
) -> TestResult {
|
||||
if source.dim() != target.dim() || source.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
// Limit scale to avoid overflow
|
||||
let scale = scale.0.min(100.0);
|
||||
if scale < 0.01 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(source.dim());
|
||||
let residual = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
|
||||
let e1 = compute_energy(&residual, weight1.0);
|
||||
let e2 = compute_energy(&residual, weight1.0 * scale);
|
||||
|
||||
// e2 should be approximately scale * e1
|
||||
let expected = e1 * scale;
|
||||
if (e2 - expected).abs() < 1e-4 * expected.abs().max(1.0) {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!(
|
||||
"Scaling failed: {} * {} = {}, but got {}",
|
||||
e1, scale, expected, e2
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_zero_weight_zero_energy(source: StateVector, target: StateVector) -> TestResult {
|
||||
if source.dim() != target.dim() || source.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(source.dim());
|
||||
let residual = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
let energy = compute_energy(&residual, 0.0);
|
||||
|
||||
if energy.abs() < 1e-10 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!(
|
||||
"Zero weight should give zero energy, got {}",
|
||||
energy
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: TOTAL ENERGY IS SUM OF EDGE ENERGIES
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_energy_additivity(
|
||||
state1: StateVector,
|
||||
state2: StateVector,
|
||||
state3: StateVector,
|
||||
) -> TestResult {
|
||||
// Ensure all states have the same dimension
|
||||
let dim = state1.dim();
|
||||
if dim == 0 || state2.dim() != dim || state3.dim() != dim {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(dim);
|
||||
|
||||
// Compute individual edge energies
|
||||
let r_12 = compute_residual(&state1.values, &state2.values, &rho, &rho);
|
||||
let r_23 = compute_residual(&state2.values, &state3.values, &rho, &rho);
|
||||
|
||||
let e_12 = compute_energy(&r_12, 1.0);
|
||||
let e_23 = compute_energy(&r_23, 1.0);
|
||||
|
||||
// Compute total via helper
|
||||
let states = vec![
|
||||
(0, state1.values.clone()),
|
||||
(1, state2.values.clone()),
|
||||
(2, state3.values.clone()),
|
||||
];
|
||||
let edges = vec![(0, 1, 1.0), (1, 2, 1.0)];
|
||||
let total = compute_total_energy(&states, &edges);
|
||||
|
||||
let expected = e_12 + e_23;
|
||||
if (total - expected).abs() < 1e-6 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!(
|
||||
"Additivity failed: {} + {} != {}",
|
||||
e_12, e_23, total
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: ENERGY MONOTONICITY IN DEVIATION
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_energy_increases_with_deviation(
|
||||
base_state: StateVector,
|
||||
small_delta: BoundedFloat,
|
||||
large_delta: BoundedFloat,
|
||||
) -> TestResult {
|
||||
if base_state.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let small = small_delta.0.abs().min(1.0);
|
||||
let large = large_delta.0.abs().max(small + 0.1).min(10.0);
|
||||
|
||||
// Create states with different deviations
|
||||
let target = base_state.values.clone();
|
||||
let source_small: Vec<f32> = base_state.values.iter().map(|x| x + small).collect();
|
||||
let source_large: Vec<f32> = base_state.values.iter().map(|x| x + large).collect();
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(base_state.dim());
|
||||
|
||||
let r_small = compute_residual(&source_small, &target, &rho, &rho);
|
||||
let r_large = compute_residual(&source_large, &target, &rho, &rho);
|
||||
|
||||
let e_small = compute_energy(&r_small, 1.0);
|
||||
let e_large = compute_energy(&r_large, 1.0);
|
||||
|
||||
// Larger deviation should produce larger energy
|
||||
if e_large >= e_small - 1e-6 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!(
|
||||
"Energy should increase with deviation: {} < {}",
|
||||
e_large, e_small
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: RESTRICTION MAP COMPOSITION
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_identity_map_preserves_state(state: StateVector) -> TestResult {
|
||||
if state.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(state.dim());
|
||||
let projected = rho.apply(&state.values);
|
||||
|
||||
// Identity should preserve the state
|
||||
for (orig, proj) in state.values.iter().zip(&projected) {
|
||||
if (orig - proj).abs() > 1e-6 {
|
||||
return TestResult::error(format!("Identity map changed state: {} -> {}", orig, proj));
|
||||
}
|
||||
}
|
||||
|
||||
TestResult::passed()
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: EDGE CONTRACTION REDUCES ENERGY
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_averaging_reduces_energy(state1: StateVector, state2: StateVector) -> TestResult {
|
||||
if state1.dim() != state2.dim() || state1.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(state1.dim());
|
||||
|
||||
// Original energy
|
||||
let r_orig = compute_residual(&state1.values, &state2.values, &rho, &rho);
|
||||
let e_orig = compute_energy(&r_orig, 1.0);
|
||||
|
||||
// Average state
|
||||
let avg: Vec<f32> = state1
|
||||
.values
|
||||
.iter()
|
||||
.zip(&state2.values)
|
||||
.map(|(a, b)| (a + b) / 2.0)
|
||||
.collect();
|
||||
|
||||
// Energy when one node takes the average
|
||||
let r_new = compute_residual(&avg, &state2.values, &rho, &rho);
|
||||
let e_new = compute_energy(&r_new, 1.0);
|
||||
|
||||
// Energy should decrease or stay the same
|
||||
if e_new <= e_orig + 1e-6 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!(
|
||||
"Averaging should reduce energy: {} -> {}",
|
||||
e_orig, e_new
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: NUMERIC STABILITY
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_energy_stable_for_large_values() {
    // Scale states up by 1000x across several small dimensions and confirm the
    // energy stays finite and non-negative (avoids quickcheck's recursive shrinking).
    for dim in 1..=8 {
        let base: Vec<f32> = (0..dim).map(|i| (i as f32) * 100.0 + 0.5).collect();
        let scaled: Vec<f32> = base.iter().map(|x| x * 1000.0).collect();
        let rho = SimpleRestrictionMap::identity(dim);

        let energy = compute_energy(&compute_residual(&scaled, &base, &rho, &rho), 1.0);

        assert!(!energy.is_nan(), "Energy became NaN for dim {}", dim);
        assert!(!energy.is_infinite(), "Energy became Inf for dim {}", dim);
        assert!(
            energy >= 0.0,
            "Energy became negative for dim {}: {}",
            dim,
            energy
        );
    }
}
|
||||
|
||||
#[test]
fn test_energy_stable_for_small_values() {
    // Shrink states by 1000x and compare against the origin; the energy must
    // remain well-formed even near underflow (no quickcheck shrinking needed).
    for dim in 1..=8 {
        let base: Vec<f32> = (0..dim).map(|i| (i as f32) * 0.1 + 0.01).collect();
        let tiny: Vec<f32> = base.iter().map(|x| x / 1000.0).collect();
        let origin = vec![0.0f32; dim];
        let rho = SimpleRestrictionMap::identity(dim);

        let energy = compute_energy(&compute_residual(&tiny, &origin, &rho, &rho), 1.0);

        assert!(!energy.is_nan(), "Energy became NaN for dim {}", dim);
        assert!(!energy.is_infinite(), "Energy became Inf for dim {}", dim);
        assert!(
            energy >= 0.0,
            "Energy became negative for dim {}: {}",
            dim,
            energy
        );
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// PROPERTY: DETERMINISM
|
||||
// ============================================================================
|
||||
|
||||
#[quickcheck]
|
||||
fn prop_energy_computation_deterministic(
|
||||
source: StateVector,
|
||||
target: StateVector,
|
||||
weight: PositiveFloat,
|
||||
) -> TestResult {
|
||||
if source.dim() != target.dim() || source.dim() == 0 {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let rho = SimpleRestrictionMap::identity(source.dim());
|
||||
|
||||
// Compute energy multiple times
|
||||
let e1 = {
|
||||
let r = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
compute_energy(&r, weight.0)
|
||||
};
|
||||
|
||||
let e2 = {
|
||||
let r = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
compute_energy(&r, weight.0)
|
||||
};
|
||||
|
||||
let e3 = {
|
||||
let r = compute_residual(&source.values, &target.values, &rho, &rho);
|
||||
compute_energy(&r, weight.0)
|
||||
};
|
||||
|
||||
// All results should be identical
|
||||
if (e1 - e2).abs() < 1e-10 && (e2 - e3).abs() < 1e-10 {
|
||||
TestResult::passed()
|
||||
} else {
|
||||
TestResult::error(format!("Non-deterministic results: {}, {}, {}", e1, e2, e3))
|
||||
}
|
||||
}
|
||||
6
vendor/ruvector/crates/prime-radiant/tests/property/mod.rs
vendored
Normal file
6
vendor/ruvector/crates/prime-radiant/tests/property/mod.rs
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
//! Property-based tests for Prime-Radiant Coherence Engine
|
||||
//!
|
||||
//! This module contains property-based tests using quickcheck to verify
|
||||
//! mathematical invariants that must hold for all inputs.
|
||||
|
||||
mod coherence_properties;
|
||||
805
vendor/ruvector/crates/prime-radiant/tests/replay_determinism.rs
vendored
Normal file
805
vendor/ruvector/crates/prime-radiant/tests/replay_determinism.rs
vendored
Normal file
@@ -0,0 +1,805 @@
|
||||
//! Replay Determinism Tests
|
||||
//!
|
||||
//! Verifies that replaying the same sequence of events produces identical state.
|
||||
//! This is critical for:
|
||||
//! - Reproducible debugging
|
||||
//! - Witness chain validation
|
||||
//! - Distributed consensus
|
||||
//! - Audit trail verification
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
// ============================================================================
|
||||
// EVENT TYPES
|
||||
// ============================================================================
|
||||
|
||||
/// Domain events that can modify coherence state.
///
/// Every variant carries a `timestamp` (logical clock value) so an event log
/// can be replayed fully or up to an inclusive cut-off point.
#[derive(Clone, Debug, PartialEq)]
enum DomainEvent {
    /// Add a node with initial state
    NodeAdded {
        node_id: u64,
        state: Vec<f32>,
        timestamp: u64,
    },
    /// Update a node's state (`old_state` is audit data; replay applies only `new_state`)
    NodeUpdated {
        node_id: u64,
        old_state: Vec<f32>,
        new_state: Vec<f32>,
        timestamp: u64,
    },
    /// Remove a node (replay also drops edges incident to it)
    NodeRemoved { node_id: u64, timestamp: u64 },
    /// Add an edge between nodes
    EdgeAdded {
        source: u64,
        target: u64,
        weight: f32,
        timestamp: u64,
    },
    /// Update edge weight (`old_weight` is audit data; replay applies only `new_weight`)
    EdgeWeightUpdated {
        source: u64,
        target: u64,
        old_weight: f32,
        new_weight: f32,
        timestamp: u64,
    },
    /// Remove an edge
    EdgeRemoved {
        source: u64,
        target: u64,
        timestamp: u64,
    },
    /// Policy threshold change, keyed by scope name
    ThresholdChanged {
        scope: String,
        old_threshold: f32,
        new_threshold: f32,
        timestamp: u64,
    },
}
|
||||
|
||||
impl DomainEvent {
|
||||
fn timestamp(&self) -> u64 {
|
||||
match self {
|
||||
DomainEvent::NodeAdded { timestamp, .. } => *timestamp,
|
||||
DomainEvent::NodeUpdated { timestamp, .. } => *timestamp,
|
||||
DomainEvent::NodeRemoved { timestamp, .. } => *timestamp,
|
||||
DomainEvent::EdgeAdded { timestamp, .. } => *timestamp,
|
||||
DomainEvent::EdgeWeightUpdated { timestamp, .. } => *timestamp,
|
||||
DomainEvent::EdgeRemoved { timestamp, .. } => *timestamp,
|
||||
DomainEvent::ThresholdChanged { timestamp, .. } => *timestamp,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// COHERENCE STATE
|
||||
// ============================================================================
|
||||
|
||||
/// Coherence engine state (simplified for testing)
///
/// Rebuilt deterministically by replaying a sequence of `DomainEvent`s from
/// an empty state.
#[derive(Clone, Debug)]
struct CoherenceState {
    nodes: HashMap<u64, Vec<f32>>, // node id -> state vector
    edges: HashMap<(u64, u64), f32>, // (source, target) -> edge weight
    thresholds: HashMap<String, f32>, // policy scope -> threshold value
    energy_cache: Option<f32>, // memoized compute_energy() result; cleared on apply()
    event_count: u64, // number of events applied (bookkeeping; not part of fingerprint)
}
|
||||
|
||||
impl CoherenceState {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
nodes: HashMap::new(),
|
||||
edges: HashMap::new(),
|
||||
thresholds: HashMap::new(),
|
||||
energy_cache: None,
|
||||
event_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn apply(&mut self, event: &DomainEvent) {
|
||||
self.event_count += 1;
|
||||
self.energy_cache = None; // Invalidate cache
|
||||
|
||||
match event {
|
||||
DomainEvent::NodeAdded { node_id, state, .. } => {
|
||||
self.nodes.insert(*node_id, state.clone());
|
||||
}
|
||||
DomainEvent::NodeUpdated {
|
||||
node_id, new_state, ..
|
||||
} => {
|
||||
self.nodes.insert(*node_id, new_state.clone());
|
||||
}
|
||||
DomainEvent::NodeRemoved { node_id, .. } => {
|
||||
self.nodes.remove(node_id);
|
||||
// Remove incident edges
|
||||
self.edges
|
||||
.retain(|(s, t), _| *s != *node_id && *t != *node_id);
|
||||
}
|
||||
DomainEvent::EdgeAdded {
|
||||
source,
|
||||
target,
|
||||
weight,
|
||||
..
|
||||
} => {
|
||||
self.edges.insert((*source, *target), *weight);
|
||||
}
|
||||
DomainEvent::EdgeWeightUpdated {
|
||||
source,
|
||||
target,
|
||||
new_weight,
|
||||
..
|
||||
} => {
|
||||
self.edges.insert((*source, *target), *new_weight);
|
||||
}
|
||||
DomainEvent::EdgeRemoved { source, target, .. } => {
|
||||
self.edges.remove(&(*source, *target));
|
||||
}
|
||||
DomainEvent::ThresholdChanged {
|
||||
scope,
|
||||
new_threshold,
|
||||
..
|
||||
} => {
|
||||
self.thresholds.insert(scope.clone(), *new_threshold);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_energy(&mut self) -> f32 {
|
||||
if let Some(cached) = self.energy_cache {
|
||||
return cached;
|
||||
}
|
||||
|
||||
let mut total = 0.0;
|
||||
for ((src, tgt), weight) in &self.edges {
|
||||
if let (Some(src_state), Some(tgt_state)) = (self.nodes.get(src), self.nodes.get(tgt)) {
|
||||
let dim = src_state.len().min(tgt_state.len());
|
||||
let residual_norm_sq: f32 = src_state
|
||||
.iter()
|
||||
.take(dim)
|
||||
.zip(tgt_state.iter().take(dim))
|
||||
.map(|(a, b)| (a - b).powi(2))
|
||||
.sum();
|
||||
total += weight * residual_norm_sq;
|
||||
}
|
||||
}
|
||||
|
||||
self.energy_cache = Some(total);
|
||||
total
|
||||
}
|
||||
|
||||
/// Compute a deterministic fingerprint of the state
|
||||
fn fingerprint(&self) -> u64 {
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
|
||||
let mut hasher = DefaultHasher::new();
|
||||
|
||||
// Hash nodes in sorted order
|
||||
let mut node_keys: Vec<_> = self.nodes.keys().collect();
|
||||
node_keys.sort();
|
||||
for key in node_keys {
|
||||
key.hash(&mut hasher);
|
||||
let state = self.nodes.get(key).unwrap();
|
||||
for val in state {
|
||||
val.to_bits().hash(&mut hasher);
|
||||
}
|
||||
}
|
||||
|
||||
// Hash edges in sorted order
|
||||
let mut edge_keys: Vec<_> = self.edges.keys().collect();
|
||||
edge_keys.sort();
|
||||
for key in edge_keys {
|
||||
key.hash(&mut hasher);
|
||||
let weight = self.edges.get(key).unwrap();
|
||||
weight.to_bits().hash(&mut hasher);
|
||||
}
|
||||
|
||||
// Hash thresholds
|
||||
let mut threshold_keys: Vec<_> = self.thresholds.keys().collect();
|
||||
threshold_keys.sort();
|
||||
for key in threshold_keys {
|
||||
key.hash(&mut hasher);
|
||||
let val = self.thresholds.get(key).unwrap();
|
||||
val.to_bits().hash(&mut hasher);
|
||||
}
|
||||
|
||||
hasher.finish()
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// EVENT LOG
|
||||
// ============================================================================
|
||||
|
||||
/// Event log for replay
///
/// Append-only sequence of `DomainEvent`s; replaying it from an empty
/// `CoherenceState` reconstructs the state deterministically.
#[derive(Clone, Debug)]
struct EventLog {
    events: Vec<DomainEvent>, // stored in append order; replay preserves this order
}
|
||||
|
||||
impl EventLog {
|
||||
fn new() -> Self {
|
||||
Self { events: Vec::new() }
|
||||
}
|
||||
|
||||
fn append(&mut self, event: DomainEvent) {
|
||||
self.events.push(event);
|
||||
}
|
||||
|
||||
fn replay(&self) -> CoherenceState {
|
||||
let mut state = CoherenceState::new();
|
||||
for event in &self.events {
|
||||
state.apply(event);
|
||||
}
|
||||
state
|
||||
}
|
||||
|
||||
fn replay_until(&self, timestamp: u64) -> CoherenceState {
|
||||
let mut state = CoherenceState::new();
|
||||
for event in &self.events {
|
||||
if event.timestamp() <= timestamp {
|
||||
state.apply(event);
|
||||
}
|
||||
}
|
||||
state
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.events.len()
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: BASIC REPLAY DETERMINISM
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_empty_replay() {
    // Replaying an empty log twice yields two identical, empty states.
    let log = EventLog::new();

    let first = log.replay();
    let second = log.replay();

    assert_eq!(first.fingerprint(), second.fingerprint());
    assert_eq!(first.event_count, 0);
}
|
||||
|
||||
#[test]
fn test_single_event_replay() {
    // A one-event log must replay to the same single-node state every time.
    let mut log = EventLog::new();
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0, 0.5, 0.3],
        timestamp: 1000,
    });

    let first = log.replay();
    let second = log.replay();

    assert_eq!(first.fingerprint(), second.fingerprint());
    assert_eq!(first.nodes.len(), 1);
    assert_eq!(second.nodes.len(), 1);
}
|
||||
|
||||
#[test]
fn test_multiple_events_replay() {
    // Build a small 3-node, 2-edge path graph and replay it three times.
    let mut log = EventLog::new();

    let node_states = [vec![1.0, 0.0], vec![0.5, 0.5], vec![0.0, 1.0]];
    for (i, state) in node_states.iter().enumerate() {
        log.append(DomainEvent::NodeAdded {
            node_id: (i + 1) as u64,
            state: state.clone(),
            timestamp: 1000 + i as u64,
        });
    }
    for (i, &(source, target)) in [(1u64, 2u64), (2, 3)].iter().enumerate() {
        log.append(DomainEvent::EdgeAdded {
            source,
            target,
            weight: 1.0,
            timestamp: 1003 + i as u64,
        });
    }

    let replays = [log.replay(), log.replay(), log.replay()];

    // Every replay must produce the identical state...
    assert_eq!(replays[0].fingerprint(), replays[1].fingerprint());
    assert_eq!(replays[1].fingerprint(), replays[2].fingerprint());

    // ...with the full graph present.
    assert_eq!(replays[0].nodes.len(), 3);
    assert_eq!(replays[0].edges.len(), 2);
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: ENERGY DETERMINISM
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_energy_determinism() {
    // Two independent replays of the same log must agree on the energy value.
    let mut log = EventLog::new();
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0, 0.5, 0.3],
        timestamp: 1000,
    });
    log.append(DomainEvent::NodeAdded {
        node_id: 2,
        state: vec![0.8, 0.6, 0.4],
        timestamp: 1001,
    });
    log.append(DomainEvent::EdgeAdded {
        source: 1,
        target: 2,
        weight: 1.0,
        timestamp: 1002,
    });

    let (mut replay_a, mut replay_b) = (log.replay(), log.replay());
    let (energy_a, energy_b) = (replay_a.compute_energy(), replay_b.compute_energy());

    assert!(
        (energy_a - energy_b).abs() < 1e-10,
        "Energy should be deterministic: {} vs {}",
        energy_a,
        energy_b
    );
}
|
||||
|
||||
#[test]
fn test_energy_determinism_after_updates() {
    // Node and edge-weight updates must not break replay determinism of energy.
    let mut log = EventLog::new();
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0, 0.5],
        timestamp: 1000,
    });
    log.append(DomainEvent::NodeAdded {
        node_id: 2,
        state: vec![0.5, 0.5],
        timestamp: 1001,
    });
    log.append(DomainEvent::EdgeAdded {
        source: 1,
        target: 2,
        weight: 1.0,
        timestamp: 1002,
    });
    log.append(DomainEvent::NodeUpdated {
        node_id: 1,
        old_state: vec![1.0, 0.5],
        new_state: vec![0.7, 0.6],
        timestamp: 1003,
    });
    log.append(DomainEvent::EdgeWeightUpdated {
        source: 1,
        target: 2,
        old_weight: 1.0,
        new_weight: 2.0,
        timestamp: 1004,
    });

    let (mut replay_a, mut replay_b) = (log.replay(), log.replay());

    assert!(
        (replay_a.compute_energy() - replay_b.compute_energy()).abs() < 1e-10,
        "Energy should be deterministic after updates"
    );
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: PARTIAL REPLAY
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_partial_replay_consistent() {
    // replay_until uses an inclusive cut-off and is repeatable at any point.
    let mut log = EventLog::new();
    for i in 1..=10 {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![i as f32 / 10.0],
            timestamp: 1000 + i,
        });
    }

    let halfway = log.replay_until(1005);
    let full = log.replay_until(1010);

    assert_eq!(halfway.nodes.len(), 5);
    assert_eq!(full.nodes.len(), 10);

    // Same cut-off point -> same state.
    assert_eq!(halfway.fingerprint(), log.replay_until(1005).fingerprint());
}
|
||||
|
||||
#[test]
fn test_partial_replay_monotonic() {
    // The node count grows as the replay cut-off advances past each add event.
    let mut log = EventLog::new();
    for i in 1..=5 {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![i as f32],
            timestamp: 1000 + i,
        });
    }

    let mut seen = 0;
    for cutoff in 1001..=1005 {
        let node_count = log.replay_until(cutoff).nodes.len();
        assert!(node_count > seen || seen == 0);
        seen = node_count;
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: EVENT ORDER INDEPENDENCE
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_independent_events_commute() {
    // Additions of unrelated nodes must produce the same state in either order.
    let event_one = DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0],
        timestamp: 1000,
    };
    let event_two = DomainEvent::NodeAdded {
        node_id: 2,
        state: vec![2.0],
        timestamp: 1001,
    };

    let mut log_forward = EventLog::new();
    log_forward.append(event_one.clone());
    log_forward.append(event_two.clone());

    let mut log_reversed = EventLog::new();
    log_reversed.append(event_two);
    log_reversed.append(event_one);

    let state_forward = log_forward.replay();
    let state_reversed = log_reversed.replay();

    assert_eq!(state_forward.nodes.len(), state_reversed.nodes.len());
    assert_eq!(state_forward.fingerprint(), state_reversed.fingerprint());
}
|
||||
|
||||
#[test]
fn test_dependent_events_order_matters() {
    // Add-then-update must converge to the same node state as adding the
    // final value directly.
    let mut log_with_update = EventLog::new();
    log_with_update.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0],
        timestamp: 1000,
    });
    log_with_update.append(DomainEvent::NodeUpdated {
        node_id: 1,
        old_state: vec![1.0],
        new_state: vec![2.0],
        timestamp: 1001,
    });

    let mut log_direct = EventLog::new();
    log_direct.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![2.0],
        timestamp: 1000,
    });

    let updated = log_with_update.replay();
    let direct = log_direct.replay();

    // Both paths leave node 1 at state [2.0].
    assert_eq!(updated.nodes.get(&1), direct.nodes.get(&1));
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: LARGE SCALE REPLAY
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_large_event_log_replay() {
    // A 100-node, 200-edge graph must replay to the same fingerprint on
    // every pass.
    let num_nodes: u64 = 100;
    let num_edges: u64 = 200;

    let mut log = EventLog::new();
    for i in 0..num_nodes {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![i as f32 / num_nodes as f32; 4],
            timestamp: 1000 + i,
        });
    }
    for i in 0..num_edges {
        log.append(DomainEvent::EdgeAdded {
            source: i % num_nodes,
            target: (i + 1) % num_nodes,
            weight: 1.0,
            timestamp: 1000 + num_nodes + i,
        });
    }

    // Replay five times and compare all fingerprints to the first.
    let states: Vec<_> = (0..5).map(|_| log.replay()).collect();

    let reference = states[0].fingerprint();
    for state in &states {
        assert_eq!(state.fingerprint(), reference);
        assert_eq!(state.nodes.len(), num_nodes as usize);
    }
}
|
||||
|
||||
#[test]
fn test_replay_with_many_updates() {
    // Ten nodes each updated repeatedly; the final state must be replay-stable.
    let mut log = EventLog::new();
    for i in 0..10 {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![0.0; 3],
            timestamp: 1000 + i,
        });
    }

    // Cycle 100 updates round-robin over the ten nodes.
    for iteration in 0..100u64 {
        log.append(DomainEvent::NodeUpdated {
            node_id: iteration % 10,
            old_state: vec![0.0; 3], // Simplified
            new_state: vec![iteration as f32 / 100.0; 3],
            timestamp: 2000 + iteration,
        });
    }

    assert_eq!(log.replay().fingerprint(), log.replay().fingerprint());
    assert_eq!(log.len(), 110); // 10 adds + 100 updates
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: SNAPSHOT AND RESTORE
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_snapshot_consistency() {
    // A replay taken now must match a later replay_until at the same
    // cut-off, even after more events are appended.
    let mut log = EventLog::new();
    for i in 0..5 {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![i as f32],
            timestamp: 1000 + i,
        });
    }

    // "Snapshot" the current state via its fingerprint.
    let snapshot_fp = log.replay().fingerprint();

    // Append events after the snapshot point.
    for i in 5..10 {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![i as f32],
            timestamp: 2000 + i,
        });
    }

    // Restoring to the snapshot's last timestamp reproduces the snapshot.
    assert_eq!(log.replay_until(1004).fingerprint(), snapshot_fp);
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: CONCURRENT REPLAYS
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_concurrent_replays() {
    use std::sync::Arc;
    use std::thread;

    // Share one log across 8 threads; each replays it independently and
    // every thread must observe the same fingerprint.
    let mut log = EventLog::new();
    for i in 0..50 {
        log.append(DomainEvent::NodeAdded {
            node_id: i,
            state: vec![i as f32 / 50.0; 4],
            timestamp: 1000 + i,
        });
    }
    for i in 0..100 {
        log.append(DomainEvent::EdgeAdded {
            source: i % 50,
            target: (i + 1) % 50,
            weight: 1.0,
            timestamp: 2000 + i,
        });
    }

    let shared = Arc::new(log);

    let handles: Vec<_> = (0..8)
        .map(|_| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || shared.replay().fingerprint())
        })
        .collect();

    let fingerprints: Vec<u64> = handles.into_iter().map(|h| h.join().unwrap()).collect();

    let reference = fingerprints[0];
    for fp in &fingerprints {
        assert_eq!(
            *fp, reference,
            "All replays should produce the same fingerprint"
        );
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: IDEMPOTENCY
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_double_replay_idempotent() {
    // Replaying the same log twice — including a self-loop edge — gives
    // identical states and event counts.
    let mut log = EventLog::new();
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0, 0.5],
        timestamp: 1000,
    });
    log.append(DomainEvent::EdgeAdded {
        source: 1,
        target: 1, // Self-loop (edge case)
        weight: 0.5,
        timestamp: 1001,
    });

    let first = log.replay();
    let second = log.replay();

    assert_eq!(first.fingerprint(), second.fingerprint());
    assert_eq!(first.event_count, second.event_count);
}
|
||||
|
||||
// ============================================================================
|
||||
// TESTS: DELETION HANDLING
|
||||
// ============================================================================
|
||||
|
||||
#[test]
fn test_deletion_replay() {
    // Removing a node must cascade to its incident edge, deterministically.
    let mut log = EventLog::new();
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0],
        timestamp: 1000,
    });
    log.append(DomainEvent::NodeAdded {
        node_id: 2,
        state: vec![2.0],
        timestamp: 1001,
    });
    log.append(DomainEvent::EdgeAdded {
        source: 1,
        target: 2,
        weight: 1.0,
        timestamp: 1002,
    });
    log.append(DomainEvent::NodeRemoved {
        node_id: 1,
        timestamp: 1003,
    });

    let first = log.replay();
    let second = log.replay();

    assert_eq!(first.fingerprint(), second.fingerprint());
    assert_eq!(first.nodes.len(), 1); // Only node 2 remains
    assert_eq!(first.edges.len(), 0); // Edge was removed with node 1
}
|
||||
|
||||
#[test]
fn test_add_delete_add_determinism() {
    // Add, remove, then re-add the same node id with a new state; the
    // re-added value must win, deterministically.
    let mut log = EventLog::new();
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![1.0],
        timestamp: 1000,
    });
    log.append(DomainEvent::NodeRemoved {
        node_id: 1,
        timestamp: 1001,
    });
    log.append(DomainEvent::NodeAdded {
        node_id: 1,
        state: vec![2.0],
        timestamp: 1002,
    });

    let first = log.replay();
    let second = log.replay();

    assert_eq!(first.fingerprint(), second.fingerprint());
    assert_eq!(first.nodes.get(&1), Some(&vec![2.0]));
}
|
||||
1393
vendor/ruvector/crates/prime-radiant/tests/ruvllm_integration_tests.rs
vendored
Normal file
1393
vendor/ruvector/crates/prime-radiant/tests/ruvllm_integration_tests.rs
vendored
Normal file
File diff suppressed because it is too large
Load Diff
692
vendor/ruvector/crates/prime-radiant/tests/storage_tests.rs
vendored
Normal file
692
vendor/ruvector/crates/prime-radiant/tests/storage_tests.rs
vendored
Normal file
@@ -0,0 +1,692 @@
|
||||
//! Comprehensive Storage Layer Tests
|
||||
//!
|
||||
//! Tests for:
|
||||
//! - InMemoryStorage CRUD operations
|
||||
//! - FileStorage persistence
|
||||
//! - Concurrent access patterns
|
||||
//! - Governance storage operations
|
||||
|
||||
use prime_radiant::storage::{
|
||||
FileStorage, GovernanceStorage, GraphStorage, InMemoryStorage, StorageFormat,
|
||||
};
|
||||
use std::sync::{Arc, Barrier};
|
||||
use std::thread;
|
||||
use tempfile::TempDir;
|
||||
|
||||
// ============================================================================
|
||||
// InMemoryStorage Unit Tests
|
||||
// ============================================================================
|
||||
|
||||
mod in_memory_storage_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_store_and_retrieve_node() {
|
||||
let storage = InMemoryStorage::new();
|
||||
let state = vec![1.0, 2.0, 3.0];
|
||||
|
||||
storage.store_node("node-1", &state).unwrap();
|
||||
let retrieved = storage.get_node("node-1").unwrap();
|
||||
|
||||
assert!(retrieved.is_some());
|
||||
assert_eq!(retrieved.unwrap(), state);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_retrieve_nonexistent_node() {
|
||||
let storage = InMemoryStorage::new();
|
||||
let result = storage.get_node("nonexistent").unwrap();
|
||||
assert!(result.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_node_state() {
|
||||
let storage = InMemoryStorage::new();
|
||||
|
||||
storage.store_node("node-1", &[1.0, 0.0]).unwrap();
|
||||
storage.store_node("node-1", &[0.0, 1.0]).unwrap();
|
||||
|
||||
let retrieved = storage.get_node("node-1").unwrap().unwrap();
|
||||
assert_eq!(retrieved, vec![0.0, 1.0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_store_and_delete_edge() {
|
||||
let storage = InMemoryStorage::new();
|
||||
|
||||
storage.store_edge("a", "b", 1.5).unwrap();
|
||||
storage.store_edge("b", "c", 2.0).unwrap();
|
||||
|
||||
// Delete one edge
|
||||
storage.delete_edge("a", "b").unwrap();
|
||||
|
||||
// Should not fail on non-existent edge
|
||||
storage.delete_edge("x", "y").unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_similar_vectors() {
|
||||
let storage = InMemoryStorage::new();
|
||||
|
||||
// Store orthogonal vectors
|
||||
storage.store_node("north", &[0.0, 1.0, 0.0]).unwrap();
|
||||
storage.store_node("east", &[1.0, 0.0, 0.0]).unwrap();
|
||||
storage.store_node("south", &[0.0, -1.0, 0.0]).unwrap();
|
||||
storage.store_node("up", &[0.0, 0.0, 1.0]).unwrap();
|
||||
|
||||
// Query for similar to north
|
||||
let results = storage.find_similar(&[0.0, 1.0, 0.0], 2).unwrap();
|
||||
|
||||
assert_eq!(results.len(), 2);
|
||||
assert_eq!(results[0].0, "north"); // Exact match
|
||||
assert!((results[0].1 - 1.0).abs() < 0.001); // Similarity = 1.0
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_find_similar_empty_query() {
|
||||
let storage = InMemoryStorage::new();
|
||||
storage.store_node("a", &[1.0, 2.0]).unwrap();
|
||||
|
||||
let results = storage.find_similar(&[], 5).unwrap();
|
||||
assert!(results.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_governance_store_policy() {
|
||||
let storage = InMemoryStorage::new();
|
||||
|
||||
let policy_data = b"test policy bundle data";
|
||||
let id = storage.store_policy(policy_data).unwrap();
|
||||
|
||||
assert!(!id.is_empty());
|
||||
|
||||
let retrieved = storage.get_policy(&id).unwrap();
|
||||
assert!(retrieved.is_some());
|
||||
assert_eq!(retrieved.unwrap(), policy_data.to_vec());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_governance_store_witness() {
|
||||
let storage = InMemoryStorage::new();
|
||||
|
||||
let witness_data = b"witness record data";
|
||||
let id = storage.store_witness(witness_data).unwrap();
|
||||
|
||||
assert!(!id.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_governance_store_lineage() {
|
||||
let storage = InMemoryStorage::new();
|
||||
|
||||
let lineage_data = b"lineage record data";
|
||||
let id = storage.store_lineage(lineage_data).unwrap();
|
||||
|
||||
assert!(!id.is_empty());
|
||||
}
|
||||
|
||||
/// Ten writer threads, released simultaneously by a barrier, each insert
/// 100 nodes; the storage must absorb every write without loss.
#[test]
fn test_concurrent_node_writes() {
    const WRITERS: usize = 10;
    const NODES_PER_WRITER: usize = 100;

    let storage: Arc<InMemoryStorage> = Arc::new(InMemoryStorage::new());
    let start_gate = Arc::new(Barrier::new(WRITERS));

    let handles: Vec<_> = (0..WRITERS)
        .map(|writer| {
            let storage = Arc::clone(&storage);
            let gate = Arc::clone(&start_gate);
            thread::spawn(move || {
                // Line all writers up so their stores overlap maximally.
                gate.wait();
                for n in 0..NODES_PER_WRITER {
                    let node_id = format!("node-{}-{}", writer, n);
                    storage
                        .store_node(&node_id, &[writer as f32, n as f32])
                        .unwrap();
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    // Spot-check a node written mid-stream by thread 5.
    let sample = storage.get_node("node-5-50").unwrap();
    assert_eq!(sample, Some(vec![5.0, 50.0]));
}
|
||||
|
||||
/// Mixed reader and writer threads hammer the store concurrently; no
/// operation may fail or deadlock.
#[test]
fn test_concurrent_reads_and_writes() {
    const THREADS: usize = 8;

    let storage: Arc<InMemoryStorage> = Arc::new(InMemoryStorage::new());

    // Seed data so the reader threads have something to fetch.
    for i in 0..100 {
        storage
            .store_node(&format!("node-{}", i), &[i as f32])
            .unwrap();
    }

    let start_gate = Arc::new(Barrier::new(THREADS));

    let handles: Vec<_> = (0..THREADS)
        .map(|t| {
            let storage = Arc::clone(&storage);
            let gate = Arc::clone(&start_gate);
            thread::spawn(move || {
                gate.wait();
                for j in 0..50 {
                    if t % 2 == 0 {
                        // Even-numbered threads write fresh keys.
                        storage
                            .store_node(&format!("new-node-{}-{}", t, j), &[j as f32])
                            .unwrap();
                    } else {
                        // Odd-numbered threads read the pre-seeded keys.
                        let _ = storage.get_node(&format!("node-{}", j)).unwrap();
                    }
                }
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}
|
||||
|
||||
/// A 1024-dimensional vector survives a store/load round trip unchanged.
#[test]
fn test_large_vector_storage() {
    const DIM: usize = 1024;

    let storage = InMemoryStorage::new();

    // Values ramp linearly from 0.0 up to (DIM-1)/DIM so endpoints are
    // easy to verify after retrieval.
    let big: Vec<f32> = (0..DIM).map(|i| i as f32 / DIM as f32).collect();
    storage.store_node("large-vector", &big).unwrap();

    let loaded = storage.get_node("large-vector").unwrap().unwrap();
    assert_eq!(loaded.len(), DIM);
    assert!((loaded[0] - 0.0).abs() < 0.001);
    assert!((loaded[DIM - 1] - (DIM - 1) as f32 / DIM as f32).abs() < 0.001);
}
|
||||
|
||||
/// One thousand nodes can be stored and then addressed at random.
#[test]
fn test_many_nodes() {
    let storage = InMemoryStorage::new();

    // Encode each index as (i mod 100, i div 100) so stored values are
    // mechanically checkable afterwards.
    for i in 0..1000 {
        let node_id = format!("node-{}", i);
        storage
            .store_node(&node_id, &[(i % 100) as f32, (i / 100) as f32])
            .unwrap();
    }

    // 500 -> (0, 5); 999 -> (99, 9).
    let n500 = storage.get_node("node-500").unwrap().unwrap();
    assert_eq!(n500, vec![0.0, 5.0]);

    let n999 = storage.get_node("node-999").unwrap().unwrap();
    assert_eq!(n999, vec![99.0, 9.0]);
}
|
||||
}
|
||||
|
||||
// ============================================================================
// FileStorage Unit Tests
// ============================================================================

/// Unit tests for the file-backed storage implementation: node/edge CRUD,
/// on-disk persistence across instances, serialization formats, WAL
/// recovery, governance records, and concurrent access.
mod file_storage_tests {
    use super::*;

    /// Builds a `FileStorage` rooted in a fresh temporary directory.
    /// The `TempDir` guard is returned alongside so the directory is not
    /// deleted until the caller drops it.
    fn create_temp_storage() -> (FileStorage, TempDir) {
        let temp_dir = TempDir::new().unwrap();
        let storage = FileStorage::new(temp_dir.path()).unwrap();
        (storage, temp_dir)
    }

    /// A stored node state round-trips through the file backend intact.
    #[test]
    fn test_store_and_retrieve_node() {
        let (storage, _dir) = create_temp_storage();
        let state = vec![1.0, 2.0, 3.0, 4.0];

        storage.store_node("test-node", &state).unwrap();
        let retrieved = storage.get_node("test-node").unwrap();

        assert!(retrieved.is_some());
        assert_eq!(retrieved.unwrap(), state);
    }

    /// Looking up an unknown id is Ok(None), not an error.
    #[test]
    fn test_retrieve_nonexistent_node() {
        let (storage, _dir) = create_temp_storage();
        let result = storage.get_node("nonexistent").unwrap();
        assert!(result.is_none());
    }

    /// Edge insertion and deletion are reflected in the storage stats.
    #[test]
    fn test_store_and_delete_edge() {
        let (storage, _dir) = create_temp_storage();

        storage.store_edge("source", "target", 0.75).unwrap();
        let stats = storage.stats();
        assert_eq!(stats.edge_count, 1);

        storage.delete_edge("source", "target").unwrap();
        let stats = storage.stats();
        assert_eq!(stats.edge_count, 0);
    }

    /// Data written (and synced) by one instance is visible to a second
    /// instance opened on the same directory.
    #[test]
    fn test_persistence_across_instances() {
        let temp_dir = TempDir::new().unwrap();

        // First instance: write data
        {
            let storage = FileStorage::new(temp_dir.path()).unwrap();
            storage
                .store_node("persistent-node", &[1.0, 2.0, 3.0])
                .unwrap();
            storage.store_edge("a", "b", 1.5).unwrap();
            storage.sync().unwrap();
        }

        // Second instance: read data back
        {
            let storage = FileStorage::new(temp_dir.path()).unwrap();
            let node_state = storage.get_node("persistent-node").unwrap();

            assert!(node_state.is_some());
            assert_eq!(node_state.unwrap(), vec![1.0, 2.0, 3.0]);

            let stats = storage.stats();
            assert_eq!(stats.node_count, 1);
            assert_eq!(stats.edge_count, 1);
        }
    }

    /// JSON serialization format round-trips node data.
    #[test]
    fn test_json_format() {
        let temp_dir = TempDir::new().unwrap();
        let storage =
            FileStorage::with_options(temp_dir.path(), StorageFormat::Json, false).unwrap();

        storage.store_node("json-node", &[1.5, 2.5, 3.5]).unwrap();

        let retrieved = storage.get_node("json-node").unwrap();
        assert!(retrieved.is_some());
        assert_eq!(retrieved.unwrap(), vec![1.5, 2.5, 3.5]);
    }

    /// Bincode serialization format round-trips node data.
    #[test]
    fn test_bincode_format() {
        let temp_dir = TempDir::new().unwrap();
        let storage =
            FileStorage::with_options(temp_dir.path(), StorageFormat::Bincode, false).unwrap();

        storage.store_node("bincode-node", &[1.0, 2.0]).unwrap();

        let retrieved = storage.get_node("bincode-node").unwrap();
        assert!(retrieved.is_some());
        assert_eq!(retrieved.unwrap(), vec![1.0, 2.0]);
    }

    /// Dropping without sync() simulates a crash; a reopen with WAL
    /// enabled must recover the unsynced write from the log.
    #[test]
    fn test_wal_recovery() {
        let temp_dir = TempDir::new().unwrap();

        // Write with WAL enabled
        {
            let storage =
                FileStorage::with_options(temp_dir.path(), StorageFormat::Bincode, true).unwrap();
            storage.store_node("wal-node", &[1.0, 2.0, 3.0]).unwrap();
            // Don't call sync - simulate crash
        }

        // Re-open and verify WAL recovery
        {
            let storage =
                FileStorage::with_options(temp_dir.path(), StorageFormat::Bincode, true).unwrap();
            let node_state = storage.get_node("wal-node").unwrap();
            assert!(node_state.is_some());
        }
    }

    /// Governance policy bundles persist across instances by id.
    #[test]
    fn test_governance_policy_persistence() {
        let temp_dir = TempDir::new().unwrap();

        let policy_id;
        {
            let storage = FileStorage::new(temp_dir.path()).unwrap();
            policy_id = storage.store_policy(b"important policy data").unwrap();
            storage.sync().unwrap();
        }

        {
            let storage = FileStorage::new(temp_dir.path()).unwrap();
            let policy = storage.get_policy(&policy_id).unwrap();
            assert!(policy.is_some());
            assert_eq!(policy.unwrap(), b"important policy data".to_vec());
        }
    }

    /// Similarity search ranks the exact match first.
    #[test]
    fn test_find_similar_vectors() {
        let (storage, _dir) = create_temp_storage();

        storage.store_node("a", &[1.0, 0.0, 0.0]).unwrap();
        storage.store_node("b", &[0.9, 0.1, 0.0]).unwrap();
        storage.store_node("c", &[0.0, 1.0, 0.0]).unwrap();

        let results = storage.find_similar(&[1.0, 0.0, 0.0], 2).unwrap();

        assert_eq!(results.len(), 2);
        // "a" should be first (exact match)
        assert_eq!(results[0].0, "a");
    }

    /// stats() reports node/edge counts and the WAL flag.
    /// NOTE(review): assumes FileStorage::new enables WAL by default —
    /// confirm against the constructor.
    #[test]
    fn test_storage_stats() {
        let (storage, _dir) = create_temp_storage();

        storage.store_node("n1", &[1.0]).unwrap();
        storage.store_node("n2", &[2.0]).unwrap();
        storage.store_edge("n1", "n2", 1.0).unwrap();

        let stats = storage.stats();
        assert_eq!(stats.node_count, 2);
        assert_eq!(stats.edge_count, 1);
        assert!(stats.wal_enabled);
    }

    /// Four threads write 25 nodes each; all 100 writes must land.
    #[test]
    fn test_concurrent_file_operations() {
        let temp_dir = TempDir::new().unwrap();
        let storage: Arc<FileStorage> = Arc::new(FileStorage::new(temp_dir.path()).unwrap());

        let num_threads = 4;
        let barrier = Arc::new(Barrier::new(num_threads));
        let mut handles = vec![];

        for i in 0..num_threads {
            let storage_clone: Arc<FileStorage> = Arc::clone(&storage);
            let barrier_clone = Arc::clone(&barrier);

            let handle = thread::spawn(move || {
                // Release all threads at once to maximize write contention.
                barrier_clone.wait();

                for j in 0..25 {
                    let node_id = format!("concurrent-{}-{}", i, j);
                    storage_clone
                        .store_node(&node_id, &[i as f32, j as f32])
                        .unwrap();
                }
            });

            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        // Verify all writes succeeded
        let stats = storage.stats();
        assert_eq!(stats.node_count, 100);
    }

    /// Witness records can be retrieved by substring/action match.
    #[test]
    fn test_witness_storage_and_retrieval() {
        let (storage, _dir) = create_temp_storage();

        // Store multiple witnesses
        let w1_data = b"witness-1-action-abc";
        let w2_data = b"witness-2-action-xyz";
        let w3_data = b"witness-3-action-abc";

        storage.store_witness(w1_data).unwrap();
        storage.store_witness(w2_data).unwrap();
        storage.store_witness(w3_data).unwrap();

        // Search for witnesses containing "action-abc"
        let results = storage.get_witnesses_for_action("action-abc").unwrap();
        assert_eq!(results.len(), 2);
    }

    /// Lineage records store successfully and produce a non-empty id.
    #[test]
    fn test_lineage_storage() {
        let (storage, _dir) = create_temp_storage();

        let lineage_data = b"lineage record with dependencies";
        let id = storage.store_lineage(lineage_data).unwrap();

        assert!(!id.is_empty());
        // Lineages are write-only in the basic API, so just verify storage succeeded
    }
}
|
||||
|
||||
// ============================================================================
// Integration Tests: Storage Pipelines
// ============================================================================

/// End-to-end scenarios that combine backends and usage patterns:
/// multi-tenant key namespacing, governance isolation, durability across
/// a simulated restart, and a read-through cache over file storage.
mod integration_tests {
    use super::*;

    /// Test the complete storage flow for a multi-tenant scenario
    #[test]
    fn test_multi_tenant_isolation() {
        let storage = InMemoryStorage::new();

        // Tenant A data (with namespace prefix)
        storage.store_node("tenant-a::node-1", &[1.0, 0.0]).unwrap();
        storage.store_node("tenant-a::node-2", &[0.0, 1.0]).unwrap();

        // Tenant B data
        storage.store_node("tenant-b::node-1", &[0.5, 0.5]).unwrap();
        storage.store_node("tenant-b::node-2", &[0.3, 0.7]).unwrap();

        // Verify isolation - tenant A's node-1 is different from tenant B's
        let a_node = storage.get_node("tenant-a::node-1").unwrap().unwrap();
        let b_node = storage.get_node("tenant-b::node-1").unwrap().unwrap();

        assert_ne!(a_node, b_node);

        // Find similar should respect prefixes
        let results = storage.find_similar(&[1.0, 0.0], 4).unwrap();
        assert!(results.iter().any(|(id, _)| id == "tenant-a::node-1"));
    }

    /// Test governance data isolation
    #[test]
    fn test_governance_policy_isolation() {
        let storage = InMemoryStorage::new();

        // Store multiple policies
        let policy_a = storage.store_policy(b"policy-for-tenant-a").unwrap();
        let policy_b = storage.store_policy(b"policy-for-tenant-b").unwrap();

        // Each policy should have a unique ID
        assert_ne!(policy_a, policy_b);

        // Retrieval should work independently
        let a_data = storage.get_policy(&policy_a).unwrap().unwrap();
        let b_data = storage.get_policy(&policy_b).unwrap().unwrap();

        assert_eq!(a_data, b"policy-for-tenant-a".to_vec());
        assert_eq!(b_data, b"policy-for-tenant-b".to_vec());
    }

    /// Test file storage survives process restart
    #[test]
    fn test_file_storage_durability() {
        let temp_dir = TempDir::new().unwrap();

        // Simulate first process
        {
            let storage = FileStorage::new(temp_dir.path()).unwrap();

            // Store graph data
            storage
                .store_node("persistent-1", &[1.0, 2.0, 3.0])
                .unwrap();
            storage
                .store_node("persistent-2", &[4.0, 5.0, 6.0])
                .unwrap();
            storage
                .store_edge("persistent-1", "persistent-2", 0.5)
                .unwrap();

            // Store governance data
            storage.store_policy(b"durable-policy").unwrap();
            storage.store_witness(b"durable-witness").unwrap();

            storage.sync().unwrap();
            // Storage dropped here
        }

        // Simulate second process (restart)
        {
            let storage = FileStorage::new(temp_dir.path()).unwrap();

            // All data should be present
            let stats = storage.stats();
            assert_eq!(stats.node_count, 2);
            assert_eq!(stats.edge_count, 1);

            let node1 = storage.get_node("persistent-1").unwrap().unwrap();
            assert_eq!(node1, vec![1.0, 2.0, 3.0]);
        }
    }

    /// Test hybrid storage (memory + file) fallback pattern
    #[test]
    fn test_storage_fallback_pattern() {
        let temp_dir = TempDir::new().unwrap();
        let file_storage: Arc<FileStorage> = Arc::new(FileStorage::new(temp_dir.path()).unwrap());
        let memory_cache = InMemoryStorage::new();

        // Simulate a read-through cache pattern
        let node_id = "cached-node";
        let state = vec![1.0, 2.0, 3.0];

        // Write to persistent storage
        file_storage.store_node(node_id, &state).unwrap();

        // Check cache first (miss)
        let cached = memory_cache.get_node(node_id).unwrap();
        assert!(cached.is_none());

        // Read from persistent storage
        let persistent = file_storage.get_node(node_id).unwrap().unwrap();

        // Populate cache
        memory_cache.store_node(node_id, &persistent).unwrap();

        // Now cache hit
        let cached = memory_cache.get_node(node_id).unwrap();
        assert!(cached.is_some());
        assert_eq!(cached.unwrap(), state);
    }
}
|
||||
|
||||
// ============================================================================
// Property-Based Tests
// ============================================================================

/// Randomized invariants checked with proptest: energy non-negativity,
/// exact store/retrieve round-trips for both backends, self-similarity,
/// and witness insertion under generated load.
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        /// Energy (squared norm) is always non-negative
        #[test]
        fn energy_is_non_negative(
            values in prop::collection::vec(-1000.0f32..1000.0, 1..100)
        ) {
            // For any residual vector, its squared norm (energy) is non-negative
            let energy: f32 = values.iter().map(|v| v * v).sum();
            prop_assert!(energy >= 0.0);
        }

        /// Zero residual implies zero energy
        #[test]
        fn zero_residual_zero_energy(dim in 1usize..100) {
            let zeros = vec![0.0f32; dim];
            let energy: f32 = zeros.iter().map(|v| v * v).sum();
            prop_assert!((energy - 0.0).abs() < 1e-10);
        }

        /// Storing and retrieving preserves data exactly
        #[test]
        fn store_retrieve_preserves_data(
            node_id in "[a-z]{1,10}",
            state in prop::collection::vec(-1000.0f32..1000.0, 1..10)
        ) {
            let storage = InMemoryStorage::new();
            storage.store_node(&node_id, &state).unwrap();

            // Exact bitwise equality, not approximate: storage must not
            // transform the floats.
            let retrieved = storage.get_node(&node_id).unwrap().unwrap();
            prop_assert_eq!(retrieved, state);
        }

        /// File storage preserves data exactly
        #[test]
        fn file_store_retrieve_preserves_data(
            node_id in "[a-z]{1,10}",
            state in prop::collection::vec(-100.0f32..100.0, 1..10)
        ) {
            let temp_dir = TempDir::new().unwrap();
            let storage = FileStorage::new(temp_dir.path()).unwrap();

            storage.store_node(&node_id, &state).unwrap();
            let retrieved = storage.get_node(&node_id).unwrap().unwrap();

            prop_assert_eq!(retrieved, state);
        }

        /// Similar vectors have high cosine similarity
        #[test]
        fn similar_vectors_high_similarity(
            base in prop::collection::vec(0.1f32..1.0, 3..10)
        ) {
            let storage = InMemoryStorage::new();

            // Normalize base
            // (components are >= 0.1, so the norm is strictly positive
            // and the division is safe)
            let norm: f32 = base.iter().map(|v| v * v).sum::<f32>().sqrt();
            let normalized: Vec<f32> = base.iter().map(|v| v / norm).collect();

            storage.store_node("base", &normalized).unwrap();

            // Query with the same vector should give similarity ~1.0
            let results = storage.find_similar(&normalized, 1).unwrap();

            if let Some((id, sim)) = results.first() {
                prop_assert_eq!(id, "base");
                prop_assert!(*sim > 0.99);
            }
        }

        /// Witness chain maintains order
        #[test]
        fn witness_chain_order(count in 1usize..20) {
            let storage = InMemoryStorage::new();

            for i in 0..count {
                let data = format!("witness-{}", i);
                let _ = storage.store_witness(data.as_bytes()).unwrap();
            }

            // Each witness should have been stored
            // (We can't verify order without access to internal state,
            // but this verifies no failures under load)
        }
    }
}
|
||||
Reference in New Issue
Block a user