Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,362 @@
//! Unit tests for exo-backend-classical (ruvector integration)
#[cfg(test)]
mod substrate_backend_impl_tests {
    //! Placeholder tests for `SubstrateBackend` similarity search on the
    //! classical (ruvector) backend. Every body below is a commented-out
    //! sketch; each test is marked `#[ignore]` so the suite cannot report a
    //! vacuous pass before the implementation lands.
    use super::*;
    // use exo_backend_classical::*;
    // use exo_core::{SubstrateBackend, Pattern, Filter};

    /// Backend construction from an explicit HNSW configuration.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_classical_backend_construction() {
        // Test creating classical backend
        // let config = ClassicalBackendConfig {
        //     hnsw_m: 16,
        //     hnsw_ef_construction: 200,
        //     dimension: 128,
        // };
        //
        // let backend = ClassicalBackend::new(config).unwrap();
        //
        // assert!(backend.is_initialized());
    }

    /// k-NN search returns exactly `k` hits when the index holds enough vectors.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_similarity_search_basic() {
        // Test basic similarity search
        // let backend = setup_backend();
        //
        // // Insert some vectors
        // for i in 0..100 {
        //     let vector = generate_random_vector(128);
        //     backend.insert(&vector, &metadata(i)).unwrap();
        // }
        //
        // let query = generate_random_vector(128);
        // let results = backend.similarity_search(&query, 10, None).unwrap();
        //
        // assert_eq!(results.len(), 10);
    }

    /// A metadata filter must restrict every returned hit.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_similarity_search_with_filter() {
        // Test similarity search with metadata filter
        // let backend = setup_backend();
        //
        // let filter = Filter::new("category", "test");
        // let results = backend.similarity_search(&query, 10, Some(&filter)).unwrap();
        //
        // // All results should match filter
        // assert!(results.iter().all(|r| r.metadata.get("category") == Some("test")));
    }

    /// Searching an empty index yields no results (and no error).
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_similarity_search_empty_index() {
        // Test search on empty index
        // let backend = ClassicalBackend::new(config).unwrap();
        // let query = vec![0.1, 0.2, 0.3];
        //
        // let results = backend.similarity_search(&query, 10, None).unwrap();
        //
        // assert!(results.is_empty());
    }

    /// Requesting more results than the index holds returns only what exists.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_similarity_search_k_larger_than_index() {
        // Test requesting more results than available
        // let backend = setup_backend();
        //
        // // Insert only 5 vectors
        // for i in 0..5 {
        //     backend.insert(&vector(i), &metadata(i)).unwrap();
        // }
        //
        // // Request 10
        // let results = backend.similarity_search(&query, 10, None).unwrap();
        //
        // assert_eq!(results.len(), 5); // Should return only what's available
    }
}
#[cfg(test)]
mod manifold_deform_tests {
    //! Placeholder tests for `manifold_deform` on the classical backend,
    //! which is expected to degrade to a discrete insert. Bodies are sketches;
    //! tests are marked `#[ignore]` so they cannot report a vacuous pass.
    use super::*;

    /// `manifold_deform` should produce a `ManifoldDelta::DiscreteInsert`.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_manifold_deform_as_insert() {
        // Test that manifold_deform performs discrete insert on classical backend
        // let backend = setup_backend();
        //
        // let pattern = Pattern {
        //     embedding: vec![0.1, 0.2, 0.3],
        //     metadata: Metadata::default(),
        //     timestamp: SubstrateTime::now(),
        //     antecedents: vec![],
        // };
        //
        // let delta = backend.manifold_deform(&pattern, 0.5).unwrap();
        //
        // match delta {
        //     ManifoldDelta::DiscreteInsert { id } => {
        //         assert!(backend.contains(id));
        //     }
        //     _ => panic!("Expected DiscreteInsert"),
        // }
    }

    /// The `learning_rate` argument must not change the insert behavior.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_manifold_deform_ignores_learning_rate() {
        // Classical backend should ignore learning_rate parameter
        // let backend = setup_backend();
        //
        // let delta1 = backend.manifold_deform(&pattern, 0.1).unwrap();
        // let delta2 = backend.manifold_deform(&pattern, 0.9).unwrap();
        //
        // // Both should perform same insert operation
    }
}
#[cfg(test)]
mod hyperedge_query_tests {
    //! Placeholder tests for topological hyperedge queries; the classical
    //! backend is expected to answer advanced queries with `NotSupported`.
    //! Marked `#[ignore]` to avoid vacuous passes.
    use super::*;

    /// Sheaf-consistency queries should return `HyperedgeResult::NotSupported`.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_hyperedge_query_not_supported() {
        // Test that advanced topological queries return NotSupported
        // let backend = setup_backend();
        //
        // let query = TopologicalQuery::SheafConsistency {
        //     local_sections: vec![],
        // };
        //
        // let result = backend.hyperedge_query(&query).unwrap();
        //
        // assert!(matches!(result, HyperedgeResult::NotSupported));
    }

    /// Basic hyperedge operations, if the backend exposes them.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_hyperedge_query_basic_support() {
        // Test basic hyperedge operations if supported
        // May use ruvector-graph hyperedge features
    }
}
#[cfg(test)]
mod ruvector_core_integration_tests {
    //! Placeholder tests for the ruvector-core integration (HNSW index,
    //! metadata storage, persistence). Marked `#[ignore]` to avoid vacuous
    //! passes.
    use super::*;

    /// HNSW construction parameters should be applied to the underlying index.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_core_hnsw() {
        // Test integration with ruvector-core HNSW index
        // let backend = ClassicalBackend::new(config).unwrap();
        //
        // // Verify HNSW parameters applied
        // assert_eq!(backend.hnsw_config().m, 16);
        // assert_eq!(backend.hnsw_config().ef_construction, 200);
    }

    /// Metadata should round-trip through ruvector-core storage.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_core_metadata() {
        // Test metadata storage via ruvector-core
    }

    /// The index should round-trip through ruvector-core save/load.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_core_persistence() {
        // Test save/load via ruvector-core
    }
}
#[cfg(test)]
mod ruvector_graph_integration_tests {
    //! Placeholder tests for the ruvector-graph integration. Marked
    //! `#[ignore]` to avoid vacuous passes.
    use super::*;

    /// Nodes/edges created through the backend's GraphDatabase are queryable.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_graph_database() {
        // Test GraphDatabase integration
        // let backend = setup_backend_with_graph();
        //
        // // Create entities and edges
        // let e1 = backend.graph_db.add_node(data1);
        // let e2 = backend.graph_db.add_node(data2);
        // backend.graph_db.add_edge(e1, e2, relation);
        //
        // // Query graph
        // let neighbors = backend.graph_db.neighbors(e1);
        // assert!(neighbors.contains(&e2));
    }

    /// Hyperedge support offered by ruvector-graph.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_graph_hyperedge() {
        // Test ruvector-graph hyperedge support
    }
}
#[cfg(test)]
mod ruvector_gnn_integration_tests {
    //! Placeholder tests for the ruvector-gnn integration. Marked
    //! `#[ignore]` to avoid vacuous passes.
    use super::*;

    /// A GNN layer forward pass should produce embeddings for the graph.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_gnn_layer() {
        // Test GNN layer integration
        // let backend = setup_backend_with_gnn();
        //
        // // Apply GNN layer
        // let embeddings = backend.gnn_layer.forward(&graph);
        //
        // assert!(embeddings.len() > 0);
    }

    /// Message passing through the GNN layer.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_ruvector_gnn_message_passing() {
        // Test message passing via GNN
    }
}
#[cfg(test)]
mod error_handling_tests {
    //! Placeholder tests for ruvector-to-backend error conversion.
    //! Marked `#[ignore]` to avoid vacuous passes.
    use super::*;

    /// A ruvector error (e.g. dimension mismatch) surfaces as a backend error.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_error_conversion() {
        // Test ruvector error conversion to SubstrateBackend::Error
        // let backend = setup_backend();
        //
        // // Trigger ruvector error (e.g., invalid dimension)
        // let invalid_vector = vec![0.1]; // Wrong dimension
        // let result = backend.similarity_search(&invalid_vector, 10, None);
        //
        // assert!(result.is_err());
    }

    /// Errors should implement a human-readable `Display`.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_error_display() {
        // Test error display implementation
    }
}
#[cfg(test)]
mod performance_tests {
    //! Placeholder latency/throughput tests. Marked `#[ignore]` to avoid
    //! vacuous passes. NOTE(review): when these are implemented, wall-clock
    //! assertions under `cargo test` run on an unoptimized build — consider
    //! moving the targets to `criterion` benchmarks on a release build.
    use super::*;

    /// Search latency target: <10ms on a 100k-vector index.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_search_latency() {
        // Test search latency meets targets
        // let backend = setup_large_backend(100000);
        //
        // let start = Instant::now();
        // backend.similarity_search(&query, 10, None).unwrap();
        // let duration = start.elapsed();
        //
        // assert!(duration.as_millis() < 10); // <10ms target
    }

    /// Insert throughput target: >10k ops/s.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_insert_throughput() {
        // Test insert throughput
        // let backend = setup_backend();
        //
        // let start = Instant::now();
        // for i in 0..10000 {
        //     backend.manifold_deform(&pattern(i), 0.5).unwrap();
        // }
        // let duration = start.elapsed();
        //
        // let throughput = 10000.0 / duration.as_secs_f64();
        // assert!(throughput > 10000.0); // >10k ops/s target
    }
}
#[cfg(test)]
mod memory_tests {
    //! Placeholder memory-footprint test. Marked `#[ignore]` to avoid a
    //! vacuous pass.
    use super::*;

    /// Per-vector memory overhead target: <1KB.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_memory_usage() {
        // Test memory footprint
        // let backend = setup_backend();
        //
        // let initial_mem = current_memory_usage();
        //
        // // Insert vectors
        // for i in 0..100000 {
        //     backend.manifold_deform(&pattern(i), 0.5).unwrap();
        // }
        //
        // let final_mem = current_memory_usage();
        // let mem_per_vector = (final_mem - initial_mem) / 100000;
        //
        // // Should be reasonable per-vector overhead
        // assert!(mem_per_vector < 1024); // <1KB per vector
    }
}
#[cfg(test)]
mod concurrency_tests {
    //! Placeholder tests for concurrent access to an `Arc`-shared backend.
    //! Marked `#[ignore]` to avoid vacuous passes.
    use super::*;

    /// Ten threads searching the same shared backend concurrently.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_concurrent_searches() {
        // Test concurrent search operations
        // let backend = Arc::new(setup_backend());
        //
        // let handles: Vec<_> = (0..10).map(|_| {
        //     let backend = backend.clone();
        //     std::thread::spawn(move || {
        //         backend.similarity_search(&random_query(), 10, None).unwrap()
        //     })
        // }).collect();
        //
        // for handle in handles {
        //     let results = handle.join().unwrap();
        //     assert_eq!(results.len(), 10);
        // }
    }

    /// Concurrent inserts into the same backend.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_concurrent_inserts() {
        // Test concurrent insert operations
    }
}
#[cfg(test)]
mod edge_cases_tests {
    //! Placeholder edge-case tests (degenerate dimensions, extreme k values,
    //! non-finite query components). Marked `#[ignore]` to avoid vacuous
    //! passes.
    use super::*;

    /// Zero-dimension configuration must be rejected at construction.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_zero_dimension() {
        // Test error on zero-dimension vectors
        // let config = ClassicalBackendConfig {
        //     dimension: 0,
        //     ..Default::default()
        // };
        //
        // let result = ClassicalBackend::new(config);
        // assert!(result.is_err());
    }

    /// k=0 returns nothing; k=usize::MAX returns everything available.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_extreme_k_values() {
        // Test with k=0 and k=usize::MAX
        // let backend = setup_backend();
        //
        // let results_zero = backend.similarity_search(&query, 0, None).unwrap();
        // assert!(results_zero.is_empty());
        //
        // let results_max = backend.similarity_search(&query, usize::MAX, None).unwrap();
        // // Should return all available results
    }

    /// A NaN component in the query must be rejected.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_nan_in_query() {
        // Test handling of NaN in query vector
        // let backend = setup_backend();
        // let query_with_nan = vec![f32::NAN, 0.2, 0.3];
        //
        // let result = backend.similarity_search(&query_with_nan, 10, None);
        // assert!(result.is_err());
    }

    /// An infinite component in the query must be handled explicitly.
    #[test]
    #[ignore = "placeholder: sketch only, no implementation yet"]
    fn test_infinity_in_query() {
        // Test handling of infinity in query vector
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,205 @@
//! Performance benchmarks for EXO-AI cognitive substrate
//!
//! Tests the performance of theoretical framework implementations
use std::time::Instant;
// EXO-AI crates
use exo_core::{Metadata, Pattern, PatternId, SubstrateTime};
use exo_federation::crypto::PostQuantumKeypair;
use exo_temporal::{ConsolidationConfig, Query, TemporalConfig, TemporalMemory};
/// Embedding dimensionality used for every generated benchmark vector.
const VECTOR_DIM: usize = 384;
/// Number of patterns inserted during the temporal-memory benchmark.
const NUM_VECTORS: usize = 1_000;
/// Intended k for nearest-neighbour queries.
/// NOTE(review): not referenced by the benchmarks visible in this file — confirm before removing.
const K_NEAREST: usize = 10;
/// Builds a deterministic pseudo-random vector of `dim` floats in `[0, 1]`.
///
/// Uses a 64-bit linear congruential generator seeded with `seed`, so the
/// same `(dim, seed)` pair always yields the same vector — benchmarks stay
/// reproducible without pulling in an RNG crate.
fn generate_random_vector(dim: usize, seed: u64) -> Vec<f32> {
    let mut state = seed;
    (0..dim)
        .map(|_| {
            // Knuth's MMIX LCG step; wrapping arithmetic is the point here.
            state = state.wrapping_mul(6364136223846793005).wrapping_add(1);
            (state as f32) / (u64::MAX as f32)
        })
        .collect()
}
/// Micro-benchmark for `TemporalMemory`: insert, consolidate, then search.
///
/// Runs as a `#[test]` so it executes under `cargo test`; test builds are
/// unoptimized by default, so printed timings are indicative only. Statement
/// order is significant: each `Instant::now()` / `elapsed()` pair must
/// bracket exactly the phase being measured.
#[test]
fn benchmark_temporal_memory() {
    println!("\n=== EXO-AI Temporal Memory Performance ===\n");
    // Deterministic corpus (one seed per index) so runs are comparable.
    let vectors: Vec<Vec<f32>> = (0..NUM_VECTORS)
        .map(|i| generate_random_vector(VECTOR_DIM, i as u64))
        .collect();
    // salience_threshold = 0.0 so every stored pattern is eligible for the
    // consolidation phase below.
    let config = TemporalConfig {
        consolidation: ConsolidationConfig {
            salience_threshold: 0.0,
            ..Default::default()
        },
        ..Default::default()
    };
    let temporal = TemporalMemory::new(config);
    // Insert benchmark
    let start = Instant::now();
    for vec in vectors.iter() {
        let pattern = Pattern {
            id: PatternId::new(),
            embedding: vec.clone(),
            metadata: Metadata::default(),
            timestamp: SubstrateTime::now(),
            antecedents: Vec::new(),
            salience: 1.0,
        };
        temporal.store(pattern, &[]).unwrap();
    }
    let insert_time = start.elapsed();
    println!("Insert {} patterns: {:?}", NUM_VECTORS, insert_time);
    println!(" Per insert: {:?}", insert_time / NUM_VECTORS as u32);
    // Consolidation benchmark: single pass over everything stored above.
    let start = Instant::now();
    let result = temporal.consolidate();
    let consolidate_time = start.elapsed();
    println!("\nConsolidate: {:?}", consolidate_time);
    println!(" Patterns consolidated: {}", result.num_consolidated);
    // Search benchmark: one query (seed disjoint from the corpus seeds),
    // repeated 100 times against long-term memory.
    let query = Query::from_embedding(generate_random_vector(VECTOR_DIM, 999999));
    let start = Instant::now();
    for _ in 0..100 {
        let _ = temporal.long_term().search(&query);
    }
    let search_time = start.elapsed();
    println!("\n100 searches: {:?}", search_time);
    println!(" Per search: {:?}", search_time / 100);
}
/// Micro-benchmark for the IIT Phi calculation over a small reentrant network.
///
/// Builds a fixed 5-node directed graph with one feedback edge (5 -> 1) and
/// identical node activations, then times 1000 `compute_phi` calls. Runs
/// under `cargo test` (unoptimized build), so timings are indicative only.
#[test]
fn benchmark_consciousness_metrics() {
    use exo_core::consciousness::{ConsciousnessCalculator, NodeState, SubstrateRegion};
    use std::collections::HashMap;
    println!("\n=== IIT Phi Calculation Performance ===\n");
    // Create a small reentrant network
    let nodes = vec![1, 2, 3, 4, 5];
    let mut connections = HashMap::new();
    connections.insert(1, vec![2, 3]);
    connections.insert(2, vec![4]);
    connections.insert(3, vec![4]);
    connections.insert(4, vec![5]);
    connections.insert(5, vec![1]); // Feedback loop
    // Same activation state on every node — input is constant across calls.
    let mut states = HashMap::new();
    for &node in &nodes {
        states.insert(
            node,
            NodeState {
                activation: 0.5,
                previous_activation: 0.4,
            },
        );
    }
    let region = SubstrateRegion {
        id: "test".to_string(),
        nodes,
        connections,
        states,
        has_reentrant_architecture: true,
    };
    // NOTE(review): `100` is presumably an iteration/sample budget for the
    // calculator — confirm against ConsciousnessCalculator::new.
    let calculator = ConsciousnessCalculator::new(100);
    let start = Instant::now();
    let mut total_phi = 0.0;
    // Accumulate phi so the computation cannot be optimized away, and report
    // the average afterwards.
    for _ in 0..1000 {
        let result = calculator.compute_phi(&region);
        total_phi += result.phi;
    }
    let phi_time = start.elapsed();
    println!("1000 Phi calculations: {:?}", phi_time);
    println!(" Per calculation: {:?}", phi_time / 1000);
    println!(" Average Phi: {:.4}", total_phi / 1000.0);
}
/// Micro-benchmark for Landauer-bound thermodynamic operation tracking.
///
/// Records 100k iterations of two operations each (a 384-dim vector
/// similarity plus a 1536-byte memory write — 200k recordings total), then
/// prints the tracker's efficiency report. Timings come from an unoptimized
/// test build and are indicative only.
#[test]
fn benchmark_thermodynamic_tracking() {
    use exo_core::thermodynamics::{Operation, ThermodynamicTracker};
    println!("\n=== Landauer Thermodynamic Tracking Performance ===\n");
    let tracker = ThermodynamicTracker::room_temperature();
    let start = Instant::now();
    for _ in 0..100_000 {
        tracker.record_operation(Operation::VectorSimilarity { dimensions: 384 });
        tracker.record_operation(Operation::MemoryWrite { bytes: 1536 });
    }
    let track_time = start.elapsed();
    println!("200,000 operation recordings: {:?}", track_time);
    println!(" Per operation: {:?}", track_time / 200_000);
    let report = tracker.efficiency_report();
    println!("\nEfficiency Report:");
    println!(" Total bit erasures: {}", report.total_bit_erasures);
    println!(
        " Landauer minimum: {:.2e} J",
        report.landauer_minimum_joules
    );
    println!(
        " Estimated actual: {:.2e} J",
        report.estimated_actual_joules
    );
    println!(
        " Efficiency ratio: {:.0}x above Landauer",
        report.efficiency_ratio
    );
    // NOTE(review): division assumes estimated_actual_joules > 0; plausible
    // since operations were recorded above, but confirm in the tracker API.
    println!(
        " Reversible savings: {:.2}%",
        (report.reversible_savings_potential / report.estimated_actual_joules) * 100.0
    );
}
/// Micro-benchmark for the post-quantum KEM: key generation, encapsulation,
/// and decapsulation via `PostQuantumKeypair`.
///
/// Timings come from an unoptimized test build and are indicative only.
/// The printed sizes (1568-byte public key and ciphertext) match Kyber-1024.
#[test]
fn benchmark_post_quantum_crypto() {
    println!("\n=== Post-Quantum Cryptography Performance ===\n");
    // Key generation
    let start = Instant::now();
    let mut keypairs = Vec::new();
    for _ in 0..100 {
        keypairs.push(PostQuantumKeypair::generate());
    }
    let keygen_time = start.elapsed();
    println!("100 Kyber-1024 keypair generations: {:?}", keygen_time);
    println!(" Per keypair: {:?}", keygen_time / 100);
    // Encapsulation: one shared-secret encapsulation per keypair.
    // (`take(100)` is a no-op here since exactly 100 keypairs exist.)
    let start = Instant::now();
    for keypair in keypairs.iter().take(100) {
        let _ = PostQuantumKeypair::encapsulate(keypair.public_key()).unwrap();
    }
    let encap_time = start.elapsed();
    println!("\n100 encapsulations: {:?}", encap_time);
    println!(" Per encapsulation: {:?}", encap_time / 100);
    // Decapsulation: open the same ciphertext 100 times with its matching key
    // (the ciphertext is created outside the timed region).
    let keypair = &keypairs[0];
    let (_, ciphertext) = PostQuantumKeypair::encapsulate(keypair.public_key()).unwrap();
    let start = Instant::now();
    for _ in 0..100 {
        let _ = keypair.decapsulate(&ciphertext).unwrap();
    }
    let decap_time = start.elapsed();
    println!("\n100 decapsulations: {:?}", decap_time);
    println!(" Per decapsulation: {:?}", decap_time / 100);
    println!("\nSecurity: NIST Level 5 (256-bit post-quantum)");
    println!("Public key size: 1568 bytes");
    println!("Ciphertext size: 1568 bytes");
}

View File

@@ -0,0 +1,121 @@
//! End-to-end integration test for the full 5-phase transfer pipeline.
//!
//! This test exercises `ExoTransferOrchestrator` which wires together:
//! - Phase 1: Thompson-sampling domain bridge
//! - Phase 2: Transfer manifold (exo-manifold)
//! - Phase 3: Transfer timeline (exo-temporal)
//! - Phase 4: Transfer CRDT (exo-federation)
//! - Phase 5: Emergent detection (exo-exotic)
use exo_backend_classical::transfer_orchestrator::ExoTransferOrchestrator;
/// One full orchestrator cycle: baseline established, a prior stored in the
/// manifold, and the CRDT populated (Phase 4).
#[test]
fn test_full_transfer_pipeline_single_cycle() {
    let mut orchestrator = ExoTransferOrchestrator::new("e2e_node");

    // Fresh orchestrator: nothing learned yet, cycle counter at zero.
    assert!(orchestrator.best_prior().is_none());
    assert_eq!(orchestrator.cycle(), 0);

    // First cycle establishes the baseline.
    let outcome = orchestrator.run_cycle();
    assert_eq!(outcome.cycle, 1, "cycle counter should increment");
    let score = outcome.eval_score;
    assert!(
        (0.0..=1.0).contains(&score),
        "eval_score must be in [0, 1]: got {}",
        score
    );
    assert!(
        outcome.manifold_entries >= 1,
        "at least one prior should be stored in the manifold"
    );

    // Phase 4: the CRDT now holds a prior for the (src, dst) pair.
    assert!(
        orchestrator.best_prior().is_some(),
        "CRDT should hold a prior after the first cycle"
    );
}
/// Six cycles plus a seventh: every phase accumulates state, and the CRDT
/// prior carries the expected domain pair and bounded statistics.
#[test]
fn test_full_transfer_pipeline_multi_cycle() {
    let mut orchestrator = ExoTransferOrchestrator::new("e2e_multi");

    // Let all phases accumulate state over several cycles.
    for expected in 1..=6u64 {
        let outcome = orchestrator.run_cycle();
        assert_eq!(outcome.cycle, expected);
        assert!((0.0..=1.0).contains(&outcome.eval_score));
    }

    // Cycle 7: the manifold still holds the (src, dst) prior...
    let last = orchestrator.run_cycle();
    assert_eq!(last.cycle, 7);
    assert!(last.manifold_entries >= 1);
    // ...and the emergence detector produces a usable (finite) score.
    assert!(last.emergence_score.is_finite());

    // The CRDT knows both domain ids and reports values in range.
    let prior = orchestrator.best_prior().expect("CRDT must hold a prior");
    assert_eq!(prior.src_domain, "exo-retrieval");
    assert_eq!(prior.dst_domain, "exo-graph");
    assert!((0.0..=1.0).contains(&prior.improvement));
    assert!((0.0..=1.0).contains(&prior.confidence));
    assert!(prior.cycle >= 1);
}
/// Emergence scores across repeated cycles are finite and non-negative.
/// (Despite the test name, monotone growth is not asserted — only validity
/// of each score.)
#[test]
fn test_transfer_emergence_increases_with_cycles() {
    let mut orchestrator = ExoTransferOrchestrator::new("e2e_emergence");

    // Cycle 1 records the baseline score / initial emergence detection.
    orchestrator.run_cycle();

    // Collect the emergence scores produced by the post-transfer cycles.
    let scores: Vec<f64> = (0..5)
        .map(|_| orchestrator.run_cycle().emergence_score)
        .collect();

    for score in &scores {
        assert!(score.is_finite(), "emergence score must be finite");
        assert!(*score >= 0.0, "emergence score must be non-negative");
    }
}
/// Every cycle writes its prior into the manifold. Repeated writes for the
/// same (src, dst) pair update a single entry, so the count stays at >= 1
/// rather than growing per cycle.
#[test]
fn test_transfer_manifold_accumulates() {
    let mut orchestrator = ExoTransferOrchestrator::new("e2e_manifold");

    for cycle_no in 1..=5 {
        let entries = orchestrator.run_cycle().manifold_entries;
        assert!(
            entries >= 1,
            "cycle {}: manifold must hold ≥1 entry",
            cycle_no
        );
    }
}
/// After a few cycles the CRDT consistently returns a prior with the
/// expected domain pair and a cycle number inside the range actually run.
#[test]
fn test_crdt_prior_consistency() {
    let mut orchestrator = ExoTransferOrchestrator::new("e2e_crdt");

    (0..3).for_each(|_| {
        orchestrator.run_cycle();
    });

    let prior = orchestrator
        .best_prior()
        .expect("prior must exist after 3 cycles");
    assert_eq!(prior.src_domain, "exo-retrieval");
    assert_eq!(prior.dst_domain, "exo-graph");
    assert!(prior.cycle >= 1 && prior.cycle <= 3);
}