Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
396
vendor/ruvector/tests/wasm-integration/attention_unified_tests.rs
vendored
Normal file
396
vendor/ruvector/tests/wasm-integration/attention_unified_tests.rs
vendored
Normal file
@@ -0,0 +1,396 @@
|
||||
//! Integration tests for ruvector-attention-unified-wasm
//!
//! Tests for unified attention mechanisms including:
//! - Multi-head self-attention
//! - Mamba SSM (Selective State Space Model)
//! - RWKV attention
//! - Flash attention approximation
//! - Hyperbolic attention
//!
//! NOTE: the attention crate is not implemented yet; each test builds its
//! fixtures and validates their shapes so the harness stays green. The
//! commented `TODO` sections show the intended assertions once the crate lands.

#[cfg(test)]
mod tests {
    use wasm_bindgen_test::*;
    use super::super::common::*;

    wasm_bindgen_test_configure!(run_in_browser);

    // ========================================================================
    // Multi-Head Attention Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_multi_head_attention_basic() {
        // Setup query, keys, values
        let dim = 64;
        let num_heads = 8;
        let head_dim = dim / num_heads;
        let seq_len = 16;

        let query = random_vector(dim);
        let keys: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let values: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: When ruvector-attention-unified-wasm is implemented:
        // let attention = MultiHeadAttention::new(dim, num_heads);
        // let output = attention.forward(&query, &keys, &values);
        //
        // Assert output shape
        // assert_eq!(output.len(), dim);
        // assert_finite(&output);

        // Placeholder: validate fixture shapes until the crate is implemented.
        assert_eq!(query.len(), dim);
        assert_eq!(keys.len(), seq_len);
        assert_eq!(values.len(), seq_len);
        // Head dimension must evenly partition the model dimension.
        assert_eq!(head_dim * num_heads, dim);
    }

    #[wasm_bindgen_test]
    fn test_multi_head_attention_output_shape() {
        let dim = 128;
        let num_heads = 16;
        let seq_len = 32;

        let queries: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let keys: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let values: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: Verify output shape matches (seq_len, dim)
        // let attention = MultiHeadAttention::new(dim, num_heads);
        // let outputs = attention.forward_batch(&queries, &keys, &values);
        // assert_eq!(outputs.len(), seq_len);
        // for output in &outputs {
        //     assert_eq!(output.len(), dim);
        //     assert_finite(output);
        // }

        // Placeholder: fixture shape checks only.
        assert_eq!(queries.len(), seq_len);
        assert_eq!(keys.len(), seq_len);
        assert_eq!(values.len(), seq_len);
        assert_eq!(dim % num_heads, 0);
    }

    #[wasm_bindgen_test]
    fn test_multi_head_attention_causality() {
        // Test that causal masking works correctly
        let dim = 32;
        let seq_len = 8;

        // TODO: Verify causal attention doesn't attend to future tokens
        // let attention = MultiHeadAttention::new_causal(dim, 4);
        // let weights = attention.get_attention_weights(&queries, &keys);
        //
        // For each position i, weights[i][j] should be 0 for j > i
        // for i in 0..seq_len {
        //     for j in (i+1)..seq_len {
        //         assert_eq!(weights[i][j], 0.0, "Causal violation at ({}, {})", i, j);
        //     }
        // }

        // Placeholder: sanity-check the parameters.
        assert!(dim > 0);
        assert!(seq_len > 0);
    }

    // ========================================================================
    // Mamba SSM Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_mamba_ssm_basic() {
        // Test O(n) selective scan complexity
        let dim = 64;
        let seq_len = 100;

        let input: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: When Mamba SSM is implemented:
        // let mamba = MambaSSM::new(dim);
        // let output = mamba.forward(&input);
        //
        // Assert O(n) complexity by timing
        // let start = performance.now();
        // mamba.forward(&input);
        // let duration = performance.now() - start;
        //
        // Double input size should roughly double time (O(n))
        // let input_2x = (0..seq_len*2).map(|_| random_vector(dim)).collect();
        // let start_2x = performance.now();
        // mamba.forward(&input_2x);
        // let duration_2x = performance.now() - start_2x;
        //
        // assert!(duration_2x < duration * 2.5, "Should be O(n) not O(n^2)");

        // Placeholder: fixture shape check only.
        assert_eq!(input.len(), seq_len);
    }

    #[wasm_bindgen_test]
    fn test_mamba_ssm_selective_scan() {
        // Test the selective scan mechanism
        let dim = 32;
        let seq_len = 50;

        let input: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: Verify selective scan produces valid outputs
        // let mamba = MambaSSM::new(dim);
        // let (output, hidden_states) = mamba.forward_with_states(&input);
        //
        // Hidden states should evolve based on input
        // for state in &hidden_states {
        //     assert_finite(state);
        // }

        // Placeholder: fixture shape check only.
        assert_eq!(input.len(), seq_len);
        assert!(dim > 0);
    }

    #[wasm_bindgen_test]
    fn test_mamba_ssm_state_propagation() {
        // Test that state is properly propagated across sequence
        let dim = 16;

        // TODO: Create a simple pattern and verify state carries information
        // let mamba = MambaSSM::new(dim);
        //
        // Input with a spike at position 0
        // let mut input = vec![vec![0.0; dim]; 20];
        // input[0] = vec![1.0; dim];
        //
        // let output = mamba.forward(&input);
        //
        // Later positions should still have some response to the spike
        // let response_at_5: f32 = output[5].iter().map(|x| x.abs()).sum();
        // assert!(response_at_5 > 0.01, "State should propagate forward");

        // Placeholder: sanity-check the parameter.
        assert!(dim > 0);
    }

    // ========================================================================
    // RWKV Attention Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_rwkv_attention_basic() {
        let dim = 64;
        let seq_len = 100;

        let input: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: Test RWKV linear attention
        // let rwkv = RWKVAttention::new(dim);
        // let output = rwkv.forward(&input);
        // assert_eq!(output.len(), seq_len);

        // Placeholder: fixture shape check only.
        assert_eq!(input.len(), seq_len);
    }

    #[wasm_bindgen_test]
    fn test_rwkv_linear_complexity() {
        // RWKV should be O(n) in sequence length
        let dim = 32;

        // TODO: Verify linear complexity
        // let rwkv = RWKVAttention::new(dim);
        //
        // Time with 100 tokens
        // let input_100 = (0..100).map(|_| random_vector(dim)).collect();
        // let t1 = time_execution(|| rwkv.forward(&input_100));
        //
        // Time with 1000 tokens
        // let input_1000 = (0..1000).map(|_| random_vector(dim)).collect();
        // let t2 = time_execution(|| rwkv.forward(&input_1000));
        //
        // Should be roughly 10x, not 100x (O(n) vs O(n^2))
        // assert!(t2 < t1 * 20.0, "RWKV should be O(n)");

        // Placeholder: sanity-check the parameter.
        assert!(dim > 0);
    }

    // ========================================================================
    // Flash Attention Approximation Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_flash_attention_approximation() {
        let dim = 64;
        let seq_len = 128;

        let queries: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let keys: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let values: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: Compare flash attention to standard attention
        // let standard = StandardAttention::new(dim);
        // let flash = FlashAttention::new(dim);
        //
        // let output_standard = standard.forward(&queries, &keys, &values);
        // let output_flash = flash.forward(&queries, &keys, &values);
        //
        // Should be numerically close
        // for (std_out, flash_out) in output_standard.iter().zip(output_flash.iter()) {
        //     assert_vectors_approx_eq(std_out, flash_out, 1e-4);
        // }

        // Placeholder: fixture shape checks only.
        assert_eq!(queries.len(), seq_len);
        assert_eq!(keys.len(), seq_len);
        assert_eq!(values.len(), seq_len);
    }

    #[wasm_bindgen_test]
    fn test_flash_attention_memory_efficiency() {
        // Flash attention should use less memory for long sequences
        let dim = 64;
        let seq_len = 512;

        // TODO: Verify memory usage is O(n) not O(n^2)
        // This is harder to test in WASM, but we can verify it doesn't OOM

        // Placeholder: sanity-check the parameters.
        assert!(seq_len > 0);
        assert!(dim > 0);
    }

    // ========================================================================
    // Hyperbolic Attention Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_hyperbolic_attention_basic() {
        let dim = 32;
        let curvature = -1.0;

        let query = random_vector(dim);
        let keys: Vec<Vec<f32>> = (0..10).map(|_| random_vector(dim)).collect();
        let values: Vec<Vec<f32>> = (0..10).map(|_| random_vector(dim)).collect();

        // TODO: Test hyperbolic attention
        // let hyp_attn = HyperbolicAttention::new(dim, curvature);
        // let output = hyp_attn.forward(&query, &keys, &values);
        //
        // assert_eq!(output.len(), dim);
        // assert_finite(&output);

        // Placeholder: fixture checks; hyperbolic space requires negative curvature.
        assert!(curvature < 0.0);
        assert_eq!(query.len(), dim);
        assert_eq!(keys.len(), 10);
        assert_eq!(values.len(), 10);
    }

    #[wasm_bindgen_test]
    fn test_hyperbolic_distance_properties() {
        // Test Poincare distance metric properties
        let dim = 8;

        let u = random_vector(dim);
        let v = random_vector(dim);

        // TODO: Verify metric properties
        // let d_uv = poincare_distance(&u, &v, 1.0);
        // let d_vu = poincare_distance(&v, &u, 1.0);
        //
        // Symmetry
        // assert!((d_uv - d_vu).abs() < 1e-6);
        //
        // Non-negativity
        // assert!(d_uv >= 0.0);
        //
        // Identity
        // let d_uu = poincare_distance(&u, &u, 1.0);
        // assert!(d_uu.abs() < 1e-6);

        // Placeholder: fixture shape checks only.
        assert_eq!(u.len(), dim);
        assert_eq!(v.len(), dim);
    }

    // ========================================================================
    // Unified Interface Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_attention_mechanism_registry() {
        // Test that all mechanisms can be accessed through unified interface

        // TODO: Test mechanism registry
        // let registry = AttentionRegistry::new();
        //
        // assert!(registry.has_mechanism("multi_head"));
        // assert!(registry.has_mechanism("mamba_ssm"));
        // assert!(registry.has_mechanism("rwkv"));
        // assert!(registry.has_mechanism("flash"));
        // assert!(registry.has_mechanism("hyperbolic"));

        // Placeholder: passes trivially until the registry is implemented.
    }

    #[wasm_bindgen_test]
    fn test_attention_factory() {
        // Test creating different attention types through factory

        // TODO: Test factory pattern
        // let factory = AttentionFactory::new();
        //
        // let config = AttentionConfig {
        //     dim: 64,
        //     num_heads: 8,
        //     mechanism: "multi_head",
        // };
        //
        // let attention = factory.create(&config);
        // assert!(attention.is_some());

        // Placeholder: passes trivially until the factory is implemented.
    }

    // ========================================================================
    // Numerical Stability Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_attention_numerical_stability_large_values() {
        let dim = 32;

        // Test with large input values
        let query: Vec<f32> = (0..dim).map(|i| (i as f32) * 100.0).collect();
        let keys: Vec<Vec<f32>> = (0..10).map(|i| vec![(i as f32) * 100.0; dim]).collect();

        // TODO: Should not overflow or produce NaN
        // let attention = MultiHeadAttention::new(dim, 4);
        // let output = attention.forward(&query, &keys, &values);
        // assert_finite(&output);

        // Placeholder: inputs themselves must be finite and well-shaped.
        assert!(query.iter().all(|x| x.is_finite()));
        assert_eq!(keys.len(), 10);
    }

    #[wasm_bindgen_test]
    fn test_attention_numerical_stability_small_values() {
        let dim = 32;

        // Test with very small input values
        let query: Vec<f32> = vec![1e-10; dim];
        let keys: Vec<Vec<f32>> = (0..10).map(|_| vec![1e-10; dim]).collect();

        // TODO: Should not underflow or produce NaN
        // let attention = MultiHeadAttention::new(dim, 4);
        // let output = attention.forward(&query, &keys, &values);
        // assert_finite(&output);

        // Placeholder: inputs themselves must be finite and well-shaped.
        assert!(query.iter().all(|x| x.is_finite()));
        assert_eq!(keys.len(), 10);
    }

    // ========================================================================
    // Performance Constraint Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_attention_latency_target() {
        // Target: <100 microseconds per mechanism at 100 tokens
        let dim = 64;
        let seq_len = 100;

        let queries: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let keys: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();
        let values: Vec<Vec<f32>> = (0..seq_len).map(|_| random_vector(dim)).collect();

        // TODO: Measure latency when implemented
        // let attention = MultiHeadAttention::new(dim, 8);
        //
        // Warm up
        // attention.forward(&queries[0], &keys, &values);
        //
        // Measure
        // let start = performance.now();
        // for _ in 0..100 {
        //     attention.forward(&queries[0], &keys, &values);
        // }
        // let avg_latency_us = (performance.now() - start) * 10.0; // 100 runs -> us
        //
        // assert!(avg_latency_us < 100.0, "Latency {} us exceeds 100 us target", avg_latency_us);

        // Placeholder: fixture shape checks only.
        assert_eq!(queries.len(), seq_len);
        assert_eq!(keys.len(), seq_len);
        assert_eq!(values.len(), seq_len);
    }
}
|
||||
549
vendor/ruvector/tests/wasm-integration/economy_tests.rs
vendored
Normal file
549
vendor/ruvector/tests/wasm-integration/economy_tests.rs
vendored
Normal file
@@ -0,0 +1,549 @@
|
||||
//! Integration tests for ruvector-economy-wasm
//!
//! Tests for economic mechanisms supporting agent coordination:
//! - Token economics for resource allocation
//! - Auction mechanisms for task assignment
//! - Market-based coordination
//! - Incentive alignment mechanisms
//!
//! NOTE: the economy crate is not implemented yet; these tests are
//! placeholders that document the intended assertions in `TODO` comments
//! and validate only their local fixtures so the harness stays green.

#[cfg(test)]
mod tests {
    use wasm_bindgen_test::*;
    use super::super::common::*;

    wasm_bindgen_test_configure!(run_in_browser);

    // ========================================================================
    // Token Economics Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_token_creation() {
        // Test creating economic tokens
        let initial_supply = 1_000_000;

        // TODO: When economy crate is implemented:
        // let token = Token::new("COMPUTE", initial_supply);
        //
        // assert_eq!(token.total_supply(), initial_supply);
        // assert_eq!(token.symbol(), "COMPUTE");

        // Placeholder: sanity-check the fixture.
        assert!(initial_supply > 0);
    }

    #[wasm_bindgen_test]
    fn test_token_transfer() {
        let initial_balance = 1000;

        // TODO: Test token transfer
        // let mut token = Token::new("COMPUTE", 1_000_000);
        //
        // let agent_a = "agent_a";
        // let agent_b = "agent_b";
        //
        // // Mint to agent A
        // token.mint(agent_a, initial_balance);
        // assert_eq!(token.balance_of(agent_a), initial_balance);
        //
        // // Transfer from A to B
        // let transfer_amount = 300;
        // token.transfer(agent_a, agent_b, transfer_amount).unwrap();
        //
        // assert_eq!(token.balance_of(agent_a), initial_balance - transfer_amount);
        // assert_eq!(token.balance_of(agent_b), transfer_amount);

        // Placeholder: sanity-check the fixture.
        assert!(initial_balance > 0);
    }

    #[wasm_bindgen_test]
    fn test_token_insufficient_balance() {
        // Test that transfers fail with insufficient balance

        // TODO: Test insufficient balance
        // let mut token = Token::new("COMPUTE", 1_000_000);
        //
        // token.mint("agent_a", 100);
        //
        // let result = token.transfer("agent_a", "agent_b", 200);
        // assert!(result.is_err(), "Should fail with insufficient balance");
        //
        // // Balance unchanged on failure
        // assert_eq!(token.balance_of("agent_a"), 100);

        // Placeholder: passes trivially until the token type is implemented.
    }

    #[wasm_bindgen_test]
    fn test_token_staking() {
        // Test staking mechanism
        let stake_amount = 500;

        // TODO: Test staking
        // let mut token = Token::new("COMPUTE", 1_000_000);
        //
        // token.mint("agent_a", 1000);
        //
        // // Stake tokens
        // token.stake("agent_a", stake_amount).unwrap();
        //
        // assert_eq!(token.balance_of("agent_a"), 500);
        // assert_eq!(token.staked_balance("agent_a"), stake_amount);
        //
        // // Staked tokens cannot be transferred
        // let result = token.transfer("agent_a", "agent_b", 600);
        // assert!(result.is_err());
        //
        // // Unstake
        // token.unstake("agent_a", 200).unwrap();
        // assert_eq!(token.balance_of("agent_a"), 700);
        // assert_eq!(token.staked_balance("agent_a"), 300);

        // Placeholder: sanity-check the fixture.
        assert!(stake_amount > 0);
    }

    // ========================================================================
    // Auction Mechanism Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_first_price_auction() {
        // Test first-price sealed-bid auction

        // TODO: Test first-price auction
        // let mut auction = FirstPriceAuction::new("task_123");
        //
        // // Submit bids
        // auction.bid("agent_a", 100);
        // auction.bid("agent_b", 150);
        // auction.bid("agent_c", 120);
        //
        // // Close auction
        // let result = auction.close();
        //
        // // Highest bidder wins, pays their bid
        // assert_eq!(result.winner, "agent_b");
        // assert_eq!(result.price, 150);

        // Placeholder: passes trivially until auctions are implemented.
    }

    #[wasm_bindgen_test]
    fn test_second_price_auction() {
        // Test Vickrey (second-price) auction

        // TODO: Test second-price auction
        // let mut auction = SecondPriceAuction::new("task_123");
        //
        // auction.bid("agent_a", 100);
        // auction.bid("agent_b", 150);
        // auction.bid("agent_c", 120);
        //
        // let result = auction.close();
        //
        // // Highest bidder wins, pays second-highest price
        // assert_eq!(result.winner, "agent_b");
        // assert_eq!(result.price, 120);

        // Placeholder: passes trivially until auctions are implemented.
    }

    #[wasm_bindgen_test]
    fn test_dutch_auction() {
        // Test Dutch (descending price) auction

        // TODO: Test Dutch auction
        // let mut auction = DutchAuction::new("task_123", 200, 50); // Start 200, floor 50
        //
        // // Price decreases over time
        // auction.tick(); // 190
        // auction.tick(); // 180
        // assert!(auction.current_price() < 200);
        //
        // // First bidder to accept wins
        // auction.accept("agent_a");
        // let result = auction.close();
        //
        // assert_eq!(result.winner, "agent_a");
        // assert_eq!(result.price, auction.current_price());

        // Placeholder: passes trivially until auctions are implemented.
    }

    #[wasm_bindgen_test]
    fn test_multi_item_auction() {
        // Test auction for multiple items/tasks

        // TODO: Test multi-item auction
        // let mut auction = MultiItemAuction::new(vec!["task_1", "task_2", "task_3"]);
        //
        // // Agents bid on items they want
        // auction.bid("agent_a", "task_1", 100);
        // auction.bid("agent_a", "task_2", 80);
        // auction.bid("agent_b", "task_1", 90);
        // auction.bid("agent_b", "task_3", 110);
        // auction.bid("agent_c", "task_2", 95);
        //
        // let results = auction.close();
        //
        // // Verify allocation
        // assert_eq!(results.get("task_1").unwrap().winner, "agent_a");
        // assert_eq!(results.get("task_2").unwrap().winner, "agent_c");
        // assert_eq!(results.get("task_3").unwrap().winner, "agent_b");

        // Placeholder: passes trivially until auctions are implemented.
    }

    // ========================================================================
    // Market Mechanism Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_order_book() {
        // Test limit order book for compute resources

        // TODO: Test order book
        // let mut order_book = OrderBook::new("COMPUTE");
        //
        // // Place limit orders
        // order_book.place_limit_order("seller_a", Side::Sell, 10, 100); // Sell 10 @ 100
        // order_book.place_limit_order("seller_b", Side::Sell, 15, 95);  // Sell 15 @ 95
        // order_book.place_limit_order("buyer_a", Side::Buy, 8, 92);     // Buy 8 @ 92
        //
        // // Check order book state
        // assert_eq!(order_book.best_ask(), Some(95));
        // assert_eq!(order_book.best_bid(), Some(92));
        //
        // // Market order that crosses spread
        // let fills = order_book.place_market_order("buyer_b", Side::Buy, 12);
        //
        // // Should fill at best ask prices
        // assert!(!fills.is_empty());

        // Placeholder: passes trivially until the order book is implemented.
    }

    #[wasm_bindgen_test]
    fn test_automated_market_maker() {
        // Test AMM (constant product formula)

        // TODO: Test AMM
        // let mut amm = AutomatedMarketMaker::new(
        //     ("COMPUTE", 1000),
        //     ("CREDIT", 10000),
        // );
        //
        // // Initial price: 10 CREDIT per COMPUTE
        // assert_eq!(amm.get_price("COMPUTE"), 10.0);
        //
        // // Swap CREDIT for COMPUTE
        // let compute_out = amm.swap("CREDIT", 100);
        //
        // // Should get some COMPUTE
        // assert!(compute_out > 0.0);
        //
        // // Price should increase (less COMPUTE in pool)
        // assert!(amm.get_price("COMPUTE") > 10.0);
        //
        // // Constant product should be maintained
        // let k_before = 1000.0 * 10000.0;
        // let (compute_reserve, credit_reserve) = amm.reserves();
        // let k_after = compute_reserve * credit_reserve;
        // assert!((k_before - k_after).abs() < 1.0);

        // Placeholder: passes trivially until the AMM is implemented.
    }

    #[wasm_bindgen_test]
    fn test_resource_pricing() {
        // Test dynamic resource pricing based on demand

        // TODO: Test dynamic pricing
        // let mut pricing = DynamicPricing::new(100.0); // Base price 100
        //
        // // High demand should increase price
        // pricing.record_demand(0.9); // 90% utilization
        // pricing.update_price();
        // assert!(pricing.current_price() > 100.0);
        //
        // // Low demand should decrease price
        // pricing.record_demand(0.2); // 20% utilization
        // pricing.update_price();
        // // Price decreases (but not below floor)
        // assert!(pricing.current_price() < pricing.previous_price());

        // Placeholder: passes trivially until dynamic pricing is implemented.
    }

    // ========================================================================
    // Incentive Mechanism Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_reputation_system() {
        // Test reputation-based incentives

        // TODO: Test reputation
        // let mut reputation = ReputationSystem::new();
        //
        // // Complete task successfully
        // reputation.record_completion("agent_a", "task_1", true, 0.95);
        //
        // assert!(reputation.score("agent_a") > 0.0);
        //
        // // Failed task decreases reputation
        // reputation.record_completion("agent_a", "task_2", false, 0.0);
        //
        // let score_after_fail = reputation.score("agent_a");
        // // Score should decrease but not go negative
        // assert!(score_after_fail >= 0.0);
        // assert!(score_after_fail < reputation.initial_score());

        // Placeholder: passes trivially until reputation is implemented.
    }

    #[wasm_bindgen_test]
    fn test_slashing_mechanism() {
        // Test slashing for misbehavior

        // TODO: Test slashing
        // let mut economy = Economy::new();
        //
        // economy.stake("agent_a", 1000);
        //
        // // Report misbehavior
        // let slash_amount = economy.slash("agent_a", "invalid_output", 0.1);
        //
        // assert_eq!(slash_amount, 100); // 10% of stake
        // assert_eq!(economy.staked_balance("agent_a"), 900);

        // Placeholder: passes trivially until slashing is implemented.
    }

    #[wasm_bindgen_test]
    fn test_reward_distribution() {
        // Test reward distribution among contributors

        // TODO: Test reward distribution
        // let mut reward_pool = RewardPool::new(1000);
        //
        // // Record contributions
        // reward_pool.record_contribution("agent_a", 0.5);
        // reward_pool.record_contribution("agent_b", 0.3);
        // reward_pool.record_contribution("agent_c", 0.2);
        //
        // // Distribute rewards
        // let distribution = reward_pool.distribute();
        //
        // assert_eq!(distribution.get("agent_a"), Some(&500));
        // assert_eq!(distribution.get("agent_b"), Some(&300));
        // assert_eq!(distribution.get("agent_c"), Some(&200));

        // Placeholder: passes trivially until reward pools are implemented.
    }

    #[wasm_bindgen_test]
    fn test_quadratic_funding() {
        // Test quadratic funding mechanism

        // TODO: Test quadratic funding
        // let mut qf = QuadraticFunding::new(10000); // Matching pool
        //
        // // Contributions to projects
        // qf.contribute("project_a", "donor_1", 100);
        // qf.contribute("project_a", "donor_2", 100);
        // qf.contribute("project_b", "donor_3", 400);
        //
        // // Calculate matching
        // let matching = qf.calculate_matching();
        //
        // // Project A has more unique contributors, should get more matching
        // // despite receiving less total contributions
        // // sqrt(100) + sqrt(100) = 20 for A
        // // sqrt(400) = 20 for B
        // // A and B should get equal matching (if same total sqrt)

        // Placeholder: passes trivially until quadratic funding is implemented.
    }

    // ========================================================================
    // Coordination Game Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_task_assignment_game() {
        // Test game-theoretic task assignment

        // TODO: Test task assignment game
        // let tasks = vec![
        //     Task { id: "t1", complexity: 0.5, reward: 100 },
        //     Task { id: "t2", complexity: 0.8, reward: 200 },
        //     Task { id: "t3", complexity: 0.3, reward: 80 },
        // ];
        //
        // let agents = vec![
        //     Agent { id: "a1", capability: 0.6 },
        //     Agent { id: "a2", capability: 0.9 },
        // ];
        //
        // let game = TaskAssignmentGame::new(tasks, agents);
        // let assignment = game.find_equilibrium();
        //
        // // More capable agent should get harder task
        // assert_eq!(assignment.get("t2"), Some(&"a2"));
        //
        // // Assignment should maximize total value
        // let total_value = assignment.total_value();
        // assert!(total_value > 0);

        // Placeholder: passes trivially until the game is implemented.
    }

    #[wasm_bindgen_test]
    fn test_coalition_formation() {
        // Test coalition formation for collaborative tasks

        // TODO: Test coalition formation
        // let agents = vec!["a1", "a2", "a3", "a4"];
        // let task_requirements = TaskRequirements {
        //     min_agents: 2,
        //     capabilities_needed: vec!["coding", "testing"],
        // };
        //
        // let capabilities = hashmap! {
        //     "a1" => vec!["coding"],
        //     "a2" => vec!["testing"],
        //     "a3" => vec!["coding", "testing"],
        //     "a4" => vec!["reviewing"],
        // };
        //
        // let coalition = form_coalition(&agents, &task_requirements, &capabilities);
        //
        // // Coalition should satisfy requirements
        // assert!(coalition.satisfies(&task_requirements));
        //
        // // Should be minimal (no unnecessary agents)
        // assert!(coalition.is_minimal());

        // Placeholder: passes trivially until coalition formation is implemented.
    }

    // ========================================================================
    // Economic Simulation Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_economy_equilibrium() {
        // Test that economy reaches equilibrium over time

        // TODO: Test equilibrium
        // let mut economy = Economy::new();
        //
        // // Add agents and resources
        // for i in 0..10 {
        //     economy.add_agent(format!("agent_{}", i));
        // }
        // economy.add_resource("compute", 1000);
        // economy.add_resource("storage", 5000);
        //
        // // Run simulation
        // let initial_prices = economy.get_prices();
        // for _ in 0..100 {
        //     economy.step();
        // }
        // let final_prices = economy.get_prices();
        //
        // // Prices should stabilize
        // economy.step();
        // let next_prices = economy.get_prices();
        //
        // let price_change: f32 = final_prices.iter().zip(next_prices.iter())
        //     .map(|(a, b)| (a - b).abs())
        //     .sum();
        //
        // assert!(price_change < 1.0, "Prices should stabilize");

        // Placeholder: passes trivially until the simulation is implemented.
    }

    #[wasm_bindgen_test]
    fn test_no_exploitation() {
        // Test that mechanisms are resistant to exploitation

        // TODO: Test exploitation resistance
        // let mut auction = SecondPriceAuction::new("task");
        //
        // // Dominant strategy in Vickrey auction is to bid true value
        // // Agent bidding above true value should not increase utility
        //
        // let true_value = 100;
        //
        // // Simulate multiple runs
        // let mut overbid_wins = 0;
        // let mut truthful_wins = 0;
        // let mut overbid_profit = 0.0;
        // let mut truthful_profit = 0.0;
        //
        // for _ in 0..100 {
        //     let competitor_bid = rand::random::<u64>() % 200;
        //
        //     // Run with overbid
        //     let mut auction1 = SecondPriceAuction::new("task");
        //     auction1.bid("overbidder", 150); // Overbid
        //     auction1.bid("competitor", competitor_bid);
        //     let result1 = auction1.close();
        //     if result1.winner == "overbidder" {
        //         overbid_wins += 1;
        //         overbid_profit += (true_value - result1.price) as f32;
        //     }
        //
        //     // Run with truthful bid
        //     let mut auction2 = SecondPriceAuction::new("task");
        //     auction2.bid("truthful", true_value);
        //     auction2.bid("competitor", competitor_bid);
        //     let result2 = auction2.close();
        //     if result2.winner == "truthful" {
        //         truthful_wins += 1;
        //         truthful_profit += (true_value - result2.price) as f32;
        //     }
        // }
        //
        // // Truthful should have higher expected profit
        // let overbid_avg = overbid_profit / 100.0;
        // let truthful_avg = truthful_profit / 100.0;
        // assert!(truthful_avg >= overbid_avg - 1.0,
        //     "Truthful bidding should not be strictly dominated");

        // Placeholder: passes trivially until auctions are implemented.
    }

    // ========================================================================
    // WASM-Specific Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_economy_wasm_initialization() {
        // TODO: Test WASM init
        // ruvector_economy_wasm::init();
        // assert!(ruvector_economy_wasm::version().len() > 0);

        // Placeholder: passes trivially until the WASM module is implemented.
    }

    #[wasm_bindgen_test]
    fn test_economy_js_interop() {
        // Test JavaScript interoperability

        // TODO: Test JS interop
        // let auction = FirstPriceAuction::new("task_123");
        //
        // // Should be convertible to JsValue
        // let js_value = auction.to_js();
        // assert!(js_value.is_object());
        //
        // // Should be restorable from JsValue
        // let restored = FirstPriceAuction::from_js(&js_value).unwrap();
        // assert_eq!(restored.item_id(), "task_123");

        // Placeholder: passes trivially until JS interop is implemented.
    }
}
|
||||
641
vendor/ruvector/tests/wasm-integration/exotic_tests.rs
vendored
Normal file
641
vendor/ruvector/tests/wasm-integration/exotic_tests.rs
vendored
Normal file
@@ -0,0 +1,641 @@
|
||||
//! Integration tests for ruvector-exotic-wasm
|
||||
//!
|
||||
//! Tests for exotic AI mechanisms enabling emergent behavior:
|
||||
//! - NAOs (Neural Autonomous Organizations)
|
||||
//! - Morphogenetic Networks
|
||||
//! - Time Crystals for periodic behavior
|
||||
//! - Other experimental mechanisms
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use wasm_bindgen_test::*;
|
||||
use super::super::common::*;
|
||||
|
||||
wasm_bindgen_test_configure!(run_in_browser);
|
||||
|
||||
// ========================================================================
|
||||
// NAO (Neural Autonomous Organization) Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_creation() {
|
||||
// Test creating a Neural Autonomous Organization
|
||||
|
||||
// TODO: When NAO is implemented:
|
||||
// let config = NAOConfig {
|
||||
// name: "TestDAO",
|
||||
// governance_model: GovernanceModel::Quadratic,
|
||||
// initial_members: 5,
|
||||
// };
|
||||
//
|
||||
// let nao = NAO::new(config);
|
||||
//
|
||||
// assert_eq!(nao.name(), "TestDAO");
|
||||
// assert_eq!(nao.member_count(), 5);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_proposal_voting() {
|
||||
// Test proposal creation and voting
|
||||
|
||||
// TODO: Test voting
|
||||
// let mut nao = NAO::new(default_config());
|
||||
//
|
||||
// // Create proposal
|
||||
// let proposal_id = nao.create_proposal(Proposal {
|
||||
// title: "Increase compute allocation",
|
||||
// action: Action::SetParameter("compute_budget", 1000),
|
||||
// quorum: 0.5,
|
||||
// threshold: 0.6,
|
||||
// });
|
||||
//
|
||||
// // Members vote
|
||||
// nao.vote(proposal_id, "member_1", Vote::Yes);
|
||||
// nao.vote(proposal_id, "member_2", Vote::Yes);
|
||||
// nao.vote(proposal_id, "member_3", Vote::Yes);
|
||||
// nao.vote(proposal_id, "member_4", Vote::No);
|
||||
// nao.vote(proposal_id, "member_5", Vote::Abstain);
|
||||
//
|
||||
// // Execute if passed
|
||||
// let result = nao.finalize_proposal(proposal_id);
|
||||
// assert!(result.is_ok());
|
||||
// assert!(result.unwrap().passed);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_neural_consensus() {
|
||||
// Test neural network-based consensus mechanism
|
||||
|
||||
// TODO: Test neural consensus
|
||||
// let mut nao = NAO::new_neural(NeuralConfig {
|
||||
// consensus_network_dim: 64,
|
||||
// learning_rate: 0.01,
|
||||
// });
|
||||
//
|
||||
// // Proposal represented as vector
|
||||
// let proposal_embedding = random_vector(64);
|
||||
//
|
||||
// // Members submit preference embeddings
|
||||
// let preferences: Vec<Vec<f32>> = nao.members()
|
||||
// .map(|_| random_vector(64))
|
||||
// .collect();
|
||||
//
|
||||
// // Neural network computes consensus
|
||||
// let consensus = nao.compute_neural_consensus(&proposal_embedding, &preferences);
|
||||
//
|
||||
// assert!(consensus.decision.is_some());
|
||||
// assert!(consensus.confidence > 0.0);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_delegation() {
|
||||
// Test vote delegation (liquid democracy)
|
||||
|
||||
// TODO: Test delegation
|
||||
// let mut nao = NAO::new(default_config());
|
||||
//
|
||||
// // Member 1 delegates to member 2
|
||||
// nao.delegate("member_1", "member_2");
|
||||
//
|
||||
// // Member 2's vote now has weight 2
|
||||
// let proposal_id = nao.create_proposal(simple_proposal());
|
||||
// nao.vote(proposal_id, "member_2", Vote::Yes);
|
||||
//
|
||||
// let vote_count = nao.get_vote_count(proposal_id, Vote::Yes);
|
||||
// assert_eq!(vote_count, 2.0); // member_2's own vote + delegated vote
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_treasury_management() {
|
||||
// Test treasury operations
|
||||
|
||||
// TODO: Test treasury
|
||||
// let mut nao = NAO::new(default_config());
|
||||
//
|
||||
// // Deposit to treasury
|
||||
// nao.deposit_to_treasury("COMPUTE", 1000);
|
||||
// assert_eq!(nao.treasury_balance("COMPUTE"), 1000);
|
||||
//
|
||||
// // Create spending proposal
|
||||
// let proposal_id = nao.create_proposal(Proposal {
|
||||
// action: Action::Transfer("recipient", "COMPUTE", 100),
|
||||
// ..default_proposal()
|
||||
// });
|
||||
//
|
||||
// // Vote and execute
|
||||
// for member in nao.members() {
|
||||
// nao.vote(proposal_id, member, Vote::Yes);
|
||||
// }
|
||||
// nao.finalize_proposal(proposal_id);
|
||||
//
|
||||
// assert_eq!(nao.treasury_balance("COMPUTE"), 900);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Morphogenetic Network Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_morphogenetic_field_creation() {
|
||||
// Test creating a morphogenetic field
|
||||
|
||||
// TODO: Test morphogenetic field
|
||||
// let config = MorphogeneticConfig {
|
||||
// grid_size: (10, 10),
|
||||
// num_morphogens: 3,
|
||||
// diffusion_rate: 0.1,
|
||||
// decay_rate: 0.01,
|
||||
// };
|
||||
//
|
||||
// let field = MorphogeneticField::new(config);
|
||||
//
|
||||
// assert_eq!(field.grid_size(), (10, 10));
|
||||
// assert_eq!(field.num_morphogens(), 3);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_morphogen_diffusion() {
|
||||
// Test morphogen diffusion dynamics
|
||||
|
||||
// TODO: Test diffusion
|
||||
// let mut field = MorphogeneticField::new(default_config());
|
||||
//
|
||||
// // Set initial concentration at center
|
||||
// field.set_concentration(5, 5, 0, 1.0);
|
||||
//
|
||||
// // Run diffusion
|
||||
// for _ in 0..10 {
|
||||
// field.step();
|
||||
// }
|
||||
//
|
||||
// // Concentration should spread
|
||||
// let center = field.get_concentration(5, 5, 0);
|
||||
// let neighbor = field.get_concentration(5, 6, 0);
|
||||
//
|
||||
// assert!(center < 1.0, "Center should diffuse away");
|
||||
// assert!(neighbor > 0.0, "Neighbors should receive diffused morphogen");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_morphogenetic_pattern_formation() {
|
||||
// Test Turing pattern formation
|
||||
|
||||
// TODO: Test pattern formation
|
||||
// let config = MorphogeneticConfig {
|
||||
// grid_size: (50, 50),
|
||||
// num_morphogens: 2, // Activator and inhibitor
|
||||
// ..turing_pattern_config()
|
||||
// };
|
||||
//
|
||||
// let mut field = MorphogeneticField::new(config);
|
||||
//
|
||||
// // Add small random perturbation
|
||||
// field.add_noise(0.01);
|
||||
//
|
||||
// // Run until pattern forms
|
||||
// for _ in 0..1000 {
|
||||
// field.step();
|
||||
// }
|
||||
//
|
||||
// // Pattern should have formed (non-uniform distribution)
|
||||
// let variance = field.concentration_variance(0);
|
||||
// assert!(variance > 0.01, "Pattern should have formed");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_morphogenetic_network_growth() {
|
||||
// Test network structure emergence from morphogenetic field
|
||||
|
||||
// TODO: Test network growth
|
||||
// let mut field = MorphogeneticField::new(default_config());
|
||||
// let mut network = MorphogeneticNetwork::new(&field);
|
||||
//
|
||||
// // Run growth process
|
||||
// for _ in 0..100 {
|
||||
// field.step();
|
||||
// network.grow(&field);
|
||||
// }
|
||||
//
|
||||
// // Network should have grown
|
||||
// assert!(network.node_count() > 0);
|
||||
// assert!(network.edge_count() > 0);
|
||||
//
|
||||
// // Network structure should reflect morphogen distribution
|
||||
// let high_concentration_regions = field.find_peaks(0);
|
||||
// for peak in &high_concentration_regions {
|
||||
// // Should have more connections near peaks
|
||||
// let local_connectivity = network.local_degree(peak.x, peak.y);
|
||||
// assert!(local_connectivity > 1.0);
|
||||
// }
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_morphogenetic_agent_differentiation() {
|
||||
// Test agent differentiation based on local field
|
||||
|
||||
// TODO: Test differentiation
|
||||
// let field = MorphogeneticField::new(gradient_config());
|
||||
//
|
||||
// // Create agent at different positions
|
||||
// let agent_a = Agent::new((2, 2));
|
||||
// let agent_b = Agent::new((8, 8));
|
||||
//
|
||||
// // Agents differentiate based on local morphogen concentrations
|
||||
// agent_a.differentiate(&field);
|
||||
// agent_b.differentiate(&field);
|
||||
//
|
||||
// // Agents should have different properties based on position
|
||||
// assert_ne!(agent_a.cell_type(), agent_b.cell_type());
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Time Crystal Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_time_crystal_creation() {
|
||||
// Test creating a time crystal oscillator
|
||||
|
||||
// TODO: Test time crystal
|
||||
// let crystal = TimeCrystal::new(TimeCrystalConfig {
|
||||
// period: 10,
|
||||
// num_states: 4,
|
||||
// coupling_strength: 0.5,
|
||||
// });
|
||||
//
|
||||
// assert_eq!(crystal.period(), 10);
|
||||
// assert_eq!(crystal.num_states(), 4);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_time_crystal_oscillation() {
|
||||
// Test periodic behavior
|
||||
|
||||
// TODO: Test oscillation
|
||||
// let mut crystal = TimeCrystal::new(default_config());
|
||||
//
|
||||
// // Record states over two periods
|
||||
// let period = crystal.period();
|
||||
// let mut states: Vec<u32> = Vec::new();
|
||||
//
|
||||
// for _ in 0..(period * 2) {
|
||||
// states.push(crystal.current_state());
|
||||
// crystal.step();
|
||||
// }
|
||||
//
|
||||
// // Should repeat after one period
|
||||
// for i in 0..period {
|
||||
// assert_eq!(states[i], states[i + period],
|
||||
// "State should repeat after one period");
|
||||
// }
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_time_crystal_stability() {
|
||||
// Test that oscillation is stable against perturbation
|
||||
|
||||
// TODO: Test stability
|
||||
// let mut crystal = TimeCrystal::new(stable_config());
|
||||
//
|
||||
// // Run for a while to establish rhythm
|
||||
// for _ in 0..100 {
|
||||
// crystal.step();
|
||||
// }
|
||||
//
|
||||
// // Perturb the system
|
||||
// crystal.perturb(0.1);
|
||||
//
|
||||
// // Should recover periodic behavior
|
||||
// let period = crystal.period();
|
||||
// for _ in 0..50 {
|
||||
// crystal.step();
|
||||
// }
|
||||
//
|
||||
// // Check periodicity is restored
|
||||
// let state_t = crystal.current_state();
|
||||
// for _ in 0..period {
|
||||
// crystal.step();
|
||||
// }
|
||||
// let state_t_plus_period = crystal.current_state();
|
||||
//
|
||||
// assert_eq!(state_t, state_t_plus_period, "Should recover periodic behavior");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_time_crystal_synchronization() {
|
||||
// Test synchronization of coupled time crystals
|
||||
|
||||
// TODO: Test synchronization
|
||||
// let mut crystal_a = TimeCrystal::new(default_config());
|
||||
// let mut crystal_b = TimeCrystal::new(default_config());
|
||||
//
|
||||
// // Start with different phases
|
||||
// crystal_a.set_phase(0.0);
|
||||
// crystal_b.set_phase(0.5);
|
||||
//
|
||||
// // Couple them
|
||||
// let coupling = 0.1;
|
||||
//
|
||||
// for _ in 0..1000 {
|
||||
// crystal_a.step_coupled(&crystal_b, coupling);
|
||||
// crystal_b.step_coupled(&crystal_a, coupling);
|
||||
// }
|
||||
//
|
||||
// // Should synchronize
|
||||
// let phase_diff = (crystal_a.phase() - crystal_b.phase()).abs();
|
||||
// assert!(phase_diff < 0.1 || phase_diff > 0.9, "Should synchronize");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_time_crystal_network_coordinator() {
|
||||
// Test using time crystals to coordinate agent network
|
||||
|
||||
// TODO: Test coordination
|
||||
// let network_size = 10;
|
||||
// let mut agents: Vec<Agent> = (0..network_size)
|
||||
// .map(|i| Agent::new(i))
|
||||
// .collect();
|
||||
//
|
||||
// // Each agent has a time crystal for coordination
|
||||
// let crystals: Vec<TimeCrystal> = agents.iter()
|
||||
// .map(|_| TimeCrystal::new(default_config()))
|
||||
// .collect();
|
||||
//
|
||||
// // Couple agents in a ring topology
|
||||
// let coordinator = TimeCrystalCoordinator::ring(crystals);
|
||||
//
|
||||
// // Run coordination
|
||||
// for _ in 0..500 {
|
||||
// coordinator.step();
|
||||
// }
|
||||
//
|
||||
// // All agents should be in sync
|
||||
// let phases: Vec<f32> = coordinator.crystals()
|
||||
// .map(|c| c.phase())
|
||||
// .collect();
|
||||
//
|
||||
// let max_phase_diff = phases.windows(2)
|
||||
// .map(|w| (w[0] - w[1]).abs())
|
||||
// .fold(0.0f32, f32::max);
|
||||
//
|
||||
// assert!(max_phase_diff < 0.2, "Network should synchronize");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Emergent Behavior Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_swarm_intelligence_emergence() {
|
||||
// Test emergence of swarm intelligence from simple rules
|
||||
|
||||
// TODO: Test swarm emergence
|
||||
// let config = SwarmConfig {
|
||||
// num_agents: 100,
|
||||
// separation_weight: 1.0,
|
||||
// alignment_weight: 1.0,
|
||||
// cohesion_weight: 1.0,
|
||||
// };
|
||||
//
|
||||
// let mut swarm = Swarm::new(config);
|
||||
//
|
||||
// // Run simulation
|
||||
// for _ in 0..200 {
|
||||
// swarm.step();
|
||||
// }
|
||||
//
|
||||
// // Should exhibit flocking behavior
|
||||
// let avg_alignment = swarm.compute_average_alignment();
|
||||
// assert!(avg_alignment > 0.5, "Swarm should align");
|
||||
//
|
||||
// let cluster_count = swarm.count_clusters(5.0);
|
||||
// assert!(cluster_count < 5, "Swarm should cluster");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_self_organization() {
|
||||
// Test self-organization without central control
|
||||
|
||||
// TODO: Test self-organization
|
||||
// let mut system = SelfOrganizingSystem::new(50);
|
||||
//
|
||||
// // No central controller, just local interactions
|
||||
// for _ in 0..1000 {
|
||||
// system.step_local_interactions();
|
||||
// }
|
||||
//
|
||||
// // Should have formed structure
|
||||
// let order_parameter = system.compute_order();
|
||||
// assert!(order_parameter > 0.3, "System should self-organize");
|
||||
//
|
||||
// // Structure should be stable
|
||||
// let order_before = system.compute_order();
|
||||
// for _ in 0..100 {
|
||||
// system.step_local_interactions();
|
||||
// }
|
||||
// let order_after = system.compute_order();
|
||||
//
|
||||
// assert!((order_before - order_after).abs() < 0.1,
|
||||
// "Structure should be stable");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_collective_computation() {
|
||||
// Test collective computation capabilities
|
||||
|
||||
// TODO: Test collective computation
|
||||
// let collective = CollectiveComputer::new(20);
|
||||
//
|
||||
// // Collective should be able to solve optimization
|
||||
// let problem = OptimizationProblem {
|
||||
// objective: |x| x.iter().map(|xi| xi * xi).sum(),
|
||||
// dim: 10,
|
||||
// };
|
||||
//
|
||||
// let solution = collective.solve(&problem, 1000);
|
||||
//
|
||||
// // Should find approximate minimum (origin)
|
||||
// let objective_value = problem.objective(&solution);
|
||||
// assert!(objective_value < 1.0, "Should find approximate minimum");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Integration and Cross-Module Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_morphogenetic_integration() {
|
||||
// Test NAO using morphogenetic fields for structure
|
||||
|
||||
// TODO: Test integration
|
||||
// let field = MorphogeneticField::new(default_config());
|
||||
// let nao = NAO::new_morphogenetic(&field);
|
||||
//
|
||||
// // NAO structure emerges from field
|
||||
// assert!(nao.member_count() > 0);
|
||||
//
|
||||
// // Governance influenced by field topology
|
||||
// let proposal_id = nao.create_proposal(simple_proposal());
|
||||
//
|
||||
// // Voting weights determined by morphogenetic position
|
||||
// let weights = nao.get_voting_weights();
|
||||
// assert!(weights.iter().any(|&w| w != 1.0));
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_time_crystal_nao_coordination() {
|
||||
// Test using time crystals to coordinate NAO decisions
|
||||
|
||||
// TODO: Test coordination
|
||||
// let mut nao = NAO::new(default_config());
|
||||
// let crystal = TimeCrystal::new(decision_cycle_config());
|
||||
//
|
||||
// nao.set_decision_coordinator(crystal);
|
||||
//
|
||||
// // Decisions happen at crystal transition points
|
||||
// let proposal_id = nao.create_proposal(simple_proposal());
|
||||
//
|
||||
// // Fast-forward to decision point
|
||||
// while !nao.at_decision_point() {
|
||||
// nao.step();
|
||||
// }
|
||||
//
|
||||
// let result = nao.finalize_proposal(proposal_id);
|
||||
// assert!(result.is_ok());
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// WASM-Specific Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_exotic_wasm_initialization() {
|
||||
// TODO: Test WASM init
|
||||
// ruvector_exotic_wasm::init();
|
||||
// assert!(ruvector_exotic_wasm::version().len() > 0);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_exotic_serialization() {
|
||||
// Test serialization for persistence
|
||||
|
||||
// TODO: Test serialization
|
||||
// let nao = NAO::new(default_config());
|
||||
//
|
||||
// let json = nao.to_json();
|
||||
// let restored = NAO::from_json(&json).unwrap();
|
||||
//
|
||||
// assert_eq!(nao.name(), restored.name());
|
||||
// assert_eq!(nao.member_count(), restored.member_count());
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_exotic_wasm_bundle_size() {
|
||||
// Exotic WASM should be reasonably sized
|
||||
// Verified at build time, but check module loads
|
||||
|
||||
// TODO: Verify module loads
|
||||
// assert!(ruvector_exotic_wasm::available_mechanisms().len() > 0);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Performance and Scalability Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_nao_scalability() {
|
||||
// Test NAO with many members
|
||||
|
||||
// TODO: Test scalability
|
||||
// let config = NAOConfig {
|
||||
// initial_members: 1000,
|
||||
// ..default_config()
|
||||
// };
|
||||
//
|
||||
// let nao = NAO::new(config);
|
||||
//
|
||||
// // Should handle large membership
|
||||
// let proposal_id = nao.create_proposal(simple_proposal());
|
||||
//
|
||||
// // Voting should complete in reasonable time
|
||||
// let start = performance.now();
|
||||
// for i in 0..1000 {
|
||||
// nao.vote(proposal_id, format!("member_{}", i), Vote::Yes);
|
||||
// }
|
||||
// let duration = performance.now() - start;
|
||||
//
|
||||
// assert!(duration < 1000.0, "Voting should complete within 1s");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_morphogenetic_field_scalability() {
|
||||
// Test large morphogenetic field
|
||||
|
||||
// TODO: Test field scalability
|
||||
// let config = MorphogeneticConfig {
|
||||
// grid_size: (100, 100),
|
||||
// ..default_config()
|
||||
// };
|
||||
//
|
||||
// let mut field = MorphogeneticField::new(config);
|
||||
//
|
||||
// // Should handle large grid
|
||||
// let start = performance.now();
|
||||
// for _ in 0..100 {
|
||||
// field.step();
|
||||
// }
|
||||
// let duration = performance.now() - start;
|
||||
//
|
||||
// assert!(duration < 5000.0, "100 steps should complete within 5s");
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
}
|
||||
495
vendor/ruvector/tests/wasm-integration/learning_tests.rs
vendored
Normal file
495
vendor/ruvector/tests/wasm-integration/learning_tests.rs
vendored
Normal file
@@ -0,0 +1,495 @@
|
||||
//! Integration tests for ruvector-learning-wasm
|
||||
//!
|
||||
//! Tests for adaptive learning mechanisms:
|
||||
//! - MicroLoRA: Lightweight Low-Rank Adaptation
|
||||
//! - SONA: Self-Organizing Neural Architecture
|
||||
//! - Online learning / continual learning
|
||||
//! - Meta-learning primitives
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use wasm_bindgen_test::*;
|
||||
use super::super::common::*;
|
||||
|
||||
wasm_bindgen_test_configure!(run_in_browser);
|
||||
|
||||
// ========================================================================
|
||||
// MicroLoRA Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_micro_lora_initialization() {
|
||||
// Test MicroLoRA adapter initialization
|
||||
let base_dim = 64;
|
||||
let rank = 4; // Low rank for efficiency
|
||||
|
||||
// TODO: When MicroLoRA is implemented:
|
||||
// let lora = MicroLoRA::new(base_dim, rank);
|
||||
//
|
||||
// Verify A and B matrices are initialized
|
||||
// assert_eq!(lora.get_rank(), rank);
|
||||
// assert_eq!(lora.get_dim(), base_dim);
|
||||
//
|
||||
// Initial delta should be near zero
|
||||
// let delta = lora.compute_delta();
|
||||
// let norm: f32 = delta.iter().map(|x| x * x).sum::<f32>().sqrt();
|
||||
// assert!(norm < 1e-6, "Initial LoRA delta should be near zero");
|
||||
|
||||
assert!(rank < base_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_micro_lora_forward_pass() {
|
||||
let base_dim = 64;
|
||||
let rank = 8;
|
||||
let input = random_vector(base_dim);
|
||||
|
||||
// TODO: Test forward pass through LoRA adapter
|
||||
// let lora = MicroLoRA::new(base_dim, rank);
|
||||
// let output = lora.forward(&input);
|
||||
//
|
||||
// assert_eq!(output.len(), base_dim);
|
||||
// assert_finite(&output);
|
||||
//
|
||||
// Initially should be close to input (small adaptation)
|
||||
// let diff: f32 = input.iter().zip(output.iter())
|
||||
// .map(|(a, b)| (a - b).abs())
|
||||
// .sum::<f32>();
|
||||
// assert!(diff < 1.0, "Initial LoRA should have minimal effect");
|
||||
|
||||
assert_eq!(input.len(), base_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_micro_lora_rank_constraint() {
|
||||
// Verify low-rank constraint is maintained
|
||||
let base_dim = 128;
|
||||
let rank = 16;
|
||||
|
||||
// TODO: Test rank constraint
|
||||
// let lora = MicroLoRA::new(base_dim, rank);
|
||||
//
|
||||
// Perform some updates
|
||||
// let gradients = random_vector(base_dim);
|
||||
// lora.update(&gradients, 0.01);
|
||||
//
|
||||
// Verify delta matrix still has effective rank <= rank
|
||||
// let delta = lora.get_delta_matrix();
|
||||
// let effective_rank = compute_effective_rank(&delta);
|
||||
// assert!(effective_rank <= rank as f32 + 0.5);
|
||||
|
||||
assert!(rank < base_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_micro_lora_parameter_efficiency() {
|
||||
// LoRA should use much fewer parameters than full fine-tuning
|
||||
let base_dim = 256;
|
||||
let rank = 8;
|
||||
|
||||
// Full matrix: base_dim * base_dim = 65536 parameters
|
||||
// LoRA: base_dim * rank * 2 = 4096 parameters (16x fewer)
|
||||
|
||||
// TODO: Verify parameter count
|
||||
// let lora = MicroLoRA::new(base_dim, rank);
|
||||
// let num_params = lora.num_parameters();
|
||||
//
|
||||
// let full_params = base_dim * base_dim;
|
||||
// assert!(num_params < full_params / 10,
|
||||
// "LoRA should use 10x fewer params: {} vs {}", num_params, full_params);
|
||||
|
||||
let lora_params = base_dim * rank * 2;
|
||||
let full_params = base_dim * base_dim;
|
||||
assert!(lora_params < full_params / 10);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_micro_lora_gradient_update() {
|
||||
let base_dim = 64;
|
||||
let rank = 4;
|
||||
let learning_rate = 0.01;
|
||||
|
||||
// TODO: Test gradient-based update
|
||||
// let mut lora = MicroLoRA::new(base_dim, rank);
|
||||
//
|
||||
// let input = random_vector(base_dim);
|
||||
// let target = random_vector(base_dim);
|
||||
//
|
||||
// // Forward and compute loss
|
||||
// let output = lora.forward(&input);
|
||||
// let loss_before = mse_loss(&output, &target);
|
||||
//
|
||||
// // Backward and update
|
||||
// let gradients = compute_gradients(&output, &target);
|
||||
// lora.update(&gradients, learning_rate);
|
||||
//
|
||||
// // Loss should decrease
|
||||
// let output_after = lora.forward(&input);
|
||||
// let loss_after = mse_loss(&output_after, &target);
|
||||
// assert!(loss_after < loss_before, "Loss should decrease after update");
|
||||
|
||||
assert!(learning_rate > 0.0);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// SONA (Self-Organizing Neural Architecture) Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_sona_initialization() {
|
||||
let input_dim = 64;
|
||||
let hidden_dim = 128;
|
||||
let output_dim = 32;
|
||||
|
||||
// TODO: Test SONA initialization
|
||||
// let sona = SONA::new(input_dim, hidden_dim, output_dim);
|
||||
//
|
||||
// assert_eq!(sona.input_dim(), input_dim);
|
||||
// assert_eq!(sona.output_dim(), output_dim);
|
||||
//
|
||||
// Initial architecture should be valid
|
||||
// assert!(sona.validate_architecture());
|
||||
|
||||
assert!(hidden_dim > input_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_sona_forward_pass() {
|
||||
let input_dim = 64;
|
||||
let output_dim = 32;
|
||||
|
||||
let input = random_vector(input_dim);
|
||||
|
||||
// TODO: Test SONA forward pass
|
||||
// let sona = SONA::new(input_dim, 128, output_dim);
|
||||
// let output = sona.forward(&input);
|
||||
//
|
||||
// assert_eq!(output.len(), output_dim);
|
||||
// assert_finite(&output);
|
||||
|
||||
assert_eq!(input.len(), input_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_sona_architecture_adaptation() {
|
||||
// SONA should adapt its architecture based on data
|
||||
let input_dim = 32;
|
||||
let output_dim = 16;
|
||||
|
||||
// TODO: Test architecture adaptation
|
||||
// let mut sona = SONA::new(input_dim, 64, output_dim);
|
||||
//
|
||||
// let initial_params = sona.num_parameters();
|
||||
//
|
||||
// // Train on simple data (should simplify architecture)
|
||||
// let simple_data: Vec<(Vec<f32>, Vec<f32>)> = (0..100)
|
||||
// .map(|_| (random_vector(input_dim), random_vector(output_dim)))
|
||||
// .collect();
|
||||
//
|
||||
// sona.train(&simple_data, 10);
|
||||
// sona.adapt_architecture();
|
||||
//
|
||||
// Architecture might change
|
||||
// let new_params = sona.num_parameters();
|
||||
//
|
||||
// At least verify it still works
|
||||
// let output = sona.forward(&simple_data[0].0);
|
||||
// assert_eq!(output.len(), output_dim);
|
||||
|
||||
assert!(output_dim < input_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_sona_neuron_pruning() {
|
||||
// Test that SONA can prune unnecessary neurons
|
||||
let input_dim = 64;
|
||||
let hidden_dim = 256; // Larger than needed
|
||||
let output_dim = 32;
|
||||
|
||||
// TODO: Test neuron pruning
|
||||
// let mut sona = SONA::new(input_dim, hidden_dim, output_dim);
|
||||
//
|
||||
// // Train with low-complexity target
|
||||
// let data: Vec<_> = (0..100)
|
||||
// .map(|i| {
|
||||
// let input = random_vector(input_dim);
|
||||
// // Simple linear target
|
||||
// let output: Vec<f32> = input[..output_dim].to_vec();
|
||||
// (input, output)
|
||||
// })
|
||||
// .collect();
|
||||
//
|
||||
// sona.train(&data, 20);
|
||||
//
|
||||
// let active_neurons_before = sona.count_active_neurons();
|
||||
// sona.prune_inactive_neurons(0.01); // Prune neurons with low activity
|
||||
// let active_neurons_after = sona.count_active_neurons();
|
||||
//
|
||||
// // Should have pruned some neurons
|
||||
// assert!(active_neurons_after < active_neurons_before);
|
||||
|
||||
assert!(hidden_dim > output_dim);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_sona_connection_growth() {
|
||||
// Test that SONA can grow new connections when needed
|
||||
let input_dim = 32;
|
||||
let output_dim = 16;
|
||||
|
||||
// TODO: Test connection growth
|
||||
// let mut sona = SONA::new_sparse(input_dim, 64, output_dim, 0.1); // Start sparse
|
||||
//
|
||||
// let initial_connections = sona.count_connections();
|
||||
//
|
||||
// // Train with complex data requiring more connections
|
||||
// let complex_data = generate_complex_dataset(100, input_dim, output_dim);
|
||||
// sona.train(&complex_data, 50);
|
||||
//
|
||||
// let final_connections = sona.count_connections();
|
||||
//
|
||||
// // Should have grown connections
|
||||
// assert!(final_connections > initial_connections);
|
||||
|
||||
assert!(output_dim < input_dim);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Online / Continual Learning Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_online_learning_single_sample() {
|
||||
let dim = 32;
|
||||
|
||||
let input = random_vector(dim);
|
||||
let target = random_vector(dim);
|
||||
|
||||
// TODO: Test single-sample update
|
||||
// let mut learner = OnlineLearner::new(dim);
|
||||
//
|
||||
// let loss_before = learner.predict(&input)
|
||||
// .iter().zip(target.iter())
|
||||
// .map(|(p, t)| (p - t).powi(2))
|
||||
// .sum::<f32>();
|
||||
//
|
||||
// learner.learn_sample(&input, &target);
|
||||
//
|
||||
// let loss_after = learner.predict(&input)
|
||||
// .iter().zip(target.iter())
|
||||
// .map(|(p, t)| (p - t).powi(2))
|
||||
// .sum::<f32>();
|
||||
//
|
||||
// assert!(loss_after < loss_before);
|
||||
|
||||
assert_eq!(input.len(), target.len());
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_continual_learning_no_catastrophic_forgetting() {
|
||||
// Test that learning new tasks doesn't completely forget old ones
|
||||
let dim = 32;
|
||||
|
||||
// TODO: Test catastrophic forgetting mitigation
|
||||
// let mut learner = ContinualLearner::new(dim);
|
||||
//
|
||||
// // Task 1: Learn identity mapping
|
||||
// let task1_data: Vec<_> = (0..50)
|
||||
// .map(|_| {
|
||||
// let x = random_vector(dim);
|
||||
// (x.clone(), x)
|
||||
// })
|
||||
// .collect();
|
||||
//
|
||||
// learner.train_task(&task1_data, 10);
|
||||
// let task1_perf = learner.evaluate(&task1_data);
|
||||
//
|
||||
// // Task 2: Learn negation
|
||||
// let task2_data: Vec<_> = (0..50)
|
||||
// .map(|_| {
|
||||
// let x = random_vector(dim);
|
||||
// let y: Vec<f32> = x.iter().map(|v| -v).collect();
|
||||
// (x, y)
|
||||
// })
|
||||
// .collect();
|
||||
//
|
||||
// learner.train_task(&task2_data, 10);
|
||||
// let task1_perf_after = learner.evaluate(&task1_data);
|
||||
//
|
||||
// // Should retain some performance on task 1
|
||||
// assert!(task1_perf_after > task1_perf * 0.5,
|
||||
// "Should retain at least 50% of task 1 performance");
|
||||
|
||||
assert!(dim > 0);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_experience_replay() {
|
||||
// Test experience replay buffer
|
||||
let dim = 32;
|
||||
let buffer_size = 100;
|
||||
|
||||
// TODO: Test replay buffer
|
||||
// let mut buffer = ExperienceReplayBuffer::new(buffer_size);
|
||||
//
|
||||
// // Fill buffer
|
||||
// for _ in 0..150 {
|
||||
// let experience = Experience {
|
||||
// input: random_vector(dim),
|
||||
// target: random_vector(dim),
|
||||
// priority: 1.0,
|
||||
// };
|
||||
// buffer.add(experience);
|
||||
// }
|
||||
//
|
||||
// // Buffer should maintain max size
|
||||
// assert_eq!(buffer.len(), buffer_size);
|
||||
//
|
||||
// // Should be able to sample
|
||||
// let batch = buffer.sample(10);
|
||||
// assert_eq!(batch.len(), 10);
|
||||
|
||||
assert!(buffer_size > 0);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Meta-Learning Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_meta_learning_fast_adaptation() {
|
||||
// Test that meta-learned model can adapt quickly to new tasks
|
||||
let dim = 32;
|
||||
|
||||
// TODO: Test fast adaptation
|
||||
// let meta_learner = MetaLearner::new(dim);
|
||||
//
|
||||
// // Pre-train on distribution of tasks
|
||||
// let task_distribution = generate_task_distribution(20, dim);
|
||||
// meta_learner.meta_train(&task_distribution, 100);
|
||||
//
|
||||
// // New task (not seen during training)
|
||||
// let new_task = generate_random_task(dim);
|
||||
//
|
||||
// // Should adapt with very few samples
|
||||
// let few_shot_samples = new_task.sample(5);
|
||||
// meta_learner.adapt(&few_shot_samples);
|
||||
//
|
||||
// // Evaluate on held-out samples from new task
|
||||
// let test_samples = new_task.sample(20);
|
||||
// let accuracy = meta_learner.evaluate(&test_samples);
|
||||
//
|
||||
// assert!(accuracy > 0.6, "Should achieve >60% with 5-shot learning");
|
||||
|
||||
assert!(dim > 0);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_learning_to_learn() {
|
||||
// Test that learning rate itself is learned/adapted
|
||||
let dim = 32;
|
||||
|
||||
// TODO: Test learned learning rate
|
||||
// let mut learner = AdaptiveLearner::new(dim);
|
||||
//
|
||||
// // Initial learning rate
|
||||
// let initial_lr = learner.get_learning_rate();
|
||||
//
|
||||
// // Train on varied data
|
||||
// let data = generate_varied_dataset(100, dim);
|
||||
// learner.train_with_adaptation(&data, 50);
|
||||
//
|
||||
// // Learning rate should have been adapted
|
||||
// let final_lr = learner.get_learning_rate();
|
||||
//
|
||||
// // Not necessarily larger or smaller, just different
|
||||
// assert!((initial_lr - final_lr).abs() > 1e-6,
|
||||
// "Learning rate should adapt during training");
|
||||
|
||||
assert!(dim > 0);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Memory and Efficiency Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_micro_lora_memory_footprint() {
|
||||
// Verify MicroLoRA uses minimal memory
|
||||
let base_dim = 512;
|
||||
let rank = 16;
|
||||
|
||||
// TODO: Check memory footprint
|
||||
// let lora = MicroLoRA::new(base_dim, rank);
|
||||
//
|
||||
// // A: base_dim x rank, B: rank x base_dim
|
||||
// // Total: 2 * base_dim * rank * 4 bytes (f32)
|
||||
// let expected_bytes = 2 * base_dim * rank * 4;
|
||||
//
|
||||
// let actual_bytes = lora.memory_footprint();
|
||||
//
|
||||
// // Allow some overhead
|
||||
// assert!(actual_bytes < expected_bytes * 2,
|
||||
// "Memory footprint {} exceeds expected {}", actual_bytes, expected_bytes);
|
||||
|
||||
let expected_params = 2 * base_dim * rank;
|
||||
assert!(expected_params < base_dim * base_dim / 10);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_learning_wasm_bundle_size() {
|
||||
// Learning WASM should be <50KB gzipped
|
||||
// This is verified at build time, but we can check module is loadable
|
||||
|
||||
// TODO: Verify module loads correctly
|
||||
// assert!(ruvector_learning_wasm::version().len() > 0);
|
||||
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
// ========================================================================
|
||||
// Numerical Stability Tests
|
||||
// ========================================================================
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_gradient_clipping() {
|
||||
// Test that gradients are properly clipped to prevent explosion
|
||||
let dim = 32;
|
||||
|
||||
// TODO: Test gradient clipping
|
||||
// let mut lora = MicroLoRA::new(dim, 4);
|
||||
//
|
||||
// // Huge gradients
|
||||
// let huge_gradients: Vec<f32> = vec![1e10; dim];
|
||||
// lora.update(&huge_gradients, 0.01);
|
||||
//
|
||||
// // Parameters should still be reasonable
|
||||
// let params = lora.get_parameters();
|
||||
// assert!(params.iter().all(|p| p.abs() < 1e6),
|
||||
// "Parameters should be clipped");
|
||||
|
||||
assert!(dim > 0);
|
||||
}
|
||||
|
||||
#[wasm_bindgen_test]
|
||||
fn test_numerical_stability_long_training() {
|
||||
// Test stability over many updates
|
||||
let dim = 32;
|
||||
let num_updates = 1000;
|
||||
|
||||
// TODO: Test long training stability
|
||||
// let mut lora = MicroLoRA::new(dim, 4);
|
||||
//
|
||||
// for _ in 0..num_updates {
|
||||
// let gradients = random_vector(dim);
|
||||
// lora.update(&gradients, 0.001);
|
||||
// }
|
||||
//
|
||||
// // Should still produce finite outputs
|
||||
// let input = random_vector(dim);
|
||||
// let output = lora.forward(&input);
|
||||
// assert_finite(&output);
|
||||
|
||||
assert!(num_updates > 0);
|
||||
}
|
||||
}
|
||||
106
vendor/ruvector/tests/wasm-integration/mod.rs
vendored
Normal file
106
vendor/ruvector/tests/wasm-integration/mod.rs
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
//! WASM Integration Tests
|
||||
//!
|
||||
//! Comprehensive test suite for the new edge-net WASM crates:
|
||||
//! - ruvector-attention-unified-wasm: Multi-head attention, Mamba SSM, etc.
|
||||
//! - ruvector-learning-wasm: MicroLoRA, SONA adaptive learning
|
||||
//! - ruvector-nervous-system-wasm: Bio-inspired neural components
|
||||
//! - ruvector-economy-wasm: Economic mechanisms for agent coordination
|
||||
//! - ruvector-exotic-wasm: NAOs, Morphogenetic Networks, Time Crystals
|
||||
//!
|
||||
//! These tests are designed to run in both Node.js and browser environments
|
||||
//! using wasm-bindgen-test.
|
||||
|
||||
pub mod attention_unified_tests;
|
||||
pub mod learning_tests;
|
||||
pub mod nervous_system_tests;
|
||||
pub mod economy_tests;
|
||||
pub mod exotic_tests;
|
||||
|
||||
// Re-export common test utilities
|
||||
pub mod common {
    //! Shared utilities for the WASM integration test suites.
    //!
    //! Everything here is deterministic and dependency-free so helpers
    //! behave identically across Node.js and browser test runs.
    //! (The previous `use wasm_bindgen::prelude::*;` was unused and kept
    //! these pure helpers from compiling outside a wasm target.)

    /// Deterministic pseudo-random `f32` vector of length `dim`, values in
    /// `[-1.0, 1.0)`.
    ///
    /// Seeded from a hash of `dim`, so repeated calls with the same `dim`
    /// always return identical data — tests stay reproducible without an
    /// RNG dependency.
    pub fn random_vector(dim: usize) -> Vec<f32> {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let mut hasher = DefaultHasher::new();
        dim.hash(&mut hasher);
        let seed = hasher.finish();

        (0..dim)
            .map(|i| {
                // Per-index hash residue in [0, 1000), mapped to [-1, 1).
                let x = ((seed.wrapping_mul(i as u64 + 1)) % 1000) as f32 / 1000.0;
                x * 2.0 - 1.0 // Range [-1, 1]
            })
            .collect()
    }

    /// Panic unless `a` and `b` have the same length and agree element-wise
    /// within `epsilon`.
    pub fn assert_vectors_approx_eq(a: &[f32], b: &[f32], epsilon: f32) {
        assert_eq!(a.len(), b.len(), "Vector lengths must match");
        for (i, (&ai, &bi)) in a.iter().zip(b.iter()).enumerate() {
            assert!(
                (ai - bi).abs() < epsilon,
                "Vectors differ at index {}: {} vs {} (epsilon: {})",
                i, ai, bi, epsilon
            );
        }
    }

    /// Panic if any value in `v` is NaN or infinite.
    pub fn assert_finite(v: &[f32]) {
        for (i, &x) in v.iter().enumerate() {
            assert!(x.is_finite(), "Value at index {} is not finite: {}", i, x);
        }
    }

    /// Panic if any value in `v` falls outside the inclusive range
    /// `[min, max]`.
    pub fn assert_in_range(v: &[f32], min: f32, max: f32) {
        for (i, &x) in v.iter().enumerate() {
            assert!(
                x >= min && x <= max,
                "Value at index {} is out of range [{}, {}]: {}",
                i, min, max, x
            );
        }
    }

    /// Build an identity-like (queries, keys, values) triple for attention
    /// tests.
    ///
    /// Row `i` is the one-hot basis vector `e_i` when `i < dim`, and the
    /// zero vector otherwise; keys and values are clones of the queries.
    pub fn create_test_attention_pattern(seq_len: usize, dim: usize) -> (Vec<Vec<f32>>, Vec<Vec<f32>>, Vec<Vec<f32>>) {
        let queries: Vec<Vec<f32>> = (0..seq_len)
            .map(|i| {
                let mut v = vec![0.0; dim];
                if i < dim {
                    v[i] = 1.0;
                }
                v
            })
            .collect();

        let keys = queries.clone();
        let values = queries.clone();

        (queries, keys, values)
    }

    /// Numerically stable softmax, used to verify attention weights.
    ///
    /// The maximum is subtracted before exponentiating to avoid overflow.
    /// Returns an empty vector for empty input.
    pub fn softmax(v: &[f32]) -> Vec<f32> {
        let max = v.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
        // Compute each exponential once and reuse it for normalization
        // (previously every exp was evaluated twice: once for the sum and
        // once per output element).
        let exps: Vec<f32> = v.iter().map(|x| (x - max).exp()).collect();
        let exp_sum: f32 = exps.iter().sum();
        exps.into_iter().map(|e| e / exp_sum).collect()
    }

    /// Cosine similarity between `a` and `b`.
    ///
    /// Defined as 0.0 when either vector has zero norm, so degenerate inputs
    /// never produce NaN.
    pub fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
        let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
        let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
        let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
        if norm_a == 0.0 || norm_b == 0.0 {
            0.0
        } else {
            dot / (norm_a * norm_b)
        }
    }
}
|
||||
527
vendor/ruvector/tests/wasm-integration/nervous_system_tests.rs
vendored
Normal file
527
vendor/ruvector/tests/wasm-integration/nervous_system_tests.rs
vendored
Normal file
@@ -0,0 +1,527 @@
|
||||
//! Integration tests for ruvector-nervous-system-wasm
|
||||
//!
|
||||
//! Tests for bio-inspired neural components:
|
||||
//! - HDC (Hyperdimensional Computing)
|
||||
//! - BTSP (Behavioral Time-Scale Plasticity)
|
||||
//! - Spiking Neural Networks
|
||||
//! - Neuromorphic processing primitives
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Placeholder integration tests for ruvector-nervous-system-wasm:
    //! HDC, BTSP, spiking networks, and neuromorphic primitives. Each body
    //! keeps a sketch of the intended test plus a parameter sanity check
    //! until the crate is implemented.

    use wasm_bindgen_test::*;
    use super::super::common::*;

    wasm_bindgen_test_configure!(run_in_browser);

    // ------------------------------------------------------------------
    // HDC (Hyperdimensional Computing)
    // ------------------------------------------------------------------

    #[wasm_bindgen_test]
    fn test_hdc_vector_encoding() {
        // HDC relies on very high dimensions where random hypervectors are
        // near-orthogonal.
        let dim = 10000;

        // TODO(pending HDCEncoder): encode symbols "A" and "B"; assert
        // |cosine| < 0.1 between them, and that re-encoding "A" reproduces
        // the same vector (approx-eq at 1e-6).

        assert!(dim >= 1000);
    }

    #[wasm_bindgen_test]
    fn test_hdc_bundling() {
        // Bundling (element-wise addition) keeps similarity to components.
        let dim = 10000;

        // TODO(pending HDC::bundle): bundle hypervectors for "A", "B", "C"
        // and assert cosine similarity to each component exceeds 0.3.

        assert!(dim > 0);
    }

    #[wasm_bindgen_test]
    fn test_hdc_binding() {
        // Binding (XOR / multiplication) yields a vector orthogonal to both
        // inputs, and binding is its own inverse.
        let dim = 10000;

        // TODO(pending HDC::bind): assert |cosine(bound, A)| and
        // |cosine(bound, B)| < 0.1, then rebind with B and assert
        // cosine(recovered, A) > 0.9.

        assert!(dim > 0);
    }

    #[wasm_bindgen_test]
    fn test_hdc_permutation() {
        // Permutation encodes sequence position.
        let dim = 10000;

        // TODO(pending HDC::permute): assert a permuted hypervector is
        // near-orthogonal to the original (|cosine| < 0.1) and that
        // permute_inverse recovers it exactly (approx-eq at 1e-6).

        assert!(dim > 0);
    }

    #[wasm_bindgen_test]
    fn test_hdc_associative_memory() {
        // HDC memory should retrieve values by (possibly noisy) key.
        let dim = 10000;

        // TODO(pending HDCAssociativeMemory): store two key/value pairs;
        // assert retrieval by exact key reaches cosine > 0.8 with the stored
        // value, and retrieval by a slightly-noised key still exceeds 0.6.

        assert!(dim > 0);
    }

    // ------------------------------------------------------------------
    // BTSP (Behavioral Time-Scale Plasticity)
    // ------------------------------------------------------------------

    #[wasm_bindgen_test]
    fn test_btsp_basic() {
        // The BTSP rule must actually modify the network.
        let num_inputs = 100;
        let num_outputs = 10;

        // TODO(pending BTSPNetwork): forward a random input, update the
        // eligibility trace, apply a behavioral signal (1.0), and assert the
        // summed absolute output change exceeds 0.01.

        assert!(num_inputs > 0);
    }

    #[wasm_bindgen_test]
    fn test_btsp_eligibility_trace() {
        // Eligibility traces must decay over time.
        let num_inputs = 50;

        // TODO(pending BTSPNetwork): capture the trace right after an input,
        // step 10 time units, and assert the trace norm shrank.

        assert!(num_inputs > 0);
    }

    #[wasm_bindgen_test]
    fn test_btsp_one_shot_learning() {
        // A plateau potential should enable one-shot association.
        let num_inputs = 100;
        let num_outputs = 10;

        // TODO(pending BTSPNetwork): present an input, apply a plateau
        // potential to output neuron 5, reset state, re-present the input,
        // and assert neuron 5 is the most active output.

        assert!(num_outputs > 0);
    }

    // ------------------------------------------------------------------
    // Spiking Neural Networks
    // ------------------------------------------------------------------

    #[wasm_bindgen_test]
    fn test_spiking_neuron_lif() {
        // Leaky integrate-and-fire: sub-threshold current never spikes,
        // super-threshold current eventually does.
        let threshold = 1.0;
        let tau_m = 10.0; // Membrane time constant

        // TODO(pending LIFNeuron): inject 0.5 for 10 steps and assert no
        // spike; reset, inject 2.0, and assert a spike within 20 steps.

        assert!(threshold > 0.0);
    }

    #[wasm_bindgen_test]
    fn test_spiking_network_propagation() {
        // Spikes must travel layer to layer, respecting causality.
        let num_layers = 3;
        let neurons_per_layer = 10;

        // TODO(pending SpikingNetwork): drive the first layer with strong
        // current for 50 steps; assert both the first and last layers spike,
        // and that the output layer's first spike comes after the input
        // layer's.

        assert!(num_layers > 0);
    }

    #[wasm_bindgen_test]
    fn test_stdp_learning() {
        // Spike-timing-dependent plasticity: pre-before-post potentiates,
        // post-before-pre depresses.
        let a_plus = 0.01; // Potentiation coefficient
        let a_minus = 0.01; // Depression coefficient
        let tau = 20.0; // Time constant

        // TODO(pending STDPRule): assert compute_weight_change(0.0, 10.0)
        // is positive and compute_weight_change(10.0, 0.0) is negative.

        assert!(tau > 0.0);
    }

    #[wasm_bindgen_test]
    fn test_spiking_temporal_coding() {
        // Temporal coding: higher encoded values spike earlier.
        let num_neurons = 10;

        // TODO(pending SpikingNetwork::temporal_coding): encode an
        // increasing ramp of values, record first-spike times over 100
        // steps, and assert spike times are monotonically decreasing in
        // value.

        assert!(num_neurons > 0);
    }

    // ------------------------------------------------------------------
    // Neuromorphic Processing
    // ------------------------------------------------------------------

    #[wasm_bindgen_test]
    fn test_neuromorphic_attention() {
        // Neuromorphic attention must produce a finite, correctly-shaped
        // output.
        let dim = 64;
        let num_heads = 4;

        // TODO(pending NeuromorphicAttention): forward a random query over
        // 10 random keys/values; assert output length == dim and
        // assert_finite on it.

        assert!(dim > 0);
    }

    #[wasm_bindgen_test]
    fn test_reservoir_computing() {
        // Echo state network: state stays finite, readout is trainable.
        let input_dim = 10;
        let reservoir_size = 100;
        let output_dim = 5;

        // TODO(pending ReservoirComputer): feed a 50-step random sequence,
        // assert get_state() has `reservoir_size` finite values, train the
        // readout, and assert predict() has `output_dim` elements.

        assert!(reservoir_size > 0);
    }

    // ------------------------------------------------------------------
    // Integration
    // ------------------------------------------------------------------

    #[wasm_bindgen_test]
    fn test_hdc_snn_integration() {
        // HDC encoding feeding a spiking classifier for efficient inference.
        let hd_dim = 1000;
        let num_classes = 10;

        // TODO(pending HDCEncoder + HDCClassifier::to_spiking): encode a
        // random input and assert the SNN output has `num_classes` entries.

        assert!(num_classes > 0);
    }

    #[wasm_bindgen_test]
    fn test_energy_efficiency() {
        // Event-driven neuromorphic attention should use fewer operations
        // than standard attention.
        let dim = 64;
        let seq_len = 100;

        // TODO(pending count_operations on both attention variants): assert
        // the neuromorphic operation count is strictly lower.

        assert!(seq_len > 0);
    }

    // ------------------------------------------------------------------
    // WASM-Specific
    // ------------------------------------------------------------------

    #[wasm_bindgen_test]
    fn test_nervous_system_wasm_initialization() {
        // Module init hook.
        // TODO: ruvector_nervous_system_wasm::init();
        //       assert!(ruvector_nervous_system_wasm::version().len() > 0);

        assert!(true);
    }

    #[wasm_bindgen_test]
    fn test_nervous_system_serialization() {
        // Round-tripping a network through JSON must preserve behavior.
        let num_neurons = 10;

        // TODO(pending SpikingNetwork::{to_json, from_json}): serialize,
        // deserialize, and assert both networks produce approx-equal output
        // (1e-6) on the same random input.

        assert!(num_neurons > 0);
    }
}
|
||||
Reference in New Issue
Block a user