// File: wifi-densepose/vendor/ruvector/tests/wasm-integration/learning_tests.rs
//! Integration tests for ruvector-learning-wasm
//!
//! Tests for adaptive learning mechanisms:
//! - MicroLoRA: Lightweight Low-Rank Adaptation
//! - SONA: Self-Organizing Neural Architecture
//! - Online learning / continual learning
//! - Meta-learning primitives
#[cfg(test)]
mod tests {
    use wasm_bindgen_test::*;
    use super::super::common::*;

    wasm_bindgen_test_configure!(run_in_browser);
    // ========================================================================
    // MicroLoRA Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_micro_lora_initialization() {
        // Test MicroLoRA adapter initialization.
        let base_dim = 64;
        let rank = 4; // Low rank for efficiency

        // TODO: When MicroLoRA is implemented:
        // let lora = MicroLoRA::new(base_dim, rank);
        //
        // // Verify that the A and B matrices are initialized.
        // assert_eq!(lora.get_rank(), rank);
        // assert_eq!(lora.get_dim(), base_dim);
        //
        // // The initial delta should be near zero.
        // let delta = lora.compute_delta();
        // let norm: f32 = delta.iter().map(|x| x * x).sum::<f32>().sqrt();
        // assert!(norm < 1e-6, "Initial LoRA delta should be near zero");

        assert!(rank < base_dim);
    }

    #[wasm_bindgen_test]
    fn test_micro_lora_forward_pass() {
        let base_dim = 64;
        let rank = 8;
        let input = random_vector(base_dim);

        // TODO: Test the forward pass through the LoRA adapter:
        // let lora = MicroLoRA::new(base_dim, rank);
        // let output = lora.forward(&input);
        //
        // assert_eq!(output.len(), base_dim);
        // assert_finite(&output);
        //
        // // Initially the output should be close to the input (small adaptation).
        // let diff: f32 = input.iter().zip(output.iter())
        //     .map(|(a, b)| (a - b).abs())
        //     .sum::<f32>();
        // assert!(diff < 1.0, "Initial LoRA should have minimal effect");

        assert!(rank < base_dim);
        assert_eq!(input.len(), base_dim);
    }

    #[wasm_bindgen_test]
    fn test_micro_lora_rank_constraint() {
        // Verify that the low-rank constraint is maintained.
        let base_dim = 128;
        let rank = 16;

        // TODO: Test the rank constraint:
        // let mut lora = MicroLoRA::new(base_dim, rank);
        //
        // // Perform some updates.
        // let gradients = random_vector(base_dim);
        // lora.update(&gradients, 0.01);
        //
        // // Verify that the delta matrix still has effective rank <= rank.
        // let delta = lora.get_delta_matrix();
        // let effective_rank = compute_effective_rank(&delta);
        // assert!(effective_rank <= rank as f32 + 0.5);

        assert!(rank < base_dim);
    }

    #[wasm_bindgen_test]
    fn test_micro_lora_parameter_efficiency() {
        // LoRA should use far fewer parameters than full fine-tuning.
        let base_dim = 256;
        let rank = 8;

        // Full matrix: base_dim * base_dim = 65,536 parameters.
        // LoRA: base_dim * rank * 2 = 4,096 parameters (16x fewer).

        // TODO: Verify the parameter count:
        // let lora = MicroLoRA::new(base_dim, rank);
        // let num_params = lora.num_parameters();
        //
        // let full_params = base_dim * base_dim;
        // assert!(num_params < full_params / 10,
        //     "LoRA should use 10x fewer params: {} vs {}", num_params, full_params);

        let lora_params = base_dim * rank * 2;
        let full_params = base_dim * base_dim;
        assert!(lora_params < full_params / 10);
    }

    #[wasm_bindgen_test]
    fn test_micro_lora_gradient_update() {
        let base_dim = 64;
        let rank = 4;
        let learning_rate = 0.01;

        // TODO: Test the gradient-based update:
        // let mut lora = MicroLoRA::new(base_dim, rank);
        //
        // let input = random_vector(base_dim);
        // let target = random_vector(base_dim);
        //
        // // Forward pass and loss.
        // let output = lora.forward(&input);
        // let loss_before = mse_loss(&output, &target);
        //
        // // Backward pass and update.
        // let gradients = compute_gradients(&output, &target);
        // lora.update(&gradients, learning_rate);
        //
        // // The loss should decrease.
        // let output_after = lora.forward(&input);
        // let loss_after = mse_loss(&output_after, &target);
        // assert!(loss_after < loss_before, "Loss should decrease after update");

        assert!(rank < base_dim);
        assert!(learning_rate > 0.0);
    }
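
    // ------------------------------------------------------------------------
    // Sketch: a minimal adapter of the shape the MicroLoRA tests above assume.
    // This is an illustrative stand-in, not the crate's implementation; the
    // names and API (`new`, `forward`, `num_parameters`) are assumptions taken
    // from the TODO comments. Standard LoRA init (A small, B zero) makes the
    // initial delta exactly zero, which is what test_micro_lora_initialization
    // and test_micro_lora_forward_pass expect.
    // ------------------------------------------------------------------------
    #[allow(dead_code)]
    struct SketchMicroLora {
        dim: usize,
        rank: usize,
        a: Vec<f32>, // rank x dim, small deterministic init
        b: Vec<f32>, // dim x rank, zero init => B * A = 0 at the start
    }

    #[allow(dead_code)]
    impl SketchMicroLora {
        fn new(dim: usize, rank: usize) -> Self {
            // Deterministic pseudo-random A so the sketch needs no RNG crate.
            let a = (0..rank * dim)
                .map(|i| ((i % 13) as f32 - 6.0) * 1e-3)
                .collect();
            Self { dim, rank, a, b: vec![0.0; dim * rank] }
        }

        /// 2 * dim * rank parameters, versus dim * dim for a full matrix.
        fn num_parameters(&self) -> usize {
            self.a.len() + self.b.len()
        }

        /// y = x + B(Ax): the low-rank residual applied to the input.
        fn forward(&self, x: &[f32]) -> Vec<f32> {
            let mut h = vec![0.0f32; self.rank];
            for r in 0..self.rank {
                for d in 0..self.dim {
                    h[r] += self.a[r * self.dim + d] * x[d];
                }
            }
            let mut y = x.to_vec();
            for d in 0..self.dim {
                for r in 0..self.rank {
                    y[d] += self.b[d * self.rank + r] * h[r];
                }
            }
            y
        }
    }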

    // ========================================================================
    // SONA (Self-Organizing Neural Architecture) Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_sona_initialization() {
        let input_dim = 64;
        let hidden_dim = 128;
        let output_dim = 32;

        // TODO: Test SONA initialization:
        // let sona = SONA::new(input_dim, hidden_dim, output_dim);
        //
        // assert_eq!(sona.input_dim(), input_dim);
        // assert_eq!(sona.output_dim(), output_dim);
        //
        // // The initial architecture should be valid.
        // assert!(sona.validate_architecture());

        assert!(hidden_dim > input_dim);
        assert!(output_dim < input_dim);
    }

    #[wasm_bindgen_test]
    fn test_sona_forward_pass() {
        let input_dim = 64;
        let output_dim = 32;
        let input = random_vector(input_dim);

        // TODO: Test the SONA forward pass:
        // let sona = SONA::new(input_dim, 128, output_dim);
        // let output = sona.forward(&input);
        //
        // assert_eq!(output.len(), output_dim);
        // assert_finite(&output);

        assert!(output_dim < input_dim);
        assert_eq!(input.len(), input_dim);
    }

    #[wasm_bindgen_test]
    fn test_sona_architecture_adaptation() {
        // SONA should adapt its architecture based on the data.
        let input_dim = 32;
        let output_dim = 16;

        // TODO: Test architecture adaptation:
        // let mut sona = SONA::new(input_dim, 64, output_dim);
        //
        // let initial_params = sona.num_parameters();
        //
        // // Train on simple data (should simplify the architecture).
        // let simple_data: Vec<(Vec<f32>, Vec<f32>)> = (0..100)
        //     .map(|_| (random_vector(input_dim), random_vector(output_dim)))
        //     .collect();
        //
        // sona.train(&simple_data, 10);
        // sona.adapt_architecture();
        //
        // // The architecture may change; at minimum, verify it still works.
        // let _new_params = sona.num_parameters();
        // let output = sona.forward(&simple_data[0].0);
        // assert_eq!(output.len(), output_dim);

        assert!(output_dim < input_dim);
    }

    #[wasm_bindgen_test]
    fn test_sona_neuron_pruning() {
        // Test that SONA can prune unnecessary neurons.
        let input_dim = 64;
        let hidden_dim = 256; // Larger than needed
        let output_dim = 32;

        // TODO: Test neuron pruning:
        // let mut sona = SONA::new(input_dim, hidden_dim, output_dim);
        //
        // // Train with a low-complexity target.
        // let data: Vec<_> = (0..100)
        //     .map(|_| {
        //         let input = random_vector(input_dim);
        //         // Simple linear target: copy the first output_dim components.
        //         let output: Vec<f32> = input[..output_dim].to_vec();
        //         (input, output)
        //     })
        //     .collect();
        //
        // sona.train(&data, 20);
        //
        // let active_neurons_before = sona.count_active_neurons();
        // sona.prune_inactive_neurons(0.01); // Prune neurons with low activity.
        // let active_neurons_after = sona.count_active_neurons();
        //
        // // Some neurons should have been pruned.
        // assert!(active_neurons_after < active_neurons_before);

        assert!(hidden_dim > output_dim);
    }

    #[wasm_bindgen_test]
    fn test_sona_connection_growth() {
        // Test that SONA can grow new connections when needed.
        let input_dim = 32;
        let output_dim = 16;

        // TODO: Test connection growth:
        // let mut sona = SONA::new_sparse(input_dim, 64, output_dim, 0.1); // Start sparse.
        //
        // let initial_connections = sona.count_connections();
        //
        // // Train on complex data that requires more connections.
        // let complex_data = generate_complex_dataset(100, input_dim, output_dim);
        // sona.train(&complex_data, 50);
        //
        // let final_connections = sona.count_connections();
        //
        // // Connections should have been grown.
        // assert!(final_connections > initial_connections);

        assert!(output_dim < input_dim);
    }
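
    // Sketch: the activity-threshold pruning rule that test_sona_neuron_pruning
    // describes. A hypothetical helper, not SONA's actual API: given each
    // hidden neuron's mean absolute activation over a calibration set, keep
    // only the neurons above the threshold.
    #[allow(dead_code)]
    fn active_neuron_mask(mean_abs_activation: &[f32], threshold: f32) -> Vec<bool> {
        mean_abs_activation.iter().map(|&a| a >= threshold).collect()
    }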

    // ========================================================================
    // Online / Continual Learning Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_online_learning_single_sample() {
        let dim = 32;
        let input = random_vector(dim);
        let target = random_vector(dim);

        // TODO: Test a single-sample update:
        // let mut learner = OnlineLearner::new(dim);
        //
        // let loss_before = learner.predict(&input)
        //     .iter().zip(target.iter())
        //     .map(|(p, t)| (p - t).powi(2))
        //     .sum::<f32>();
        //
        // learner.learn_sample(&input, &target);
        //
        // let loss_after = learner.predict(&input)
        //     .iter().zip(target.iter())
        //     .map(|(p, t)| (p - t).powi(2))
        //     .sum::<f32>();
        //
        // assert!(loss_after < loss_before);

        assert_eq!(input.len(), target.len());
    }
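
    // Sketch: the kind of single-sample update an OnlineLearner could use.
    // This is a plain delta-rule (LMS) step on a linear predictor, shown only
    // to make the "loss decreases after one sample" expectation concrete; the
    // real learner's update is unspecified. Returns the squared error measured
    // before the update.
    #[allow(dead_code)]
    fn lms_step(weights: &mut [f32], input: &[f32], target: f32, lr: f32) -> f32 {
        let pred: f32 = weights.iter().zip(input).map(|(w, x)| w * x).sum();
        let err = target - pred;
        for (w, x) in weights.iter_mut().zip(input) {
            *w += lr * err * x; // gradient step on 0.5 * err^2
        }
        err * err
    }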

    #[wasm_bindgen_test]
    fn test_continual_learning_no_catastrophic_forgetting() {
        // Learning new tasks should not completely erase old ones.
        let dim = 32;

        // TODO: Test mitigation of catastrophic forgetting:
        // let mut learner = ContinualLearner::new(dim);
        //
        // // Task 1: learn the identity mapping.
        // let task1_data: Vec<_> = (0..50)
        //     .map(|_| {
        //         let x = random_vector(dim);
        //         (x.clone(), x)
        //     })
        //     .collect();
        //
        // learner.train_task(&task1_data, 10);
        // let task1_perf = learner.evaluate(&task1_data);
        //
        // // Task 2: learn negation.
        // let task2_data: Vec<_> = (0..50)
        //     .map(|_| {
        //         let x = random_vector(dim);
        //         let y: Vec<f32> = x.iter().map(|v| -v).collect();
        //         (x, y)
        //     })
        //     .collect();
        //
        // learner.train_task(&task2_data, 10);
        // let task1_perf_after = learner.evaluate(&task1_data);
        //
        // // Some performance on task 1 should be retained.
        // assert!(task1_perf_after > task1_perf * 0.5,
        //     "Should retain at least 50% of task 1 performance");

        assert!(dim > 0);
    }
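
    // Sketch: an EWC-style quadratic penalty, one standard mitigation for
    // catastrophic forgetting. Whether ContinualLearner would use EWC, replay,
    // or something else is an open assumption. The penalty discourages moving
    // away from the old task's parameters, weighted by their (diagonal) Fisher
    // importance: lambda/2 * sum_i F_i * (theta_i - theta*_i)^2.
    #[allow(dead_code)]
    fn ewc_penalty(params: &[f32], anchor: &[f32], fisher: &[f32], lambda: f32) -> f32 {
        0.5 * lambda
            * params
                .iter()
                .zip(anchor)
                .zip(fisher)
                .map(|((p, a), f)| f * (p - a).powi(2))
                .sum::<f32>()
    }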

    #[wasm_bindgen_test]
    fn test_experience_replay() {
        // Test the experience replay buffer.
        let dim = 32;
        let buffer_size = 100;

        // TODO: Test the replay buffer:
        // let mut buffer = ExperienceReplayBuffer::new(buffer_size);
        //
        // // Overfill the buffer.
        // for _ in 0..150 {
        //     let experience = Experience {
        //         input: random_vector(dim),
        //         target: random_vector(dim),
        //         priority: 1.0,
        //     };
        //     buffer.add(experience);
        // }
        //
        // // The buffer should cap at its maximum size.
        // assert_eq!(buffer.len(), buffer_size);
        //
        // // Sampling should work.
        // let batch = buffer.sample(10);
        // assert_eq!(batch.len(), 10);

        assert!(dim > 0);
        assert!(buffer_size > 0);
    }
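
    // Sketch: a bounded FIFO replay buffer with the add/len/sample surface the
    // test assumes. The uniform sampling is a placeholder: the prime-stride
    // index below only stands in for a real RNG so the sketch stays
    // dependency-free.
    #[allow(dead_code)]
    struct SketchReplayBuffer<T> {
        capacity: usize,
        items: std::collections::VecDeque<T>,
    }

    #[allow(dead_code)]
    impl<T: Clone> SketchReplayBuffer<T> {
        fn new(capacity: usize) -> Self {
            Self { capacity, items: std::collections::VecDeque::new() }
        }

        fn add(&mut self, item: T) {
            if self.items.len() == self.capacity {
                self.items.pop_front(); // evict the oldest experience
            }
            self.items.push_back(item);
        }

        fn len(&self) -> usize {
            self.items.len()
        }

        /// Draw n items from a non-empty buffer (deterministic stand-in).
        fn sample(&self, n: usize) -> Vec<T> {
            debug_assert!(!self.items.is_empty());
            (0..n)
                .map(|i| self.items[(i * 7919) % self.items.len()].clone())
                .collect()
        }
    }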

    // ========================================================================
    // Meta-Learning Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_meta_learning_fast_adaptation() {
        // A meta-learned model should adapt quickly to new tasks.
        let dim = 32;

        // TODO: Test fast adaptation:
        // let mut meta_learner = MetaLearner::new(dim);
        //
        // // Pre-train on a distribution of tasks.
        // let task_distribution = generate_task_distribution(20, dim);
        // meta_learner.meta_train(&task_distribution, 100);
        //
        // // A new task, not seen during training.
        // let new_task = generate_random_task(dim);
        //
        // // Adaptation should need only a few samples.
        // let few_shot_samples = new_task.sample(5);
        // meta_learner.adapt(&few_shot_samples);
        //
        // // Evaluate on held-out samples from the new task.
        // let test_samples = new_task.sample(20);
        // let accuracy = meta_learner.evaluate(&test_samples);
        //
        // assert!(accuracy > 0.6, "Should achieve >60% accuracy with 5-shot learning");

        assert!(dim > 0);
    }

    #[wasm_bindgen_test]
    fn test_learning_to_learn() {
        // The learning rate itself should be learned/adapted.
        let dim = 32;

        // TODO: Test a learned learning rate:
        // let mut learner = AdaptiveLearner::new(dim);
        //
        // // Record the initial learning rate.
        // let initial_lr = learner.get_learning_rate();
        //
        // // Train on varied data.
        // let data = generate_varied_dataset(100, dim);
        // learner.train_with_adaptation(&data, 50);
        //
        // // The learning rate should have adapted: not necessarily larger or
        // // smaller, just different.
        // let final_lr = learner.get_learning_rate();
        // assert!((initial_lr - final_lr).abs() > 1e-6,
        //     "Learning rate should adapt during training");

        assert!(dim > 0);
    }
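
    // Sketch: a Reptile-style outer step, one concrete meta-learning update
    // consistent with these tests' intent (the MetaLearner/AdaptiveLearner
    // APIs above are assumptions). After inner-loop adaptation on a task, the
    // meta-parameters move a fraction of the way toward the adapted
    // parameters: theta <- theta + eps * (theta_adapted - theta).
    #[allow(dead_code)]
    fn reptile_outer_step(meta: &mut [f32], adapted: &[f32], meta_lr: f32) {
        for (m, a) in meta.iter_mut().zip(adapted) {
            *m += meta_lr * (a - *m);
        }
    }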

    // ========================================================================
    // Memory and Efficiency Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_micro_lora_memory_footprint() {
        // Verify that MicroLoRA uses minimal memory.
        let base_dim = 512;
        let rank = 16;

        // TODO: Check the memory footprint:
        // let lora = MicroLoRA::new(base_dim, rank);
        //
        // // A: base_dim x rank, B: rank x base_dim.
        // // Total: 2 * base_dim * rank * 4 bytes (f32).
        // let expected_bytes = 2 * base_dim * rank * 4;
        // let actual_bytes = lora.memory_footprint();
        //
        // // Allow some overhead.
        // assert!(actual_bytes < expected_bytes * 2,
        //     "Memory footprint {} exceeds expected {}", actual_bytes, expected_bytes);

        let expected_params = 2 * base_dim * rank;
        assert!(expected_params < base_dim * base_dim / 10);
    }

    #[wasm_bindgen_test]
    fn test_learning_wasm_bundle_size() {
        // The learning WASM bundle should be <50 KB gzipped. That is verified
        // at build time; here we can only check that the module is loadable.

        // TODO: Verify the module loads correctly:
        // assert!(!ruvector_learning_wasm::version().is_empty());
    }

    // ========================================================================
    // Numerical Stability Tests
    // ========================================================================

    #[wasm_bindgen_test]
    fn test_gradient_clipping() {
        // Gradients should be clipped to prevent parameter explosion.
        let dim = 32;

        // TODO: Test gradient clipping:
        // let mut lora = MicroLoRA::new(dim, 4);
        //
        // // Feed in huge gradients.
        // let huge_gradients: Vec<f32> = vec![1e10; dim];
        // lora.update(&huge_gradients, 0.01);
        //
        // // Parameters should remain bounded.
        // let params = lora.get_parameters();
        // assert!(params.iter().all(|p| p.abs() < 1e6),
        //     "Parameters should be clipped");

        assert!(dim > 0);
    }
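
    // Sketch: global-norm gradient clipping, the behavior test_gradient_clipping
    // describes (whether MicroLoRA would clip by norm or by value is an
    // assumption). If the gradient norm exceeds max_norm, rescale the whole
    // vector so its norm equals max_norm.
    #[allow(dead_code)]
    fn clip_by_global_norm(grads: &mut [f32], max_norm: f32) {
        let norm = grads.iter().map(|g| g * g).sum::<f32>().sqrt();
        if norm > max_norm {
            let scale = max_norm / norm;
            for g in grads.iter_mut() {
                *g *= scale;
            }
        }
    }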

    #[wasm_bindgen_test]
    fn test_numerical_stability_long_training() {
        // Test stability over many updates.
        let dim = 32;
        let num_updates = 1000;

        // TODO: Test long-training stability:
        // let mut lora = MicroLoRA::new(dim, 4);
        //
        // for _ in 0..num_updates {
        //     let gradients = random_vector(dim);
        //     lora.update(&gradients, 0.001);
        // }
        //
        // // Outputs should still be finite.
        // let input = random_vector(dim);
        // let output = lora.forward(&input);
        // assert_finite(&output);

        assert!(dim > 0);
        assert!(num_updates > 0);
    }
}