Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,293 @@
//! Attention Mechanism Latency Benchmarks
//!
//! Benchmark each attention mechanism at 100 tokens.
//! Target: <100 microseconds per mechanism.
//!
//! Run with: cargo bench --bench attention_latency
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId, Throughput};
/// Generate random f32 vector for benchmarking
/// Deterministic pseudo-random f32 vector with entries in roughly [-1, 1).
///
/// Hashes `(seed, index)` with a multiplicative mix (the LCG constant
/// 0x5DEECE66D) so benchmark inputs are reproducible without an RNG crate.
fn random_vector(dim: usize, seed: u64) -> Vec<f32> {
    let mut out = Vec::with_capacity(dim);
    for idx in 0..dim {
        let hashed = seed
            .wrapping_mul(idx as u64 + 1)
            .wrapping_mul(0x5DEECE66D);
        // Bucket into [0, 1000), then rescale to [-1, 1).
        let bucket = (hashed % 1000) as f32;
        out.push(bucket / 500.0 - 1.0);
    }
    out
}
/// Generate batch of random vectors
/// Build a batch of `count` deterministic vectors of length `dim`.
///
/// Each row uses `seed + row_index` so rows differ but the batch is
/// reproducible across benchmark runs.
fn random_vectors(count: usize, dim: usize, seed: u64) -> Vec<Vec<f32>> {
    let mut batch = Vec::with_capacity(count);
    for offset in 0..count {
        batch.push(random_vector(dim, seed.wrapping_add(offset as u64)));
    }
    batch
}
/// Benchmark each attention-mechanism placeholder at the 100-token target.
///
/// Every `bench_function` body is an explicit stand-in workload (the real
/// mechanisms are TODO). The loop shapes approximate each mechanism's cost
/// profile (O(n^2·d) for multi-head, O(n·d) for the recurrent variants), so
/// the code is left exactly as written — restructuring it would change what
/// is being timed.
fn bench_all_attention_mechanisms(c: &mut Criterion) {
    let mut group = c.benchmark_group("attention_mechanisms");
    // Test parameters
    let dim = 64;
    let num_heads = 8; // NOTE(review): unused until MultiHeadAttention exists
    let seq_len = 100; // Target: 100 tokens
    // Generate test data
    let queries = random_vectors(seq_len, dim, 42);
    let keys = random_vectors(seq_len, dim, 123);
    let values = random_vectors(seq_len, dim, 456);
    // Set throughput for tokens/second calculation
    group.throughput(Throughput::Elements(seq_len as u64));
    // ========================================================================
    // Multi-Head Attention Benchmark
    // ========================================================================
    group.bench_function("multi_head_attention", |b| {
        // TODO: When implemented:
        // let attention = MultiHeadAttention::new(dim, num_heads);
        b.iter(|| {
            // TODO: Replace with actual attention computation
            // attention.forward(&queries, &keys, &values)
            // Placeholder: simulate attention computation
            // O(seq_len^2 * dim): every query dotted against every key,
            // result accumulated into a single dim-length output.
            let mut output = vec![0.0f32; dim];
            for q in &queries {
                for (k, v) in keys.iter().zip(values.iter()) {
                    let score: f32 = q.iter().zip(k.iter()).map(|(a, b)| a * b).sum();
                    for (o, vi) in output.iter_mut().zip(v.iter()) {
                        *o += score * vi * 0.001; // 0.001 keeps the accumulator bounded
                    }
                }
            }
            output
        });
    });
    // ========================================================================
    // Mamba SSM Benchmark
    // ========================================================================
    group.bench_function("mamba_ssm", |b| {
        // TODO: When implemented:
        // let mamba = MambaSSM::new(dim);
        b.iter(|| {
            // TODO: Replace with actual Mamba SSM computation
            // mamba.forward(&queries)
            // Placeholder: simulate O(n) selective scan
            // (exponential moving average over the sequence)
            let mut hidden = vec![0.0f32; dim];
            for input in &queries {
                for (h, x) in hidden.iter_mut().zip(input.iter()) {
                    *h = *h * 0.9 + *x * 0.1;
                }
            }
            hidden
        });
    });
    // ========================================================================
    // RWKV Attention Benchmark
    // ========================================================================
    group.bench_function("rwkv_attention", |b| {
        // TODO: When implemented:
        // let rwkv = RWKVAttention::new(dim);
        b.iter(|| {
            // TODO: Replace with actual RWKV computation
            // rwkv.forward(&queries)
            // Placeholder: simulate linear attention (decaying running sum)
            let mut state = vec![0.0f32; dim];
            for input in &queries {
                for (s, x) in state.iter_mut().zip(input.iter()) {
                    *s = *s * 0.95 + *x;
                }
            }
            state
        });
    });
    // ========================================================================
    // Flash Attention Approximation Benchmark
    // ========================================================================
    group.bench_function("flash_attention_approx", |b| {
        // TODO: When implemented:
        // let flash = FlashAttention::new(dim);
        b.iter(|| {
            // TODO: Replace with actual Flash Attention
            // flash.forward(&queries, &keys, &values)
            // Placeholder: simulate tiled computation over 16-token tiles
            let tile_size = 16;
            let mut output = vec![0.0f32; dim];
            for tile_start in (0..seq_len).step_by(tile_size) {
                let tile_end = (tile_start + tile_size).min(seq_len); // last tile may be short
                for i in tile_start..tile_end {
                    for j in 0..dim {
                        output[j] += queries[i][j] * 0.01;
                    }
                }
            }
            output
        });
    });
    // ========================================================================
    // Hyperbolic Attention Benchmark
    // ========================================================================
    group.bench_function("hyperbolic_attention", |b| {
        // TODO: When implemented:
        // let hyp_attn = HyperbolicAttention::new(dim, -1.0);
        b.iter(|| {
            // TODO: Replace with actual hyperbolic attention
            // hyp_attn.forward(&queries[0], &keys, &values)
            // Placeholder: simulate Poincare operations for a single query
            let query = &queries[0];
            let mut output = vec![0.0f32; dim];
            for (k, v) in keys.iter().zip(values.iter()) {
                // Simplified Poincare distance
                // NOTE(review): this is plain Euclidean distance, used as a
                // cost stand-in for the true Poincare metric.
                let dist: f32 = query.iter().zip(k.iter())
                    .map(|(a, b)| (a - b).powi(2))
                    .sum::<f32>()
                    .sqrt();
                let weight = (-dist).exp();
                for (o, vi) in output.iter_mut().zip(v.iter()) {
                    *o += weight * vi;
                }
            }
            output
        });
    });
    group.finish();
}
/// Measure how the placeholder mechanisms scale with sequence length.
///
/// Runs the quadratic multi-head stand-in and the linear Mamba stand-in at
/// sequence lengths 32..512 so the Criterion report shows the O(n^2) vs O(n)
/// scaling curves side by side.
fn bench_attention_scaling(c: &mut Criterion) {
    let mut group = c.benchmark_group("attention_scaling");
    let dim = 64;
    // Test different sequence lengths
    for seq_len in [32, 64, 128, 256, 512].iter() {
        let queries = random_vectors(*seq_len, dim, 42);
        let keys = random_vectors(*seq_len, dim, 123);
        let values = random_vectors(*seq_len, dim, 456);
        // Throughput in tokens so Criterion reports tokens/second per length.
        group.throughput(Throughput::Elements(*seq_len as u64));
        group.bench_with_input(
            BenchmarkId::new("multi_head", seq_len),
            &(&queries, &keys, &values),
            |b, (q, k, v)| {
                b.iter(|| {
                    // TODO: Replace with actual attention
                    // Placeholder: O(seq_len^2 * dim) dot-product accumulation.
                    let mut output = vec![0.0f32; dim];
                    for qi in q.iter() {
                        for (ki, vi) in k.iter().zip(v.iter()) {
                            let score: f32 = qi.iter().zip(ki.iter())
                                .map(|(a, b)| a * b).sum();
                            for (o, vij) in output.iter_mut().zip(vi.iter()) {
                                *o += score * vij * 0.001;
                            }
                        }
                    }
                    output
                });
            },
        );
        group.bench_with_input(
            BenchmarkId::new("mamba_ssm", seq_len),
            &(&queries,),
            |b, (input,)| {
                b.iter(|| {
                    // TODO: Replace with actual Mamba SSM
                    // Placeholder: O(seq_len * dim) recurrent scan.
                    let mut hidden = vec![0.0f32; dim];
                    for inp in input.iter() {
                        for (h, x) in hidden.iter_mut().zip(inp.iter()) {
                            *h = *h * 0.9 + *x * 0.1;
                        }
                    }
                    hidden
                });
            },
        );
    }
    group.finish();
}
/// Compare O(n^2)-memory full-matrix attention against row-at-a-time
/// O(n)-memory attention at a fixed 256-token sequence.
///
/// NOTE(review): the two variants do different amounts of arithmetic — the
/// "standard" one only materializes raw scores, while the memory-efficient
/// one also applies softmax and the value weighting — so compare allocation
/// behavior here, not raw op counts.
fn bench_attention_memory(c: &mut Criterion) {
    let mut group = c.benchmark_group("attention_memory");
    // Test memory-efficient vs standard attention
    let dim = 64;
    let seq_len = 256;
    let queries = random_vectors(seq_len, dim, 42);
    let keys = random_vectors(seq_len, dim, 123);
    let values = random_vectors(seq_len, dim, 456);
    group.bench_function("standard_attention", |b| {
        b.iter(|| {
            // Full attention matrix: O(n^2) memory
            let mut attn_matrix = vec![vec![0.0f32; seq_len]; seq_len];
            for i in 0..seq_len {
                for j in 0..seq_len {
                    attn_matrix[i][j] = queries[i].iter()
                        .zip(keys[j].iter())
                        .map(|(a, b)| a * b)
                        .sum();
                }
            }
            attn_matrix
        });
    });
    group.bench_function("memory_efficient_attention", |b| {
        b.iter(|| {
            // Compute attention row by row: O(n) memory
            let mut output = vec![vec![0.0f32; dim]; seq_len];
            for i in 0..seq_len {
                let mut scores = vec![0.0f32; seq_len];
                for j in 0..seq_len {
                    scores[j] = queries[i].iter()
                        .zip(keys[j].iter())
                        .map(|(a, b)| a * b)
                        .sum();
                }
                // Softmax
                // Subtract the row max before exponentiating for numerical
                // stability.
                let max = scores.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
                let exp_sum: f32 = scores.iter().map(|s| (s - max).exp()).sum();
                for (j, score) in scores.iter().enumerate() {
                    let weight = (score - max).exp() / exp_sum;
                    // `k` here is the mutable output accumulator, not a key.
                    for (k, v) in output[i].iter_mut().zip(values[j].iter()) {
                        *k += weight * v;
                    }
                }
            }
            output
        });
    });
    group.finish();
}
// Register the attention benchmarks with Criterion and emit the standard
// `main` entry point invoked by `cargo bench --bench attention_latency`.
criterion_group!(
    benches,
    bench_all_attention_mechanisms,
    bench_attention_scaling,
    bench_attention_memory
);
criterion_main!(benches);

View File

@@ -0,0 +1,378 @@
//! Learning Mechanism Performance Benchmarks
//!
//! Benchmarks for MicroLoRA, SONA, and adaptive learning.
//! Focus on parameter efficiency and training speed.
//!
//! Run with: cargo bench --bench learning_performance
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId, Throughput};
/// Generate random f32 vector
/// Deterministic pseudo-random f32 vector with entries in roughly [-1, 1).
///
/// A multiplicative hash of `(seed, index)` (LCG constant 0x5DEECE66D)
/// keeps benchmark inputs reproducible without an RNG dependency.
fn random_vector(dim: usize, seed: u64) -> Vec<f32> {
    let hash = |idx: u64| seed.wrapping_mul(idx + 1).wrapping_mul(0x5DEECE66D);
    (0..dim as u64)
        .map(|idx| ((hash(idx) % 1000) as f32) / 500.0 - 1.0)
        .collect()
}
/// Benchmark placeholder MicroLoRA forward/backward passes across
/// (dimension, rank) pairs.
///
/// The forward stand-in performs the two low-rank projections
/// (dim->rank, rank->dim) plus the residual add; the backward stand-in only
/// allocates gradient matrices of the right shapes.
fn bench_micro_lora(c: &mut Criterion) {
    let mut group = c.benchmark_group("micro_lora");
    // Test different ranks and dimensions
    for (dim, rank) in [(64, 4), (128, 8), (256, 16), (512, 32)].iter() {
        let input = random_vector(*dim, 42);
        let gradients = random_vector(*dim, 123);
        group.bench_with_input(
            BenchmarkId::new("forward", format!("d{}_r{}", dim, rank)),
            &(&input, dim, rank),
            |b, (inp, d, r)| {
                // TODO: When MicroLoRA is implemented:
                // let lora = MicroLoRA::new(*d, *r);
                b.iter(|| {
                    // Placeholder: simulate LoRA forward pass
                    // output = input + B @ A @ input
                    // A: dim x rank projection
                    // (`**r`/`**d` deref the nested references from the
                    // bench_with_input tuple.)
                    let mut projected = vec![0.0f32; **r];
                    for i in 0..**r {
                        for j in 0..**d {
                            projected[i] += inp[j] * 0.01;
                        }
                    }
                    // B: rank x dim projection
                    let mut output = vec![0.0f32; **d];
                    for i in 0..**d {
                        for j in 0..**r {
                            output[i] += projected[j] * 0.01;
                        }
                    }
                    // Add residual
                    for (o, i) in output.iter_mut().zip(inp.iter()) {
                        *o += i;
                    }
                    output
                });
            },
        );
        group.bench_with_input(
            BenchmarkId::new("backward", format!("d{}_r{}", dim, rank)),
            &(&gradients, dim, rank),
            |b, (grad, d, r)| {
                b.iter(|| {
                    // Placeholder: simulate LoRA backward pass
                    // Compute gradients for A and B
                    // NOTE(review): only allocation cost is measured here;
                    // `grad` itself is not read yet.
                    let grad_a = vec![vec![0.01f32; **r]; **d];
                    let grad_b = vec![vec![0.01f32; **d]; **r];
                    (grad_a, grad_b)
                });
            },
        );
    }
    group.finish();
}
/// Benchmark placeholder SONA operations: a two-layer forward pass,
/// activity-based architecture adaptation, and neuron pruning.
fn bench_sona_adaptation(c: &mut Criterion) {
    let mut group = c.benchmark_group("sona");
    let input_dim = 64;
    let hidden_dim = 128;
    let output_dim = 32;
    let input = random_vector(input_dim, 42);
    // NOTE(review): `target` is unused by the placeholder bodies below.
    let target = random_vector(output_dim, 123);
    group.bench_function("forward", |b| {
        // TODO: When SONA is implemented:
        // let sona = SONA::new(input_dim, hidden_dim, output_dim);
        b.iter(|| {
            // Placeholder: simulate SONA forward
            // input -> tanh(hidden) -> output, with constant 0.01 weights.
            let mut hidden = vec![0.0f32; hidden_dim];
            for i in 0..hidden_dim {
                for j in 0..input_dim {
                    hidden[i] += input[j] * 0.01;
                }
                hidden[i] = hidden[i].tanh();
            }
            let mut output = vec![0.0f32; output_dim];
            for i in 0..output_dim {
                for j in 0..hidden_dim {
                    output[i] += hidden[j] * 0.01;
                }
            }
            output
        });
    });
    group.bench_function("adapt_architecture", |b| {
        b.iter(|| {
            // Placeholder: simulate architecture adaptation
            // Analyze activation statistics and prune/grow neurons
            let neuron_activities = random_vector(hidden_dim, 456);
            let threshold = 0.1;
            let active_count: usize = neuron_activities.iter()
                .filter(|&a| a.abs() > threshold)
                .count();
            active_count
        });
    });
    group.bench_function("prune_neurons", |b| {
        b.iter(|| {
            // Placeholder: simulate neuron pruning
            // Keep the indices of neurons whose importance clears the cutoff.
            let neuron_importance = random_vector(hidden_dim, 789);
            let prune_threshold = 0.05;
            let kept_indices: Vec<usize> = neuron_importance.iter()
                .enumerate()
                .filter(|(_, &imp)| imp.abs() > prune_threshold)
                .map(|(i, _)| i)
                .collect();
            kept_indices
        });
    });
    group.finish();
}
/// Benchmark placeholder online-learning updates: a single-sample SGD step
/// vs a 100-sample batch gradient accumulation over a dense dim x dim layer.
fn bench_online_learning(c: &mut Criterion) {
    let mut group = c.benchmark_group("online_learning");
    let dim = 64;
    let num_samples = 100;
    // Generate training samples
    let samples: Vec<(Vec<f32>, Vec<f32>)> = (0..num_samples)
        .map(|i| {
            let input = random_vector(dim, i as u64);
            let target = random_vector(dim, (i + 1000) as u64);
            (input, target)
        })
        .collect();
    group.throughput(Throughput::Elements(num_samples as u64));
    group.bench_function("single_sample_update", |b| {
        b.iter(|| {
            // Placeholder: simulate single-sample SGD update
            let (input, target) = &samples[0];
            let learning_rate = 0.01;
            // Forward
            let mut output = vec![0.0f32; dim];
            for i in 0..dim {
                for j in 0..dim {
                    output[i] += input[j] * 0.01;
                }
            }
            // Compute gradients
            // d/dy of squared error: 2 * (prediction - target)
            let mut gradients = vec![0.0f32; dim];
            for i in 0..dim {
                gradients[i] = 2.0 * (output[i] - target[i]);
            }
            // Update (simulated)
            let weight_update: f32 = gradients.iter().map(|g| g * learning_rate).sum();
            weight_update
        });
    });
    group.bench_function("batch_update_100", |b| {
        b.iter(|| {
            // Placeholder: simulate batch update
            // Average the per-sample gradients over the whole batch.
            let mut total_gradient = vec![0.0f32; dim];
            for (input, target) in &samples {
                // Forward
                let mut output = vec![0.0f32; dim];
                for i in 0..dim {
                    for j in 0..dim {
                        output[i] += input[j] * 0.01;
                    }
                }
                // Accumulate gradients
                for i in 0..dim {
                    total_gradient[i] += 2.0 * (output[i] - target[i]) / (num_samples as f32);
                }
            }
            total_gradient
        });
    });
    group.finish();
}
/// Benchmark experience-replay batch sampling from a 10k-entry buffer:
/// uniform index-hop sampling vs a linear-scan prioritized scheme.
///
/// NOTE(review): prioritized sampling is O(buffer_size) per draw here; a
/// real implementation would use a sum tree for O(log n) draws.
fn bench_experience_replay(c: &mut Criterion) {
    let mut group = c.benchmark_group("experience_replay");
    let dim = 64;
    let buffer_size = 10000;
    let batch_size = 32;
    // Pre-fill buffer
    let buffer: Vec<(Vec<f32>, Vec<f32>)> = (0..buffer_size)
        .map(|i| {
            (random_vector(dim, i as u64), random_vector(dim, (i + 10000) as u64))
        })
        .collect();
    group.bench_function("uniform_sampling", |b| {
        b.iter(|| {
            // Uniform random sampling
            let mut batch = Vec::with_capacity(batch_size);
            for i in 0..batch_size {
                let idx = (i * 313 + 7) % buffer_size; // Pseudo-random
                batch.push(&buffer[idx]);
            }
            batch
        });
    });
    group.bench_function("prioritized_sampling", |b| {
        // Priorities (simulated)
        let priorities: Vec<f32> = (0..buffer_size)
            .map(|i| 1.0 + (i as f32 / buffer_size as f32))
            .collect();
        let total_priority: f32 = priorities.iter().sum();
        b.iter(|| {
            // Prioritized sampling (simplified)
            // Stratified draw: one target per equal-priority segment, found
            // by walking the cumulative priority sum.
            let mut batch = Vec::with_capacity(batch_size);
            let segment = total_priority / batch_size as f32;
            for i in 0..batch_size {
                let target = (i as f32 + 0.5) * segment;
                let mut cumsum = 0.0;
                for (idx, &p) in priorities.iter().enumerate() {
                    cumsum += p;
                    if cumsum >= target {
                        batch.push(&buffer[idx]);
                        break;
                    }
                }
            }
            batch
        });
    });
    group.finish();
}
/// Benchmark simplified MAML-style meta-learning: the inner-loop task
/// adaptation (5 gradient steps on one task) and the outer-loop
/// meta-gradient accumulation over all tasks.
fn bench_meta_learning(c: &mut Criterion) {
    let mut group = c.benchmark_group("meta_learning");
    let dim = 32;
    let num_tasks = 5;
    let shots_per_task = 5;
    // Generate task data
    // tasks[t] is a list of (input, target) pairs for task t.
    let tasks: Vec<Vec<(Vec<f32>, Vec<f32>)>> = (0..num_tasks)
        .map(|t| {
            (0..shots_per_task)
                .map(|s| {
                    let input = random_vector(dim, (t * 100 + s) as u64);
                    let target = random_vector(dim, (t * 100 + s + 50) as u64);
                    (input, target)
                })
                .collect()
        })
        .collect();
    group.bench_function("inner_loop_adaptation", |b| {
        b.iter(|| {
            // MAML-style inner loop (simplified)
            let task = &tasks[0];
            let inner_lr = 0.01;
            let inner_steps = 5;
            // Flattened dim x dim weight matrix, row-major (w[i*dim + j]).
            let mut adapted_weights = vec![0.0f32; dim * dim];
            for _ in 0..inner_steps {
                let mut gradient = vec![0.0f32; dim * dim];
                for (input, target) in task {
                    // Forward
                    let mut output = vec![0.0f32; dim];
                    for i in 0..dim {
                        for j in 0..dim {
                            output[i] += input[j] * adapted_weights[i * dim + j];
                        }
                    }
                    // Backward
                    // Squared-error gradient w.r.t. each weight.
                    for i in 0..dim {
                        for j in 0..dim {
                            gradient[i * dim + j] += 2.0 * (output[i] - target[i]) * input[j];
                        }
                    }
                }
                // Update
                // Average over shots, then take one SGD step.
                for (w, g) in adapted_weights.iter_mut().zip(gradient.iter()) {
                    *w -= inner_lr * g / (shots_per_task as f32);
                }
            }
            adapted_weights
        });
    });
    group.bench_function("outer_loop_update", |b| {
        b.iter(|| {
            // Meta-gradient computation (simplified)
            let outer_lr = 0.001;
            let mut meta_gradient = vec![0.0f32; dim * dim];
            for task in &tasks {
                // Simulate adapted performance gradient
                for (input, target) in task {
                    for i in 0..dim {
                        for j in 0..dim {
                            meta_gradient[i * dim + j] += input[j] * target[i] * 0.001;
                        }
                    }
                }
            }
            // Scale by learning rate
            for g in &mut meta_gradient {
                *g *= outer_lr / (num_tasks as f32);
            }
            meta_gradient
        });
    });
    group.finish();
}
// Register the learning benchmarks with Criterion and emit the standard
// `main` entry point invoked by `cargo bench --bench learning_performance`.
criterion_group!(
    benches,
    bench_micro_lora,
    bench_sona_adaptation,
    bench_online_learning,
    bench_experience_replay,
    bench_meta_learning
);
criterion_main!(benches);

View File

@@ -0,0 +1,430 @@
//! Neuromorphic Component Benchmarks
//!
//! Benchmarks for bio-inspired neural components:
//! - HDC (Hyperdimensional Computing)
//! - BTSP (Behavioral Time-Scale Plasticity)
//! - Spiking Neural Networks
//!
//! Run with: cargo bench --bench neuromorphic_benchmarks
use criterion::{criterion_group, criterion_main, Criterion, BenchmarkId, Throughput};
/// Generate random f32 vector
/// Deterministic pseudo-random f32 vector with entries in roughly [-1, 1).
///
/// Mixes `seed` with a 1-based index via wrapping multiplication by the
/// LCG constant 0x5DEECE66D, so inputs are reproducible without an RNG.
fn random_vector(dim: usize, seed: u64) -> Vec<f32> {
    let mut values = Vec::with_capacity(dim);
    let mut idx: u64 = 1;
    while values.len() < dim {
        let raw = (seed.wrapping_mul(idx).wrapping_mul(0x5DEECE66D) % 1000) as f32;
        values.push(raw / 500.0 - 1.0);
        idx += 1;
    }
    values
}
/// Generate binary hypervector
/// Deterministic bipolar hypervector of +1/-1 entries.
///
/// Element `i` is +1 when the multiplicative hash of `(seed, i)` is even,
/// -1 otherwise — reproducible across runs without an RNG dependency.
fn random_binary_hv(dim: usize, seed: u64) -> Vec<i8> {
    (0..dim)
        .map(|idx| {
            let hashed = seed
                .wrapping_mul(idx as u64 + 1)
                .wrapping_mul(0x5DEECE66D);
            match hashed % 2 {
                0 => 1,
                _ => -1,
            }
        })
        .collect()
}
/// Benchmark the four core HDC primitives — bundle, bind, permute,
/// similarity — on bipolar hypervectors at 1k/4k/10k dimensions.
fn bench_hdc_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("hdc");
    // HDC typically uses high dimensions for orthogonality
    for dim in [1000, 4000, 10000].iter() {
        let hv_a = random_binary_hv(*dim, 42);
        let hv_b = random_binary_hv(*dim, 123);
        let hv_c = random_binary_hv(*dim, 456);
        group.throughput(Throughput::Elements(*dim as u64));
        // Bundling (element-wise majority/addition)
        group.bench_with_input(
            BenchmarkId::new("bundle_3", dim),
            &(&hv_a, &hv_b, &hv_c),
            // `b_hv` avoids shadowing the Bencher parameter `b`.
            |b, (a, b_hv, c)| {
                b.iter(|| {
                    // Majority vote per element; i32 sum avoids i8 overflow.
                    let bundled: Vec<i8> = (0..a.len())
                        .map(|i| {
                            let sum = a[i] as i32 + b_hv[i] as i32 + c[i] as i32;
                            if sum > 0 { 1 } else { -1 }
                        })
                        .collect();
                    bundled
                });
            },
        );
        // Binding (element-wise XOR / multiplication)
        group.bench_with_input(
            BenchmarkId::new("bind", dim),
            &(&hv_a, &hv_b),
            |b, (a, b_hv)| {
                b.iter(|| {
                    // For bipolar vectors, element-wise product == XOR binding.
                    let bound: Vec<i8> = a.iter()
                        .zip(b_hv.iter())
                        .map(|(&ai, &bi)| ai * bi)
                        .collect();
                    bound
                });
            },
        );
        // Permutation (cyclic shift)
        group.bench_with_input(
            BenchmarkId::new("permute", dim),
            &(&hv_a,),
            |b, (a,)| {
                b.iter(|| {
                    let shift = 7;
                    let mut permuted = vec![0i8; a.len()];
                    for i in 0..a.len() {
                        permuted[(i + shift) % a.len()] = a[i];
                    }
                    permuted
                });
            },
        );
        // Similarity (Hamming distance / cosine)
        group.bench_with_input(
            BenchmarkId::new("similarity", dim),
            &(&hv_a, &hv_b),
            |b, (a, b_hv)| {
                b.iter(|| {
                    // Sum of element products: +1 per match, -1 per mismatch;
                    // normalized to [-1, 1].
                    let matching: i32 = a.iter()
                        .zip(b_hv.iter())
                        .map(|(&ai, &bi)| (ai * bi) as i32)
                        .sum();
                    matching as f32 / a.len() as f32
                });
            },
        );
    }
    group.finish();
}
/// Benchmark encoding a 64-dim continuous vector into a 10k-dim bipolar
/// hypervector via level quantization + position binding + bundling.
fn bench_hdc_encoding(c: &mut Criterion) {
    let mut group = c.benchmark_group("hdc_encoding");
    let hd_dim = 10000;
    let input_dim = 64;
    let input = random_vector(input_dim, 42);
    // Level hypervectors for encoding continuous values
    let num_levels = 100;
    let level_hvs: Vec<Vec<i8>> = (0..num_levels)
        .map(|i| random_binary_hv(hd_dim, i as u64))
        .collect();
    // Position hypervectors for encoding positions
    let pos_hvs: Vec<Vec<i8>> = (0..input_dim)
        .map(|i| random_binary_hv(hd_dim, (i + 1000) as u64))
        .collect();
    group.bench_function("encode_vector", |b| {
        b.iter(|| {
            // Encode each dimension and bundle
            // i32 accumulator holds the pre-threshold bundle sum.
            let mut result = vec![0i32; hd_dim];
            for (i, &val) in input.iter().enumerate() {
                // Quantize to level
                // Maps [-1, 1] onto [0, num_levels - 1]; clamp guards
                // against values at/above +1.
                let level = ((val + 1.0) / 2.0 * (num_levels - 1) as f32) as usize;
                let level = level.min(num_levels - 1);
                // Bind position with level
                for j in 0..hd_dim {
                    result[j] += (pos_hvs[i][j] * level_hvs[level][j]) as i32;
                }
            }
            // Threshold to binary
            let encoded: Vec<i8> = result.iter()
                .map(|&v| if v > 0 { 1 } else { -1 })
                .collect();
            encoded
        });
    });
    group.finish();
}
/// Benchmark Behavioral Time-Scale Plasticity primitives: the dendritic
/// forward pass, eligibility-trace decay/accumulation, and the
/// behavior-gated weight update.
fn bench_btsp(c: &mut Criterion) {
    let mut group = c.benchmark_group("btsp");
    let num_inputs = 100;
    let num_outputs = 10;
    let input = random_vector(num_inputs, 42);
    let weights: Vec<Vec<f32>> = (0..num_outputs)
        .map(|i| random_vector(num_inputs, i as u64))
        .collect();
    group.bench_function("forward", |b| {
        b.iter(|| {
            // Forward pass with dendritic compartments
            let mut outputs = vec![0.0f32; num_outputs];
            for i in 0..num_outputs {
                for j in 0..num_inputs {
                    outputs[i] += weights[i][j] * input[j];
                }
                // Dendritic nonlinearity
                outputs[i] = outputs[i].tanh();
            }
            outputs
        });
    });
    group.bench_function("eligibility_update", |b| {
        let tau_e = 100.0; // Eligibility trace time constant
        // Traces persist across b.iter() iterations; the decay term keeps
        // them bounded, so the per-iteration work stays constant.
        let mut eligibility = vec![vec![0.0f32; num_inputs]; num_outputs];
        b.iter(|| {
            // Update eligibility traces
            // Exponential decay by exp(-dt/tau_e), then add current input.
            for i in 0..num_outputs {
                for j in 0..num_inputs {
                    eligibility[i][j] *= (-1.0 / tau_e).exp();
                    eligibility[i][j] += input[j];
                }
            }
            &eligibility
        });
    });
    group.bench_function("behavioral_update", |b| {
        let eligibility = vec![vec![0.5f32; num_inputs]; num_outputs];
        let mut weights = weights.clone();
        let learning_rate = 0.01;
        b.iter(|| {
            // Apply behavioral signal to modulate learning
            let behavioral_signal = 1.0; // Reward/plateau potential
            for i in 0..num_outputs {
                for j in 0..num_inputs {
                    weights[i][j] += learning_rate * behavioral_signal * eligibility[i][j];
                }
            }
            &weights
        });
    });
    group.finish();
}
/// Benchmark leaky integrate-and-fire dynamics: a 1000-step single-neuron
/// simulation and one network update step at 100/500/1000 neurons with
/// 10% connectivity.
fn bench_spiking_neurons(c: &mut Criterion) {
    let mut group = c.benchmark_group("spiking");
    // LIF neuron simulation
    group.bench_function("lif_neuron_1000steps", |b| {
        let threshold = 1.0;
        let tau_m = 10.0; // membrane time constant (in dt units)
        let dt = 1.0;
        let input_current = 0.15;
        b.iter(|| {
            let mut voltage = 0.0f32;
            let mut spike_count = 0u32;
            for _ in 0..1000 {
                // Leaky integration
                voltage += (-voltage / tau_m + input_current) * dt;
                // Spike and reset
                if voltage >= threshold {
                    spike_count += 1;
                    voltage = 0.0;
                }
            }
            spike_count
        });
    });
    // Spiking network simulation
    for num_neurons in [100, 500, 1000].iter() {
        let connectivity = 0.1; // 10% connectivity
        let num_connections = (*num_neurons as f32 * *num_neurons as f32 * connectivity) as usize;
        // Pre-generate connections
        // Deterministic (pre, post, weight) triples; the stride-7 post index
        // spreads connections without an RNG.
        let connections: Vec<(usize, usize, f32)> = (0..num_connections)
            .map(|i| {
                let pre = i % *num_neurons;
                let post = (i * 7 + 3) % *num_neurons;
                let weight = 0.1;
                (pre, post, weight)
            })
            .collect();
        group.throughput(Throughput::Elements(*num_neurons as u64));
        group.bench_with_input(
            BenchmarkId::new("network_step", num_neurons),
            &(&connections, num_neurons),
            |b, (conns, n)| {
                b.iter(|| {
                    let mut voltages = vec![0.0f32; **n];
                    let mut spikes = vec![false; **n];
                    let threshold = 1.0;
                    let tau_m = 10.0;
                    // Input current
                    let input: Vec<f32> = (0..**n).map(|i| 0.1 + 0.01 * (i as f32)).collect();
                    // Integrate
                    for i in 0..**n {
                        voltages[i] += (-voltages[i] / tau_m + input[i]);
                    }
                    // Propagate spikes from previous step
                    // NOTE(review): `spikes` is freshly initialized to all
                    // false above, so this loop never adds weight — it only
                    // measures the connection-scan cost. Confirm intent.
                    for (pre, post, weight) in conns.iter() {
                        if spikes[*pre] {
                            voltages[*post] += weight;
                        }
                    }
                    // Generate spikes
                    for i in 0..**n {
                        spikes[i] = voltages[i] >= threshold;
                        if spikes[i] {
                            voltages[i] = 0.0;
                        }
                    }
                    (voltages, spikes)
                });
            },
        );
    }
    group.finish();
}
/// Benchmark a spike-timing-dependent plasticity weight update over 10k
/// synapses using the standard exponential STDP window.
fn bench_stdp(c: &mut Criterion) {
    let mut group = c.benchmark_group("stdp");
    let num_synapses = 10000;
    let a_plus = 0.01;  // potentiation amplitude
    let a_minus = 0.012; // depression amplitude (slightly larger, typical STDP)
    let tau_plus = 20.0;
    let tau_minus = 20.0;
    // Spike times
    let pre_spike_times: Vec<f32> = (0..num_synapses)
        .map(|i| (i % 100) as f32)
        .collect();
    let post_spike_times: Vec<f32> = (0..num_synapses)
        .map(|i| ((i + 10) % 100) as f32)
        .collect();
    // Weights persist across b.iter() iterations; the [0, 1] clamp keeps
    // them bounded so each iteration does identical work.
    let mut weights = vec![0.5f32; num_synapses];
    group.throughput(Throughput::Elements(num_synapses as u64));
    group.bench_function("weight_update", |b| {
        b.iter(|| {
            for i in 0..num_synapses {
                // dt > 0: post fired after pre (causal) -> potentiate.
                let dt = post_spike_times[i] - pre_spike_times[i];
                let delta_w = if dt > 0.0 {
                    // Potentiation
                    a_plus * (-dt / tau_plus).exp()
                } else {
                    // Depression
                    -a_minus * (dt / tau_minus).exp()
                };
                weights[i] = (weights[i] + delta_w).max(0.0).min(1.0);
            }
            weights.clone()
        });
    });
    group.finish();
}
/// Benchmark an echo-state-style reservoir: run a 100-step input sequence
/// through a 500-neuron reservoir with 10% sparse recurrent connectivity,
/// collecting the state trajectory.
fn bench_reservoir_computing(c: &mut Criterion) {
    let mut group = c.benchmark_group("reservoir");
    let input_dim = 10;
    let reservoir_size = 500;
    // NOTE(review): `output_dim` is unused — no readout layer is simulated.
    let output_dim = 5;
    let seq_len = 100;
    // Generate reservoir weights (sparse)
    // Stored as (pre, post, weight) triples rather than a dense matrix.
    let sparsity = 0.1;
    let num_connections = (reservoir_size as f32 * reservoir_size as f32 * sparsity) as usize;
    let reservoir_weights: Vec<(usize, usize, f32)> = (0..num_connections)
        .map(|i| {
            let pre = i % reservoir_size;
            let post = (i * 17 + 5) % reservoir_size;
            // Deterministic weights spread over roughly [-0.1, 0.1].
            let weight = 0.1 * (((i * 7) % 100) as f32 / 50.0 - 1.0);
            (pre, post, weight)
        })
        .collect();
    // Input weights
    let input_weights: Vec<Vec<f32>> = (0..reservoir_size)
        .map(|i| random_vector(input_dim, i as u64))
        .collect();
    // Input sequence
    let input_sequence: Vec<Vec<f32>> = (0..seq_len)
        .map(|i| random_vector(input_dim, i as u64))
        .collect();
    group.throughput(Throughput::Elements(seq_len as u64));
    group.bench_function("run_sequence", |b| {
        b.iter(|| {
            let mut state = vec![0.0f32; reservoir_size];
            let mut states = Vec::with_capacity(seq_len);
            for input in &input_sequence {
                // Input contribution
                let mut new_state = vec![0.0f32; reservoir_size];
                for i in 0..reservoir_size {
                    for j in 0..input_dim {
                        new_state[i] += input_weights[i][j] * input[j];
                    }
                }
                // Recurrent contribution
                for (pre, post, weight) in &reservoir_weights {
                    new_state[*post] += weight * state[*pre];
                }
                // Nonlinearity
                for s in &mut new_state {
                    *s = s.tanh();
                }
                state = new_state;
                // Keep the full trajectory (what a readout would train on).
                states.push(state.clone());
            }
            states
        });
    });
    group.finish();
}
// Register the neuromorphic benchmarks with Criterion and emit the standard
// `main` entry point invoked by `cargo bench --bench neuromorphic_benchmarks`.
criterion_group!(
    benches,
    bench_hdc_operations,
    bench_hdc_encoding,
    bench_btsp,
    bench_spiking_neurons,
    bench_stdp,
    bench_reservoir_computing
);
criterion_main!(benches);

View File

@@ -0,0 +1,575 @@
// Plaid ZK Proof & Learning Performance Benchmarks
//
// Run with: cargo bench --bench plaid_performance
//
// Expected results:
// - Proof generation: ~8μs per proof (32-bit range)
// - Transaction processing: ~1.5μs per transaction
// - Feature extraction: ~0.1μs
// - LSH hashing: ~0.05μs
use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId, Throughput};
use ruvector_edge::plaid::*;
use ruvector_edge::plaid::zkproofs::{RangeProof, PedersenCommitment, FinancialProofBuilder};
use std::collections::HashMap;
// ============================================================================
// Proof Generation Benchmarks
// ============================================================================
/// Benchmark ZK range-proof generation at 8/16/32/64-bit ranges.
///
/// Proof cost scales with the bit width of the range, so each width gets
/// its own Criterion input. Uses project types from `ruvector_edge`.
fn bench_proof_generation(c: &mut Criterion) {
    let mut group = c.benchmark_group("proof_generation");
    // Test different range sizes (affects bit count and proof complexity)
    for range_bits in [8, 16, 32, 64] {
        let max = if range_bits == 64 {
            u64::MAX / 2 // Avoid overflow
        } else {
            // Largest value representable in `range_bits` bits.
            (1u64 << range_bits) - 1
        };
        // Prove a mid-range value, 0 <= value <= max.
        let value = max / 2;
        let blinding = PedersenCommitment::random_blinding();
        group.throughput(Throughput::Elements(1));
        group.bench_with_input(
            BenchmarkId::new("range_proof", range_bits),
            &(value, max, blinding),
            |b, (v, m, bl)| {
                b.iter(|| {
                    RangeProof::prove(
                        black_box(*v),
                        0,
                        black_box(*m),
                        bl,
                    )
                });
            },
        );
    }
    group.finish();
}
/// Benchmark ZK range-proof verification for proofs of 8/16/32/64-bit
/// ranges. Proofs are generated once outside the timed loop so only
/// `RangeProof::verify` is measured.
fn bench_proof_verification(c: &mut Criterion) {
    let mut group = c.benchmark_group("proof_verification");
    // Pre-generate proofs of different sizes
    let proofs: Vec<_> = [8, 16, 32, 64]
        .iter()
        .map(|&bits| {
            let max = if bits == 64 {
                u64::MAX / 2
            } else {
                (1u64 << bits) - 1
            };
            let value = max / 2;
            let blinding = PedersenCommitment::random_blinding();
            // unwrap: setup code — a failed prove here is a benchmark bug.
            (bits, RangeProof::prove(value, 0, max, &blinding).unwrap())
        })
        .collect();
    for (bits, proof) in &proofs {
        group.throughput(Throughput::Elements(1));
        group.bench_with_input(
            BenchmarkId::new("verify", bits),
            proof,
            |b, p| {
                b.iter(|| RangeProof::verify(black_box(p)));
            },
        );
    }
    group.finish();
}
/// Benchmark Pedersen commitment creation and opening verification for a
/// fixed value/blinding pair.
fn bench_pedersen_commitment(c: &mut Criterion) {
    let mut group = c.benchmark_group("pedersen_commitment");
    let value = 50000u64;
    let blinding = PedersenCommitment::random_blinding();
    group.bench_function("commit", |b| {
        b.iter(|| {
            PedersenCommitment::commit(black_box(value), black_box(&blinding))
        });
    });
    group.bench_function("verify_opening", |b| {
        // Commitment built once outside the timed loop.
        let commitment = PedersenCommitment::commit(value, &blinding);
        b.iter(|| {
            PedersenCommitment::verify_opening(
                black_box(&commitment),
                black_box(value),
                black_box(&blinding),
            )
        });
    });
    group.finish();
}
/// Benchmark the high-level financial proof statements (income, affordability,
/// overdraft-free history, savings) built over a small fixture of monthly
/// incomes and daily balances.
fn bench_financial_proofs(c: &mut Criterion) {
    let mut group = c.benchmark_group("financial_proofs");
    // Fixture: 4 months of income, 7 days of balances.
    let builder = FinancialProofBuilder::new()
        .with_income(vec![6500, 6500, 6800, 6500])
        .with_balances(vec![5000, 5200, 4800, 5100, 5300, 5000, 5500]);
    group.bench_function("prove_income_above", |b| {
        b.iter(|| {
            builder.prove_income_above(black_box(5000))
        });
    });
    group.bench_function("prove_affordability", |b| {
        b.iter(|| {
            // Presumably (monthly_payment, months) — confirm against the
            // FinancialProofBuilder API.
            builder.prove_affordability(black_box(2000), black_box(3))
        });
    });
    group.bench_function("prove_no_overdrafts", |b| {
        b.iter(|| {
            builder.prove_no_overdrafts(black_box(30))
        });
    });
    group.bench_function("prove_savings_above", |b| {
        b.iter(|| {
            builder.prove_savings_above(black_box(4000))
        });
    });
    group.finish();
}
// ============================================================================
// Learning Algorithm Benchmarks
// ============================================================================
/// Benchmark transaction feature extraction: raw `extract_features`, the
/// feature-to-embedding conversion, and the combined pipeline, on one
/// representative transaction.
fn bench_feature_extraction(c: &mut Criterion) {
    let mut group = c.benchmark_group("feature_extraction");
    // A typical in-store card transaction (Plaid-style fields).
    let tx = Transaction {
        transaction_id: "tx123".to_string(),
        account_id: "acc456".to_string(),
        amount: 50.0,
        date: "2024-03-15".to_string(),
        name: "Starbucks Coffee Shop".to_string(),
        merchant_name: Some("Starbucks".to_string()),
        category: vec!["Food".to_string(), "Coffee".to_string()],
        pending: false,
        payment_channel: "in_store".to_string(),
    };
    group.throughput(Throughput::Elements(1));
    group.bench_function("extract_features", |b| {
        b.iter(|| extract_features(black_box(&tx)));
    });
    group.bench_function("to_embedding", |b| {
        // Extract once; only the embedding conversion is timed.
        let features = extract_features(&tx);
        b.iter(|| features.to_embedding());
    });
    group.bench_function("full_pipeline", |b| {
        b.iter(|| {
            let features = extract_features(black_box(&tx));
            features.to_embedding()
        });
    });
    group.finish();
}
/// Benchmark LSH hashing of merchant names of increasing length.
///
/// The LSH routine is private, so this times `extract_features` (which calls
/// it) on transactions whose only varying field is the merchant text — the
/// measured cost therefore includes Transaction construction overhead.
fn bench_lsh_hashing(c: &mut Criterion) {
    let mut group = c.benchmark_group("lsh_hashing");
    let test_cases = vec![
        ("Short", "Starbucks"),
        ("Medium", "Amazon.com Services LLC"),
        ("Long", "Whole Foods Market Store #12345 Manhattan"),
        ("VeryLong", "Shell Gas Station #12345 - 123 Main Street, City Name, State 12345"),
    ];
    for (name, text) in &test_cases {
        // Throughput in bytes of merchant text, so reports show bytes/sec.
        group.throughput(Throughput::Bytes(text.len() as u64));
        group.bench_with_input(
            BenchmarkId::new("simple_lsh", name),
            text,
            |b, t| {
                b.iter(|| {
                    // LSH is internal, so we extract features which calls it
                    let tx = Transaction {
                        transaction_id: "tx".to_string(),
                        account_id: "acc".to_string(),
                        amount: 50.0,
                        date: "2024-01-01".to_string(),
                        name: t.to_string(),
                        merchant_name: Some(t.to_string()),
                        category: vec!["Test".to_string()],
                        pending: false,
                        payment_channel: "online".to_string(),
                    };
                    extract_features(black_box(&tx))
                });
            },
        );
    }
    group.finish();
}
/// Benchmark Q-learning primitives over a `FinancialLearningState`
/// pre-populated with 10 category/action Q-values: Q-value update,
/// recommendation lookup, and a raw HashMap get.
fn bench_q_learning(c: &mut Criterion) {
    let mut group = c.benchmark_group("q_learning");
    let mut state = FinancialLearningState::default();
    // Pre-populate with some Q-values
    // 100 inserts over 10 distinct "category_N|under_budget" keys — the last
    // write per key wins, leaving 10 entries.
    for i in 0..100 {
        let key = format!("category_{}|under_budget", i % 10);
        state.q_values.insert(key, 0.5 + (i as f64 * 0.01));
    }
    group.bench_function("update_q_value", |b| {
        b.iter(|| {
            // Args presumably (state, category, action, reward, learning_rate)
            // — confirm against the update_q_value signature.
            update_q_value(
                black_box(&state),
                "Food",
                "under_budget",
                1.0,
                0.1,
            )
        });
    });
    group.bench_function("get_recommendation", |b| {
        b.iter(|| {
            // Args presumably (state, category, spent, budget) — confirm
            // against the get_recommendation signature.
            get_recommendation(
                black_box(&state),
                "Food",
                500.0,
                600.0,
            )
        });
    });
    group.bench_function("q_value_lookup", |b| {
        b.iter(|| {
            black_box(&state).q_values.get("category_5|under_budget")
        });
    });
    group.finish();
}
// ============================================================================
// End-to-End Transaction Processing
// ============================================================================
/// Benchmark end-to-end transaction processing at batch sizes 1..1000:
/// feature extraction alone, then extraction + embedding.
fn bench_transaction_processing(c: &mut Criterion) {
    let mut group = c.benchmark_group("transaction_processing");
    // Test different batch sizes
    for batch_size in [1, 10, 100, 1000] {
        // Synthetic but varied transactions: 20 merchants, 5 categories,
        // alternating payment channels.
        let transactions: Vec<Transaction> = (0..batch_size)
            .map(|i| Transaction {
                transaction_id: format!("tx{}", i),
                account_id: "acc456".to_string(),
                amount: 50.0 + (i as f64 % 100.0),
                date: format!("2024-03-{:02}", (i % 28) + 1),
                name: format!("Merchant {}", i % 20),
                merchant_name: Some(format!("Merchant {}", i % 20)),
                category: vec![
                    format!("Category {}", i % 5),
                    "Subcategory".to_string()
                ],
                pending: false,
                payment_channel: if i % 2 == 0 { "in_store" } else { "online" }.to_string(),
            })
            .collect();
        // Per-transaction throughput so batches are comparable.
        group.throughput(Throughput::Elements(batch_size as u64));
        group.bench_with_input(
            BenchmarkId::new("feature_extraction_batch", batch_size),
            &transactions,
            |b, txs| {
                b.iter(|| {
                    for tx in txs {
                        let _ = extract_features(black_box(tx));
                    }
                });
            },
        );
        group.bench_with_input(
            BenchmarkId::new("full_pipeline_batch", batch_size),
            &transactions,
            |b, txs| {
                b.iter(|| {
                    for tx in txs {
                        let features = extract_features(black_box(tx));
                        let _ = features.to_embedding();
                    }
                });
            },
        );
    }
    group.finish();
}
// ============================================================================
// Serialization Benchmarks
// ============================================================================
/// Benchmark (de)serialization of `FinancialLearningState` at three
/// population sizes, comparing serde_json against bincode.
///
/// Throughput is reported in bytes of the JSON encoding, so json and
/// bincode rows within a size are comparable only by wall time.
fn bench_serialization(c: &mut Criterion) {
    let mut group = c.benchmark_group("serialization");
    // Create states with varying sizes
    for tx_count in [100, 1000, 10000] {
        let mut state = FinancialLearningState::default();
        // Populate state to simulate real usage
        for i in 0..tx_count {
            // 10 distinct categories; later iterations overwrite earlier
            // patterns/q_values for the same key.
            let category_key = format!("category_{}", i % 10);
            let pattern = SpendingPattern {
                pattern_id: format!("pat_{}", i),
                category: category_key.clone(),
                avg_amount: 50.0 + (i as f64 % 100.0),
                frequency_days: 7.0,
                confidence: 0.8,
                last_seen: i,
            };
            state.patterns.insert(category_key.clone(), pattern);
            // Add Q-values
            let q_key = format!("{}|under_budget", category_key);
            state.q_values.insert(q_key, 0.5 + (i as f64 * 0.001));
            // Add embedding (this will expose the memory leak!)
            // Unlike the maps above, this Vec grows by one 21-float entry
            // per iteration — unbounded with tx_count.
            state.category_embeddings.push((
                category_key,
                vec![0.1 * (i as f32 % 10.0); 21]
            ));
        }
        state.version = tx_count;
        // unwrap: setup — a serialization failure here is a benchmark bug.
        let json_string = serde_json::to_string(&state).unwrap();
        let state_size = json_string.len();
        group.throughput(Throughput::Bytes(state_size as u64));
        group.bench_with_input(
            BenchmarkId::new("json_serialize", tx_count),
            &state,
            |b, s| {
                b.iter(|| serde_json::to_string(black_box(s)).unwrap());
            },
        );
        group.bench_with_input(
            BenchmarkId::new("json_deserialize", tx_count),
            &json_string,
            |b, json| {
                b.iter(|| {
                    serde_json::from_str::<FinancialLearningState>(black_box(json)).unwrap()
                });
            },
        );
        // Benchmark bincode for comparison
        let bincode_data = bincode::serialize(&state).unwrap();
        group.bench_with_input(
            BenchmarkId::new("bincode_serialize", tx_count),
            &state,
            |b, s| {
                b.iter(|| bincode::serialize(black_box(s)).unwrap());
            },
        );
        group.bench_with_input(
            BenchmarkId::new("bincode_deserialize", tx_count),
            &bincode_data,
            |b, data| {
                b.iter(|| {
                    bincode::deserialize::<FinancialLearningState>(black_box(data)).unwrap()
                });
            },
        );
    }
    group.finish();
}
// ============================================================================
// Memory Footprint Benchmarks
// ============================================================================
/// Benchmarks memory-footprint characteristics: serialized range-proof
/// sizes at two value widths, and serialized state size as transactions
/// accumulate. Uses `iter_custom` so sizes can be accumulated and printed
/// alongside the timing; the `println!` output is informational and appears
/// once per criterion sample batch.
fn bench_memory_footprint(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_footprint");
    group.bench_function("proof_size_8bit", |b| {
        b.iter_custom(|iters| {
            let mut total_size = 0;
            // Timer starts before the loop: proof generation AND
            // serialization are both included in the measurement.
            let start = std::time::Instant::now();
            for _ in 0..iters {
                let blinding = PedersenCommitment::random_blinding();
                // 8-bit-range proof: value 128 within [0, 255].
                let proof = RangeProof::prove(128, 0, 255, &blinding).unwrap();
                let size = bincode::serialize(&proof).unwrap().len();
                total_size += size;
                black_box(size);
            }
            println!("Average proof size (8-bit): {} bytes", total_size / iters as usize);
            start.elapsed()
        });
    });
    group.bench_function("proof_size_32bit", |b| {
        b.iter_custom(|iters| {
            let mut total_size = 0;
            let start = std::time::Instant::now();
            for _ in 0..iters {
                let blinding = PedersenCommitment::random_blinding();
                // Wider range: value 50000 within [0, 100000].
                let proof = RangeProof::prove(50000, 0, 100000, &blinding).unwrap();
                let size = bincode::serialize(&proof).unwrap().len();
                total_size += size;
                black_box(size);
            }
            println!("Average proof size (32-bit): {} bytes", total_size / iters as usize);
            start.elapsed()
        });
    });
    group.bench_function("state_growth_simulation", |b| {
        b.iter_custom(|iters| {
            // One state shared across all iterations so growth compounds.
            let mut state = FinancialLearningState::default();
            let start = std::time::Instant::now();
            for i in 0..iters {
                // Simulate transaction processing (THIS WILL LEAK MEMORY!)
                // Embeddings are pushed per transaction — even for the same
                // category key — so the vector grows without bound.
                let key = format!("cat_{}", i % 10);
                state.category_embeddings.push((key.clone(), vec![0.0; 21]));
                // Also add pattern and Q-value
                let pattern = SpendingPattern {
                    pattern_id: format!("pat_{}", i),
                    category: key.clone(),
                    avg_amount: 50.0,
                    frequency_days: 7.0,
                    confidence: 0.8,
                    last_seen: i,
                };
                state.patterns.insert(key.clone(), pattern);
                state.q_values.insert(format!("{}|action", key), 0.5);
            }
            // Final serialized size reported so the growth is visible in
            // benchmark output next to the timing.
            let size = bincode::serialize(&state).unwrap().len();
            println!("State size after {} transactions: {} KB", iters, size / 1024);
            println!("Embeddings count: {}", state.category_embeddings.len());
            start.elapsed()
        });
    });
    group.finish();
}
// ============================================================================
// Regression Tests (detect performance degradation)
// ============================================================================
/// Baseline benchmarks whose timings CI tracks for performance regressions.
///
/// Each case pins one representative hot path: 32-bit range proving,
/// single-transaction feature extraction plus embedding, and JSON
/// serialization of a state holding ~1k embeddings.
fn bench_regression_tests(c: &mut Criterion) {
    let mut group = c.benchmark_group("regression_tests");
    // Baseline: proof generation over a 32-bit-scale range.
    group.bench_function("baseline_proof_32bit", |b| {
        let blinding = PedersenCommitment::random_blinding();
        b.iter(|| RangeProof::prove(black_box(50000), 0, black_box(100000), &blinding));
    });
    // Baseline: extract features from one transaction and embed them.
    group.bench_function("baseline_feature_extraction", |b| {
        let sample = Transaction {
            transaction_id: "tx".to_string(),
            account_id: "acc".to_string(),
            amount: 50.0,
            date: "2024-01-01".to_string(),
            name: "Test".to_string(),
            merchant_name: Some("Test Merchant".to_string()),
            category: vec!["Food".to_string()],
            pending: false,
            payment_channel: "online".to_string(),
        };
        b.iter(|| extract_features(black_box(&sample)).to_embedding());
    });
    // Baseline: JSON serialization of a state with 1000 embeddings.
    group.bench_function("baseline_json_serialize_1k", |b| {
        let mut state = FinancialLearningState::default();
        for i in 0..1000 {
            state
                .category_embeddings
                .push((format!("cat_{}", i % 10), vec![0.0; 21]));
        }
        b.iter(|| serde_json::to_string(black_box(&state)));
    });
    group.finish();
}
// ============================================================================
// Benchmark Groups
// ============================================================================
// Zero-knowledge proof benchmarks: generation, verification, commitments,
// and the financial-proof compositions.
criterion_group!(
    proof_benches,
    bench_proof_generation,
    bench_proof_verification,
    bench_pedersen_commitment,
    bench_financial_proofs,
);
// Learning-pipeline benchmarks: feature extraction, LSH, Q-learning, and
// batched end-to-end transaction processing.
criterion_group!(
    learning_benches,
    bench_feature_extraction,
    bench_lsh_hashing,
    bench_q_learning,
    bench_transaction_processing,
);
// Overhead benchmarks: serialization formats and memory footprint growth.
criterion_group!(
    overhead_benches,
    bench_serialization,
    bench_memory_footprint,
);
// Baseline benchmarks CI watches for performance regressions.
criterion_group!(
    regression_benches,
    bench_regression_tests,
);
// Entry point: runs every group above, in this order, when the bench
// binary is executed via `cargo bench`.
criterion_main!(
    proof_benches,
    learning_benches,
    overhead_benches,
    regression_benches,
);