Squashed 'vendor/ruvector/' content from commit b64c2172

git-subtree-dir: vendor/ruvector
git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
commit d803bfe2b1
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,92 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ruvector_nervous_system::plasticity::btsp::{BTSPAssociativeMemory, BTSPLayer, BTSPSynapse};
/// Measures the cost of a single BTSP synapse update, comparing the
/// no-plateau baseline path against the plateau-gated plasticity path.
fn bench_synapse_update(c: &mut Criterion) {
    let mut grp = c.benchmark_group("synapse_update");
    let mut syn = BTSPSynapse::new(0.5, 2000.0).unwrap();
    // Presynaptic activity without a plateau signal.
    grp.bench_function("update_no_plateau", |b| {
        b.iter(|| {
            syn.update(black_box(true), black_box(false), black_box(1.0));
        })
    });
    // Presynaptic activity coincident with a plateau signal.
    grp.bench_function("update_with_plateau", |b| {
        b.iter(|| {
            syn.update(black_box(true), black_box(true), black_box(1.0));
        })
    });
    grp.finish();
}
/// Measures BTSP layer forward-pass latency across layer sizes, reporting
/// element throughput.
fn bench_layer_forward(c: &mut Criterion) {
    let mut grp = c.benchmark_group("layer_forward");
    for &n in &[100, 1_000, 10_000] {
        grp.throughput(Throughput::Elements(n as u64));
        let layer = BTSPLayer::new(n, 2000.0);
        let input = vec![0.5; n];
        grp.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, _| {
            b.iter(|| layer.forward(black_box(&input)))
        });
    }
    grp.finish();
}
/// Measures one-shot associative learning latency across layer sizes.
fn bench_one_shot_learning(c: &mut Criterion) {
    let mut grp = c.benchmark_group("one_shot_learning");
    for &n in &[100, 1_000, 10_000] {
        grp.throughput(Throughput::Elements(n as u64));
        let mut layer = BTSPLayer::new(n, 2000.0);
        let pattern = vec![0.5; n];
        grp.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, _| {
            b.iter(|| {
                layer.one_shot_associate(black_box(&pattern), black_box(0.8));
            })
        });
    }
    grp.finish();
}
/// Measures store and retrieve latency for the BTSP associative memory
/// (128-element keys mapped to 64-element values).
fn bench_associative_memory(c: &mut Criterion) {
    let mut grp = c.benchmark_group("associative_memory");
    let key = vec![0.5; 128];
    let value = vec![0.1; 64];
    let mut memory = BTSPAssociativeMemory::new(128, 64);
    // Write path: repeatedly re-store the same association.
    grp.bench_function("store_one_shot", |b| {
        b.iter(|| {
            memory
                .store_one_shot(black_box(&key), black_box(&value))
                .unwrap();
        })
    });
    // Read path: seed one association outside timing, then look it up.
    grp.bench_function("retrieve", |b| {
        memory.store_one_shot(&key, &value).unwrap();
        b.iter(|| {
            memory.retrieve(black_box(&key)).unwrap();
        })
    });
    grp.finish();
}
// Register the BTSP benchmarks with the Criterion harness; criterion_main!
// generates the benchmark binary's entry point.
criterion_group!(
    benches,
    bench_synapse_update,
    bench_layer_forward,
    bench_one_shot_learning,
    bench_associative_memory
);
criterion_main!(benches);

View File

@@ -0,0 +1,119 @@
//! E-prop performance benchmarks
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ruvector_nervous_system::plasticity::eprop::{EpropNetwork, EpropSynapse};
/// Measures the latency of a single e-prop synapse update call.
/// Argument semantics are defined by `EpropSynapse::update`; the fixed
/// values here only need to be representative, not meaningful.
fn bench_synapse_update(c: &mut Criterion) {
    let mut grp = c.benchmark_group("synapse_update");
    let mut syn = EpropSynapse::new(0.5, 20.0);
    grp.bench_function("single_update", |b| {
        b.iter(|| {
            syn.update(
                black_box(true),
                black_box(0.5),
                black_box(0.1),
                black_box(1.0),
                black_box(0.01),
            );
        })
    });
    grp.finish();
}
/// Measures e-prop network forward-pass latency for several hidden sizes
/// (100 inputs, 10 outputs), reporting hidden-neuron throughput.
fn bench_network_forward(c: &mut Criterion) {
    let mut grp = c.benchmark_group("network_forward");
    let input = vec![0.5; 100];
    for &hidden in &[100, 500, 1000] {
        let mut net = EpropNetwork::new(100, hidden, 10);
        grp.throughput(Throughput::Elements(hidden as u64));
        grp.bench_with_input(BenchmarkId::from_parameter(hidden), &hidden, |b, _| {
            b.iter(|| {
                net.forward(black_box(&input), black_box(1.0));
            })
        });
    }
    grp.finish();
}
fn bench_network_backward(c: &mut Criterion) {
let mut group = c.benchmark_group("network_backward");
for &hidden_size in &[100, 500, 1000] {
let mut network = EpropNetwork::new(100, hidden_size, 10);
let input = vec![0.5; 100];
let error = vec![0.1; 10];
// Run forward first to populate spike buffer
network.forward(&input, 1.0);
group.throughput(Throughput::Elements(hidden_size as u64));
group.bench_with_input(
BenchmarkId::from_parameter(hidden_size),
&hidden_size,
|b, _| {
b.iter(|| {
network.backward(black_box(&error), black_box(0.01), black_box(1.0));
});
},
);
}
group.finish();
}
/// Measures a complete `online_step` call on a 100→1000→10 network.
fn bench_online_step(c: &mut Criterion) {
    let mut grp = c.benchmark_group("online_step");
    let mut net = EpropNetwork::new(100, 1000, 10);
    let input = vec![0.5; 100];
    let target = vec![1.0; 10];
    grp.throughput(Throughput::Elements(1000));
    grp.bench_function("1000_neurons", |b| {
        b.iter(|| {
            net.online_step(
                black_box(&input),
                black_box(&target),
                black_box(1.0),
                black_box(0.01),
            );
        })
    });
    grp.finish();
}
/// Measures network construction cost (allocation + initialization) across
/// hidden-layer sizes. The drop of the network is included in the timing.
fn bench_memory_footprint(c: &mut Criterion) {
    let mut grp = c.benchmark_group("memory");
    for &hidden in &[100, 500, 1000, 5000] {
        grp.bench_with_input(BenchmarkId::new("create_network", hidden), &hidden, |b, &h| {
            b.iter(|| {
                let net = EpropNetwork::new(100, h, 10);
                black_box(net);
            })
        });
    }
    grp.finish();
}
// Register the e-prop benchmarks with the Criterion harness; criterion_main!
// generates the benchmark binary's entry point.
criterion_group!(
    benches,
    bench_synapse_update,
    bench_network_forward,
    bench_network_backward,
    bench_online_step,
    bench_memory_footprint
);
criterion_main!(benches);

View File

@@ -0,0 +1,114 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use ruvector_nervous_system::plasticity::consolidate::{ComplementaryLearning, Experience, EWC};
/// Measures EWC Fisher-information computation across parameter counts.
/// Each size uses 50 synthetic gradient samples; note the largest case
/// (1M params x 50 samples) allocates roughly 200 MB of setup data.
fn bench_fisher_computation(c: &mut Criterion) {
    let mut grp = c.benchmark_group("fisher_computation");
    for n in [1_000, 10_000, 100_000, 1_000_000] {
        grp.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
            let mut ewc = EWC::new(1000.0);
            let params = vec![0.5; n];
            let grads: Vec<Vec<f32>> = (0..50).map(|_| vec![0.1; n]).collect();
            b.iter(|| {
                ewc.compute_fisher(black_box(&params), black_box(&grads))
                    .unwrap();
            })
        });
    }
    grp.finish();
}
/// Measures EWC penalty-loss evaluation across parameter counts. Fisher
/// information is computed once per size, outside the timed loop.
fn bench_ewc_loss(c: &mut Criterion) {
    let mut grp = c.benchmark_group("ewc_loss");
    for n in [1_000, 10_000, 100_000, 1_000_000] {
        grp.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
            // Setup: anchor parameters plus Fisher information.
            let mut ewc = EWC::new(1000.0);
            let params = vec![0.5; n];
            let grads: Vec<Vec<f32>> = (0..50).map(|_| vec![0.1; n]).collect();
            ewc.compute_fisher(&params, &grads).unwrap();
            let shifted = vec![0.6; n];
            b.iter(|| {
                black_box(ewc.ewc_loss(black_box(&shifted)));
            })
        });
    }
    grp.finish();
}
/// Measures EWC penalty-gradient evaluation across parameter counts. Fisher
/// information is computed once per size, outside the timed loop.
fn bench_ewc_gradient(c: &mut Criterion) {
    let mut grp = c.benchmark_group("ewc_gradient");
    for n in [1_000, 10_000, 100_000, 1_000_000] {
        grp.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
            // Setup: anchor parameters plus Fisher information.
            let mut ewc = EWC::new(1000.0);
            let params = vec![0.5; n];
            let grads: Vec<Vec<f32>> = (0..50).map(|_| vec![0.1; n]).collect();
            ewc.compute_fisher(&params, &grads).unwrap();
            let shifted = vec![0.6; n];
            b.iter(|| {
                black_box(ewc.ewc_gradient(black_box(&shifted)));
            })
        });
    }
    grp.finish();
}
/// Measures memory consolidation latency as a function of replay-buffer
/// size. The buffer is pre-filled to capacity before timing starts.
fn bench_consolidation(c: &mut Criterion) {
    let mut grp = c.benchmark_group("consolidation");
    for n in [100, 1_000, 10_000] {
        grp.bench_with_input(BenchmarkId::from_parameter(n), &n, |b, &n| {
            let mut cls = ComplementaryLearning::new(1000, n, 1000.0);
            // Fill the replay buffer up to its configured size.
            for _ in 0..n {
                cls.store_experience(Experience::new(vec![1.0; 10], vec![0.5; 10], 1.0));
            }
            b.iter(|| {
                cls.consolidate(black_box(10), black_box(0.01)).unwrap();
            })
        });
    }
    grp.finish();
}
/// Measures the cost of storing a single experience in the replay buffer.
/// The cloned experience's allocation cost is part of the timed work.
fn bench_experience_storage(c: &mut Criterion) {
    let mut group = c.benchmark_group("experience_storage");
    // Fix: `store_experience` mutates the buffer (bench_consolidation calls it
    // through a `mut` binding), so this binding must be mutable as well; the
    // previous immutable `let cls` would not compile.
    let mut cls = ComplementaryLearning::new(1000, 10_000, 1000.0);
    let exp = Experience::new(vec![1.0; 100], vec![0.5; 100], 1.0);
    group.bench_function("store_experience", |b| {
        b.iter(|| {
            cls.store_experience(black_box(exp.clone()));
        });
    });
    group.finish();
}
// Register the consolidation/EWC benchmarks with the Criterion harness;
// criterion_main! generates the benchmark binary's entry point.
criterion_group!(
    benches,
    bench_fisher_computation,
    bench_ewc_loss,
    bench_ewc_gradient,
    bench_consolidation,
    bench_experience_storage
);
criterion_main!(benches);

View File

@@ -0,0 +1,139 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use ruvector_nervous_system::hdc::{bind, bundle, HdcMemory, Hypervector};
/// Measures hypervector construction: fresh random versus deterministic
/// seeded creation.
fn bench_vector_creation(c: &mut Criterion) {
    c.bench_function("hypervector_random", |b| {
        b.iter(|| {
            black_box(Hypervector::random());
        })
    });
    c.bench_function("hypervector_from_seed", |b| {
        b.iter(|| {
            black_box(Hypervector::from_seed(42));
        })
    });
}
/// Compares the method form (`Hypervector::bind`) and the free-function
/// form (`bind`) of hypervector binding on the same pair of vectors.
fn bench_binding(c: &mut Criterion) {
    let lhs = Hypervector::random();
    let rhs = Hypervector::random();
    c.bench_function("bind_two_vectors", |b| {
        b.iter(|| {
            black_box(lhs.bind(&rhs));
        })
    });
    c.bench_function("bind_function", |b| {
        b.iter(|| {
            black_box(bind(&lhs, &rhs));
        })
    });
}
/// Measures bundling latency as a function of the number of input vectors.
fn bench_bundling(c: &mut Criterion) {
    let mut grp = c.benchmark_group("bundling");
    for &count in &[3, 5, 10, 20, 50] {
        let inputs: Vec<_> = (0..count).map(|_| Hypervector::random()).collect();
        grp.bench_with_input(BenchmarkId::from_parameter(count), &count, |b, _| {
            b.iter(|| {
                black_box(bundle(&inputs).unwrap());
            })
        });
    }
    grp.finish();
}
/// Measures similarity and Hamming-distance computation between two
/// independent random hypervectors.
fn bench_similarity(c: &mut Criterion) {
    let first = Hypervector::random();
    let second = Hypervector::random();
    c.bench_function("similarity", |b| {
        b.iter(|| {
            black_box(first.similarity(&second));
        })
    });
    c.bench_function("hamming_distance", |b| {
        b.iter(|| {
            black_box(first.hamming_distance(&second));
        })
    });
}
/// Measures HDC memory store and retrieval. Retrieval is benchmarked at
/// several pre-populated memory sizes, for both the thresholded and the
/// top-k query paths.
fn bench_memory_operations(c: &mut Criterion) {
    let mut grp = c.benchmark_group("memory");
    // Store path. NOTE(review): the memory grows without bound across
    // iterations, and each iteration allocates a fresh key via format!,
    // so that allocation is part of the measured time.
    grp.bench_function("store_single", |b| {
        let mut mem = HdcMemory::new();
        let vector = Hypervector::random();
        let mut counter = 0;
        b.iter(|| {
            mem.store(format!("key_{}", counter), vector.clone());
            counter += 1;
        })
    });
    // Retrieval paths at increasing memory sizes.
    for &n in &[10, 100, 1000, 10_000] {
        let mut mem = HdcMemory::with_capacity(n);
        for i in 0..n {
            mem.store(format!("key_{}", i), Hypervector::random());
        }
        let query = Hypervector::random();
        grp.bench_with_input(BenchmarkId::new("retrieve", n), &n, |b, _| {
            b.iter(|| {
                black_box(mem.retrieve(&query, 0.8));
            })
        });
        grp.bench_with_input(BenchmarkId::new("retrieve_top_k", n), &n, |b, _| {
            b.iter(|| {
                black_box(mem.retrieve_top_k(&query, 10));
            })
        });
    }
    grp.finish();
}
/// Measures a complete workflow per iteration: build a fresh memory of 100
/// random hypervectors, then query it for the 10 most similar items.
fn bench_end_to_end(c: &mut Criterion) {
    c.bench_function("end_to_end_workflow", |b| {
        b.iter(|| {
            let mut mem = HdcMemory::new();
            // Populate the memory.
            for i in 0..100 {
                let v = Hypervector::random();
                mem.store(format!("item_{}", i), v);
            }
            // Query for the nearest items.
            let probe = Hypervector::random();
            black_box(mem.retrieve_top_k(&probe, 10));
        })
    });
}
// Register the HDC benchmarks with the Criterion harness; criterion_main!
// generates the benchmark binary's entry point.
criterion_group!(
    benches,
    bench_vector_creation,
    bench_binding,
    bench_bundling,
    bench_similarity,
    bench_memory_operations,
    bench_end_to_end
);
criterion_main!(benches);

View File

@@ -0,0 +1,401 @@
// Comprehensive latency benchmarks for RuVector Nervous System components
// Measures P50, P99, P99.9 percentiles for all critical operations
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use std::time::Duration;
// Note: Import actual types when implemented
// use ruvector_nervous_system::*;
// ============================================================================
// Event Bus Benchmarks
// ============================================================================
/// Event-bus latency placeholders (publish / bounded-queue pop / priority
/// routing). All real calls are commented out until `EventBus` and friends
/// are implemented; each `b.iter` currently times only `black_box(42)`, so
/// reported numbers reflect harness overhead, not event-bus latency.
fn benchmark_event_bus(c: &mut Criterion) {
    let mut group = c.benchmark_group("event_bus");
    group.warm_up_time(Duration::from_secs(1));
    group.measurement_time(Duration::from_secs(5));
    // Event publish latency (target: <10μs)
    group.bench_function("event_publish", |b| {
        // let bus = EventBus::new(1000);
        // let event = Event::new("test", vec![0.0; 128]);
        b.iter(|| {
            // bus.publish(black_box(event.clone()))
            // Placeholder until EventBus is implemented
            black_box(42);
        });
    });
    // Event delivery from bounded queue (target: <5μs)
    group.bench_function("bounded_queue_delivery", |b| {
        // let queue = BoundedQueue::new(1000);
        b.iter(|| {
            // queue.pop()
            black_box(42);
        });
    });
    // Priority routing (target: <20μs)
    group.bench_function("priority_routing", |b| {
        // let router = PriorityRouter::new();
        // let event = Event::with_priority("test", vec![0.0; 128], Priority::High);
        b.iter(|| {
            // router.route(black_box(&event))
            black_box(42);
        });
    });
    group.finish();
}
// ============================================================================
// HDC (Hyperdimensional Computing) Benchmarks
// ============================================================================
/// Measures raw bit-level HDC primitives on 10,000-bit packed vectors using
/// the local placeholder helpers (binding, bundling, distance, similarity).
fn benchmark_hdc(c: &mut Criterion) {
    let mut group = c.benchmark_group("hdc");
    group.warm_up_time(Duration::from_millis(500));
    let mut rng = StdRng::seed_from_u64(42);
    // Binding = element-wise XOR (target: <100ns)
    let bind_a = generate_bitvector(&mut rng, 10000);
    let bind_b = generate_bitvector(&mut rng, 10000);
    group.bench_function("vector_binding", |b| {
        b.iter(|| xor_bitvectors(black_box(&bind_a), black_box(&bind_b)))
    });
    // Bundling ten vectors (target: <500ns)
    let to_bundle: Vec<_> = (0..10)
        .map(|_| generate_bitvector(&mut rng, 10000))
        .collect();
    group.bench_function("vector_bundling", |b| {
        b.iter(|| majority_bitvectors(black_box(&to_bundle)))
    });
    // Hamming distance (target: <100ns)
    let dist_a = generate_bitvector(&mut rng, 10000);
    let dist_b = generate_bitvector(&mut rng, 10000);
    group.bench_function("hamming_distance", |b| {
        b.iter(|| hamming_distance(black_box(&dist_a), black_box(&dist_b)))
    });
    // Normalized similarity (target: <200ns)
    let sim_a = generate_bitvector(&mut rng, 10000);
    let sim_b = generate_bitvector(&mut rng, 10000);
    group.bench_function("similarity_check", |b| {
        b.iter(|| hdc_similarity(black_box(&sim_a), black_box(&sim_b)))
    });
    group.finish();
}
// ============================================================================
// WTA (Winner-Take-All) Benchmarks
// ============================================================================
/// Measures winner-take-all selection over 1000 random activations using the
/// local placeholder helpers; the real WTA types are not implemented yet.
fn benchmark_wta(c: &mut Criterion) {
    let mut group = c.benchmark_group("wta");
    let mut rng = StdRng::seed_from_u64(42);
    // Single winner selection (target: <1μs)
    group.bench_function("single_winner", |b| {
        let activations: Vec<f32> = (0..1000).map(|_| rng.gen()).collect();
        // wta.select_winner(&activations) once implemented
        b.iter(|| argmax(black_box(&activations)))
    });
    // k-WTA with k=5 (target: <5μs)
    group.bench_function("k_wta_5", |b| {
        let activations: Vec<f32> = (0..1000).map(|_| rng.gen()).collect();
        // wta.select_k_winners(&activations, 5) once implemented
        b.iter(|| argmax_k(black_box(&activations), 5))
    });
    // Lateral inhibition update (target: <10μs)
    group.bench_function("lateral_inhibition", |b| {
        let activations: Vec<f32> = (0..1000).map(|_| rng.gen()).collect();
        // lateral_inhibition.update(&activations) once implemented
        b.iter(|| apply_inhibition(black_box(&activations), 0.5))
    });
    group.finish();
}
// ============================================================================
// Hopfield Network Benchmarks
// ============================================================================
/// Hopfield-network latency placeholders. `ModernHopfield` is not implemented
/// yet: the storage loop is commented out, so every `pattern_retrieval` size
/// currently measures only `query.clone()`, and `pattern_storage` measures
/// `black_box(42)`. Only `energy_computation` exercises real (placeholder)
/// arithmetic via `compute_energy_placeholder`.
fn benchmark_hopfield(c: &mut Criterion) {
    let mut group = c.benchmark_group("hopfield");
    group.warm_up_time(Duration::from_secs(1));
    group.measurement_time(Duration::from_secs(5));
    let mut rng = StdRng::seed_from_u64(42);
    // Pattern retrieval with varying pattern counts
    for num_patterns in [10, 100, 1000].iter() {
        group.bench_with_input(
            BenchmarkId::new("pattern_retrieval", num_patterns),
            num_patterns,
            // `num_patterns` is unused until the commented store loop below
            // is re-enabled.
            |b, &num_patterns| {
                // let hopfield = ModernHopfield::new(512, 100.0);
                // for _ in 0..num_patterns {
                //     hopfield.store(generate_random_vector(&mut rng, 512));
                // }
                let query = generate_random_vector(&mut rng, 512);
                b.iter(|| {
                    // hopfield.retrieve(black_box(&query))
                    // Placeholder
                    black_box(query.clone())
                });
            },
        );
    }
    // Pattern storage (target: <100μs)
    group.bench_function("pattern_storage", |b| {
        // let hopfield = ModernHopfield::new(512, 100.0);
        // `pattern` is unused until the commented store call is re-enabled.
        let pattern = generate_random_vector(&mut rng, 512);
        b.iter(|| {
            // hopfield.store(black_box(pattern.clone()))
            black_box(42);
        });
    });
    // Energy computation (target: <50μs)
    group.bench_function("energy_computation", |b| {
        // let hopfield = ModernHopfield::new(512, 100.0);
        let state = generate_random_vector(&mut rng, 512);
        b.iter(|| {
            // hopfield.energy(black_box(&state))
            compute_energy_placeholder(black_box(&state))
        });
    });
    group.finish();
}
// ============================================================================
// Pattern Separation Benchmarks
// ============================================================================
/// Measures pattern-separation primitives using placeholder implementations:
/// L2 normalization stands in for orthogonal encoding and mean-centering for
/// decorrelation; collision detection has no placeholder work yet.
fn benchmark_pattern_separation(c: &mut Criterion) {
    let mut group = c.benchmark_group("pattern_separation");
    let mut rng = StdRng::seed_from_u64(42);
    // Encoding with orthogonalization (target: <500μs)
    group.bench_function("orthogonal_encoding", |b| {
        // encoder.encode(&input) once PatternSeparator is implemented
        let input = generate_random_vector(&mut rng, 512);
        b.iter(|| orthogonalize_placeholder(black_box(&input)))
    });
    // Collision detection (target: <100μs); pure placeholder for now.
    group.bench_function("collision_detection", |b| {
        // detector.check_collision(&encoded) once implemented
        let _encoded = generate_random_vector(&mut rng, 512);
        b.iter(|| black_box(false))
    });
    // Decorrelation (target: <200μs)
    group.bench_function("decorrelation", |b| {
        // decorrelator.decorrelate(&input) once implemented
        let input = generate_random_vector(&mut rng, 512);
        b.iter(|| decorrelate_placeholder(black_box(&input)))
    });
    group.finish();
}
// ============================================================================
// Plasticity Benchmarks
// ============================================================================
/// Plasticity-rule latency placeholders. None of the learner types exist yet,
/// so each `b.iter` times only `black_box(42)`; the random gradient vectors
/// are prepared so setup matches what the real calls will need.
fn benchmark_plasticity(c: &mut Criterion) {
    let mut group = c.benchmark_group("plasticity");
    let mut rng = StdRng::seed_from_u64(42);
    // E-prop gradient update (target: <100μs)
    group.bench_function("eprop_gradient_update", |b| {
        // let eprop = EPropLearner::new(100, 0.01);
        // `gradient` is unused until the commented call is re-enabled.
        let gradient = generate_random_vector(&mut rng, 100);
        b.iter(|| {
            // eprop.update_gradients(black_box(&gradient))
            black_box(42);
        });
    });
    // BTSP eligibility trace (target: <50μs)
    group.bench_function("btsp_eligibility_trace", |b| {
        // let btsp = BTSPLearner::new(100, 0.01, 1000);
        b.iter(|| {
            // btsp.update_eligibility(black_box(0.5))
            black_box(42);
        });
    });
    // EWC Fisher matrix update (target: <1ms)
    group.bench_function("ewc_fisher_update", |b| {
        // let ewc = EWCLearner::new(1000);
        // `gradient` is unused until the commented call is re-enabled.
        let gradient = generate_random_vector(&mut rng, 1000);
        b.iter(|| {
            // ewc.update_fisher(black_box(&gradient))
            black_box(42);
        });
    });
    group.finish();
}
// ============================================================================
// Cognitum Integration Benchmarks
// ============================================================================
/// Cognitum integration latency placeholders. `ReflexArc` and `V0Adapter`
/// are not implemented; both benches currently time only `black_box(42)`.
fn benchmark_cognitum(c: &mut Criterion) {
    let mut group = c.benchmark_group("cognitum");
    // Reflex event→action (target: <100μs)
    group.bench_function("reflex_latency", |b| {
        // let reflex = ReflexArc::new();
        // let event = Event::new("sensor", vec![0.5; 128]);
        b.iter(|| {
            // reflex.process(black_box(&event))
            black_box(42);
        });
    });
    // v0 adapter dispatch (target: <50μs)
    group.bench_function("v0_adapter_dispatch", |b| {
        // let adapter = V0Adapter::new();
        b.iter(|| {
            // adapter.dispatch(black_box(&event))
            black_box(42);
        });
    });
    group.finish();
}
// ============================================================================
// Helper Functions (Placeholders)
// ============================================================================
/// Builds a packed bitvector with at least `size` random bits, rounded up to
/// whole u64 words, drawing from the supplied seeded RNG.
fn generate_bitvector(rng: &mut StdRng, size: usize) -> Vec<u64> {
    let words = (size + 63) / 64; // ceil(size / 64)
    (0..words).map(|_| rng.gen()).collect()
}
/// Builds a `size`-element vector of random f32 samples from the supplied
/// seeded RNG (rand's `Standard` distribution, i.e. values in [0, 1)).
fn generate_random_vector(rng: &mut StdRng, size: usize) -> Vec<f32> {
    let mut out = Vec::with_capacity(size);
    for _ in 0..size {
        out.push(rng.gen());
    }
    out
}
/// Element-wise XOR of two packed bitvectors (HDC binding); output length is
/// the shorter of the two inputs.
fn xor_bitvectors(a: &[u64], b: &[u64]) -> Vec<u64> {
    let mut out = Vec::with_capacity(a.len().min(b.len()));
    for (x, y) in a.iter().zip(b) {
        out.push(x ^ y);
    }
    out
}
/// Bitwise majority vote across a set of equal-length packed bitvectors
/// (HDC bundling): each output bit is 1 iff strictly more than half of the
/// inputs have that bit set; ties on an even count resolve to 0.
///
/// Fix: the previous implementation XOR-folded the vectors, which computes
/// parity, not the majority bundling that the "vector_bundling" benchmark
/// advertises. Also returns an empty vector for empty input instead of
/// panicking on `vectors[0]`.
fn majority_bitvectors(vectors: &[Vec<u64>]) -> Vec<u64> {
    if vectors.is_empty() {
        return Vec::new();
    }
    let threshold = vectors.len() / 2; // a bit wins when count > len/2
    let words = vectors[0].len();
    (0..words)
        .map(|w| {
            let mut word = 0u64;
            for bit in 0..64 {
                let ones = vectors.iter().filter(|v| (v[w] >> bit) & 1 == 1).count();
                if ones > threshold {
                    word |= 1u64 << bit;
                }
            }
            word
        })
        .collect()
}
/// Number of differing bits between two packed bitvectors, computed over the
/// zip of the two (i.e. up to the shorter length).
fn hamming_distance(a: &[u64], b: &[u64]) -> u32 {
    let mut bits = 0u32;
    for (x, y) in a.iter().zip(b) {
        bits += (x ^ y).count_ones();
    }
    bits
}
/// Normalized similarity in [0, 1]: 1.0 for identical vectors, 0.0 for exact
/// complements. The bit width is taken from `a`'s length, so equal-length
/// inputs are assumed.
fn hdc_similarity(a: &[u64], b: &[u64]) -> f32 {
    let width = (a.len() * 64) as f32;
    let differing = hamming_distance(a, b) as f32;
    1.0 - differing / width
}
/// Index of the largest element; on exact ties the LAST maximum wins,
/// matching `Iterator::max_by`. Panics on an empty slice or when a NaN
/// comparison occurs.
fn argmax(inputs: &[f32]) -> usize {
    assert!(!inputs.is_empty(), "argmax of empty slice");
    let mut winner = 0;
    for idx in 1..inputs.len() {
        // `!= Less` keeps the later index on exact ties.
        if inputs[idx].partial_cmp(&inputs[winner]).unwrap() != std::cmp::Ordering::Less {
            winner = idx;
        }
    }
    winner
}
/// Indices of the `k` largest elements, highest value first; equal values
/// keep ascending index order (stable sort). Returns all indices when
/// `k >= inputs.len()`. Panics if a NaN comparison occurs.
fn argmax_k(inputs: &[f32], k: usize) -> Vec<usize> {
    let mut order: Vec<usize> = (0..inputs.len()).collect();
    order.sort_by(|&i, &j| inputs[j].partial_cmp(&inputs[i]).unwrap());
    order.truncate(k);
    order
}
/// Crude global lateral inhibition: subtracts `strength` times the global
/// maximum from every activation and clamps the result at zero.
fn apply_inhibition(inputs: &[f32], strength: f32) -> Vec<f32> {
    let peak = inputs.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let penalty = strength * peak;
    inputs.iter().map(|&x| (x - penalty).max(0.0)).collect()
}
/// Placeholder Hopfield energy: negative sum of squared state components.
fn compute_energy_placeholder(state: &[f32]) -> f32 {
    let norm_sq: f32 = state.iter().map(|&x| x * x).sum();
    -norm_sq
}
/// Placeholder "orthogonalization": L2-normalizes the input. An all-zero
/// input divides by a zero norm and yields NaNs, matching the original.
fn orthogonalize_placeholder(input: &[f32]) -> Vec<f32> {
    let norm = input.iter().map(|&x| x * x).sum::<f32>().sqrt();
    input.iter().map(|&x| x / norm).collect()
}
/// Placeholder decorrelation: mean-centers the input vector.
fn decorrelate_placeholder(input: &[f32]) -> Vec<f32> {
    let mean = input.iter().sum::<f32>() / input.len() as f32;
    input.iter().map(|&x| x - mean).collect()
}
// ============================================================================
// Criterion Configuration
// ============================================================================
// Register the latency benchmark suites with the Criterion harness;
// criterion_main! generates the benchmark binary's entry point.
criterion_group!(
    benches,
    benchmark_event_bus,
    benchmark_hdc,
    benchmark_wta,
    benchmark_hopfield,
    benchmark_pattern_separation,
    benchmark_plasticity,
    benchmark_cognitum,
);
criterion_main!(benches);

View File

@@ -0,0 +1,64 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use ruvector_nervous_system::DentateGyrus;
/// Measures dentate-gyrus sparse encoding latency across input dimensions
/// (10,000 output units, 200 active, fixed seed 42).
fn bench_encoding(c: &mut Criterion) {
    let mut grp = c.benchmark_group("dentate_gyrus_encoding");
    for &dim in &[128, 256, 512] {
        let dg = DentateGyrus::new(dim, 10000, 200, 42);
        let input: Vec<f32> = (0..dim).map(|i| (i as f32).sin()).collect();
        grp.bench_with_input(BenchmarkId::from_parameter(dim), &dim, |b, _| {
            b.iter(|| black_box(dg.encode(black_box(&input))))
        });
    }
    grp.finish();
}
/// Measures similarity metrics between two sparse codes produced from
/// distinct 512-dimensional inputs (sin versus cos waveforms).
fn bench_similarity(c: &mut Criterion) {
    let dg = DentateGyrus::new(512, 10000, 200, 42);
    let wave_sin: Vec<f32> = (0..512).map(|i| (i as f32).sin()).collect();
    let wave_cos: Vec<f32> = (0..512).map(|i| (i as f32).cos()).collect();
    let code_a = dg.encode(&wave_sin);
    let code_b = dg.encode(&wave_cos);
    c.bench_function("jaccard_similarity", |b| {
        b.iter(|| black_box(code_a.jaccard_similarity(black_box(&code_b))))
    });
    c.bench_function("hamming_distance", |b| {
        b.iter(|| black_box(code_a.hamming_distance(black_box(&code_b))))
    });
}
/// Measures encoding latency at different sparsity levels, expressed as the
/// percentage of the 10,000 output units allowed to be active.
fn bench_sparsity_levels(c: &mut Criterion) {
    let mut grp = c.benchmark_group("sparsity_levels");
    let input: Vec<f32> = (0..512).map(|i| (i as f32).sin()).collect();
    for &pct in &[2, 3, 5] {
        let k = (10000 * pct) / 100; // number of active units
        let dg = DentateGyrus::new(512, 10000, k, 42);
        grp.bench_with_input(
            BenchmarkId::from_parameter(format!("{}%", pct)),
            &pct,
            |b, _| b.iter(|| black_box(dg.encode(black_box(&input)))),
        );
    }
    grp.finish();
}
// Register the dentate-gyrus benchmarks with the Criterion harness;
// criterion_main! generates the benchmark binary's entry point.
criterion_group!(
    benches,
    bench_encoding,
    bench_similarity,
    bench_sparsity_levels
);
criterion_main!(benches);