Squashed 'vendor/ruvector/' content from commit b64c2172

git-subtree-dir: vendor/ruvector
git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
commit d803bfe2b1
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,202 @@
// Neural Field Benchmark - Memory-mapped operations performance
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use demand_paged_cognition::*;
use tempfile::NamedTempFile;
/// Benchmarks concept-hashing throughput of `MmapNeuralField::hash_address`
/// across several concept-vector lengths (throughput in elements).
fn bench_hash_address(c: &mut Criterion) {
    let backing = NamedTempFile::new().unwrap();
    // 1 GiB field backed by a temp file, mapped in 4 MiB pages.
    let field = MmapNeuralField::new(
        backing.path(),
        1024 * 1024 * 1024, // 1 GB
        Some(4 * 1024 * 1024), // 4 MB pages
    )
    .unwrap();
    let mut group = c.benchmark_group("hash_address");
    for &dims in [4usize, 16, 64, 256, 1024].iter() {
        group.throughput(Throughput::Elements(dims as u64));
        group.bench_with_input(BenchmarkId::from_parameter(dims), &dims, |b, &dims| {
            let concept = vec![0.1f32; dims];
            b.iter(|| field.hash_address(black_box(&concept)));
        });
    }
    group.finish();
}
/// Benchmarks raw read/write throughput of `MmapNeuralField` for several
/// f32 buffer lengths; throughput is reported in bytes (4 bytes per f32).
fn bench_read_write(c: &mut Criterion) {
    let backing = NamedTempFile::new().unwrap();
    let field = MmapNeuralField::new(
        backing.path(),
        1024 * 1024 * 1024, // 1 GB
        Some(4 * 1024 * 1024),
    )
    .unwrap();
    let mut group = c.benchmark_group("read_write");
    for &len in [64usize, 256, 1024, 4096].iter() {
        group.throughput(Throughput::Bytes((len * 4) as u64)); // f32 = 4 bytes
        // Measure writes of a `len`-element buffer at offset 0.
        group.bench_with_input(BenchmarkId::new("write", len), &len, |b, &len| {
            let payload = vec![1.0f32; len];
            b.iter(|| field.write(black_box(0), black_box(&payload)).unwrap());
        });
        // Pre-populate offset 0 so the read benchmark has data to fetch.
        let seed = vec![1.0f32; len];
        field.write(0, &seed).unwrap();
        group.bench_with_input(BenchmarkId::new("read", len), &len, |b, &len| {
            b.iter(|| field.read(black_box(0), black_box(len)).unwrap());
        });
    }
    group.finish();
}
/// Benchmarks `LazyLayer::forward` for square layers of increasing size,
/// with weights and bias served from a shared memory-mapped field.
fn bench_lazy_layer_forward(c: &mut Criterion) {
    let backing = NamedTempFile::new().unwrap();
    let storage = std::sync::Arc::new(
        MmapNeuralField::new(backing.path(), 1024 * 1024 * 1024, Some(4096)).unwrap(),
    );
    let mut group = c.benchmark_group("lazy_layer");
    for &(rows, cols) in [(10, 10), (100, 100), (256, 256), (512, 512)].iter() {
        // Seed the mapped field: weight matrix at offset 0, bias right after it.
        let weights = vec![0.1f32; rows * cols];
        let bias = vec![0.01f32; cols];
        storage.write(0, &weights).unwrap();
        let bias_offset = (weights.len() * 4) as u64; // f32 = 4 bytes
        storage.write(bias_offset, &bias).unwrap();
        let mut layer = LazyLayer::new(0, bias_offset, rows, cols, storage.clone());
        group.throughput(Throughput::Elements((rows * cols) as u64));
        group.bench_with_input(
            BenchmarkId::new("forward", format!("{}x{}", rows, cols)),
            &(rows, cols),
            |b, &(rows, _)| {
                let input = vec![1.0f32; rows];
                b.iter(|| layer.forward(black_box(&input)).unwrap());
            },
        );
    }
    group.finish();
}
/// Benchmarks tier promotion and page loading in `TieredMemory`.
/// Every measured iteration starts from a fresh memory holding one L4 page.
fn bench_tiered_memory(c: &mut Criterion) {
    let mut group = c.benchmark_group("tiered_memory");
    // Shared setup: a new TieredMemory containing page 1 on the HDD tier.
    let fresh = || {
        let mut memory = TieredMemory::new();
        memory.insert(Page::new(1, vec![1.0; 1024], Tier::L4Hdd)).unwrap();
        memory
    };
    // Promotion benchmark: L4 (HDD) straight up to L1 (DRAM).
    group.bench_function("promote_l4_to_l1", |b| {
        b.iter_with_setup(fresh, |mut memory| {
            memory.promote(1, Tier::L1Dram, "bench").unwrap()
        });
    });
    // Load benchmark (includes whatever promotion `load` performs).
    group.bench_function("load_page", |b| {
        b.iter_with_setup(fresh, |mut memory| memory.load(1).unwrap());
    });
    group.finish();
}
/// Benchmarks prediction latency of the three prefetch predictors:
/// Hoeffding tree, Markov chain, and the prefetch coordinator.
fn bench_prefetch_prediction(c: &mut Criterion) {
    let mut group = c.benchmark_group("prefetch");
    // Hoeffding Tree prediction after warming with a repeating 10-page pattern.
    group.bench_function("hoeffding_predict", |b| {
        let predictor = HoeffdingTreePredictor::new();
        for i in 0..100u64 {
            let page = i % 10;
            predictor.update(page, &AccessFeatures::new(page));
        }
        let features = AccessFeatures::new(5);
        b.iter(|| predictor.predict(black_box(&features), black_box(10)));
    });
    // Markov prediction after reinforcing the 1 -> 2 -> 3 -> 1 cycle.
    group.bench_function("markov_predict", |b| {
        let predictor = MarkovPredictor::new();
        for _ in 0..10 {
            predictor.update(1, 2);
            predictor.update(2, 3);
            predictor.update(3, 1);
        }
        b.iter(|| predictor.predict(black_box(1), black_box(10)));
    });
    // Coordinator prediction after recording fifty accesses of history.
    group.bench_function("coordinator_predict", |b| {
        let coordinator = PrefetchCoordinator::new();
        let context = vec![0.1, 0.2, 0.3];
        for page in 0..50 {
            coordinator.record_access(page, &context);
        }
        b.iter(|| coordinator.predict_and_queue(black_box(50), black_box(&context), black_box(5)));
    });
    group.finish();
}
/// Benchmarks an end-to-end DPNC query, rebuilding the whole system from a
/// fresh temp file in the setup phase of every measured iteration.
fn bench_dpnc_system(c: &mut Criterion) {
    let mut group = c.benchmark_group("dpnc_system");
    // Full-system setup is expensive; fewer samples keep total runtime bounded.
    group.sample_size(50);
    group.bench_function("full_query", |b| {
        // NOTE(review): the NamedTempFile is dropped (and unlinked) when setup
        // returns while DPNC keeps using the path — presumably fine on Unix via
        // the open descriptor, but worth confirming on other platforms.
        let build = || {
            let temp = NamedTempFile::new().unwrap();
            DPNC::new(temp.path(), DPNCConfig::default()).unwrap()
        };
        b.iter_with_setup(build, |mut dpnc| {
            let concept = vec![0.1, 0.2, 0.3, 0.4];
            dpnc.query(black_box(&concept)).unwrap()
        });
    });
    group.finish();
}
// Register every mmap-backed neural-field benchmark and emit the
// criterion `main` entry point for this benchmark binary.
criterion_group!(
    benches,
    bench_hash_address,
    bench_read_write,
    bench_lazy_layer_forward,
    bench_tiered_memory,
    bench_prefetch_prediction,
    bench_dpnc_system
);
criterion_main!(benches);

View File

@@ -0,0 +1,139 @@
// Prefetch Prediction Benchmark - Accuracy and performance metrics
use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId};
use demand_paged_cognition::*;
/// Benchmarks prefetch prediction over three access patterns (sequential,
/// pseudo-random, cyclic).
///
/// Fix: the sequential case previously built its 100-access history inside
/// the *measured* routine while the random and cyclic cases built theirs in
/// `iter_with_setup`'s setup phase, so the three results were not comparable.
/// History construction now happens in setup for all three patterns and only
/// `predict_and_queue` is timed.
fn bench_prefetch_accuracy(c: &mut Criterion) {
    let mut group = c.benchmark_group("prefetch_accuracy");
    // Sequential pattern: pages 0..100 in order, then predict after page 100.
    group.bench_function("sequential_pattern", |b| {
        b.iter_with_setup(
            || {
                let coordinator = PrefetchCoordinator::new();
                let context = vec![0.1, 0.2, 0.3];
                // Build sequential pattern (setup, not measured).
                for i in 0..100 {
                    coordinator.record_access(i, &context);
                }
                coordinator
            },
            |coordinator| {
                let context = vec![0.1, 0.2, 0.3];
                // Predict next — this is the only timed work.
                let predictions = coordinator.predict_and_queue(100, &context, 10);
                black_box(predictions)
            },
        );
    });
    // Pseudo-random pattern: pages derived from hashing the loop index.
    group.bench_function("random_pattern", |b| {
        b.iter_with_setup(
            || {
                use std::collections::hash_map::DefaultHasher;
                use std::hash::{Hash, Hasher};
                let coordinator = PrefetchCoordinator::new();
                let context = vec![0.1, 0.2, 0.3];
                // Build pseudo-random pattern (deterministic via DefaultHasher).
                for i in 0..100 {
                    let mut hasher = DefaultHasher::new();
                    i.hash(&mut hasher);
                    let page = (hasher.finish() % 1000) as u64;
                    coordinator.record_access(page, &context);
                }
                coordinator
            },
            |coordinator| {
                let context = vec![0.1, 0.2, 0.3];
                let predictions = coordinator.predict_and_queue(500, &context, 10);
                black_box(predictions)
            },
        );
    });
    // Cyclic pattern: 1 -> 2 -> 3 -> 4 -> 1, repeated 25 times.
    group.bench_function("cyclic_pattern", |b| {
        b.iter_with_setup(
            || {
                let coordinator = PrefetchCoordinator::new();
                let context = vec![0.1, 0.2, 0.3];
                // Build cyclic pattern: 1->2->3->4->1
                for _ in 0..25 {
                    coordinator.record_access(1, &context);
                    coordinator.record_access(2, &context);
                    coordinator.record_access(3, &context);
                    coordinator.record_access(4, &context);
                }
                coordinator
            },
            |coordinator| {
                let context = vec![0.1, 0.2, 0.3];
                let predictions = coordinator.predict_and_queue(4, &context, 5);
                black_box(predictions)
            },
        );
    });
    group.finish();
}
/// Benchmarks the per-observation update cost of the streaming predictors.
fn bench_streaming_learning(c: &mut Criterion) {
    let mut group = c.benchmark_group("streaming_learning");
    // Hoeffding Tree: one incremental update with fixed features for page 42.
    group.bench_function("hoeffding_update", |b| {
        let predictor = HoeffdingTreePredictor::new();
        let features = AccessFeatures::new(42);
        b.iter(|| predictor.update(black_box(42), black_box(&features)));
    });
    // Markov chain: one transition-count update for the edge 1 -> 2.
    group.bench_function("markov_update", |b| {
        let predictor = MarkovPredictor::new();
        b.iter(|| predictor.update(black_box(1), black_box(2)));
    });
    group.finish();
}
/// Benchmarks `AccessFeatures::from_history` + `to_vector` for histories
/// of increasing length with a fixed 5-element context vector.
fn bench_feature_extraction(c: &mut Criterion) {
    let mut group = c.benchmark_group("feature_extraction");
    for n in [10, 50, 100].iter() {
        group.bench_with_input(BenchmarkId::from_parameter(n), n, |b, &n| {
            // Synthetic page-access history 0..n.
            let history: Vec<u64> = (0..n).collect();
            let context = vec![0.1, 0.2, 0.3, 0.4, 0.5];
            b.iter(|| {
                let features =
                    AccessFeatures::from_history(black_box(&history), black_box(&context));
                black_box(features.to_vector())
            });
        });
    }
    group.finish();
}
// Register every prefetch-prediction benchmark and emit the criterion
// `main` entry point for this benchmark binary.
criterion_group!(
    benches,
    bench_prefetch_accuracy,
    bench_streaming_learning,
    bench_feature_extraction
);
criterion_main!(benches);