Squashed 'vendor/ruvector/' content from commit b64c2172

git-subtree-dir: vendor/ruvector
git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
commit d803bfe2b1
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,184 @@
//! Example of using different embedding providers with AgenticDB
//!
//! Run with:
//! ```bash
//! # Default hash-based (testing only)
//! cargo run --example embeddings_example
//!
//! # With OpenAI API (requires OPENAI_API_KEY env var)
//! OPENAI_API_KEY=sk-... cargo run --example embeddings_example --features real-embeddings
//! ```
use ruvector_core::types::DbOptions;
use ruvector_core::{AgenticDB, ApiEmbedding, HashEmbedding};
use std::sync::Arc;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("=== AgenticDB Embeddings Example ===\n");
// Determine which provider to use
let use_api = std::env::var("OPENAI_API_KEY").is_ok();
let (db, provider_name) = if use_api {
println!("Using OpenAI API embeddings (real semantic search)");
let api_key = std::env::var("OPENAI_API_KEY")?;
let provider = Arc::new(ApiEmbedding::openai(&api_key, "text-embedding-3-small"));
let mut options = DbOptions::default();
options.dimensions = 1536; // OpenAI text-embedding-3-small
options.storage_path = "/tmp/agenticdb_api.db".to_string();
let db = AgenticDB::with_embedding_provider(options, provider)?;
(db, "OpenAI API")
} else {
println!("Using hash-based embeddings (testing only - not semantic)");
println!("Set OPENAI_API_KEY to use real embeddings\n");
let mut options = DbOptions::default();
options.dimensions = 128;
options.storage_path = "/tmp/agenticdb_hash.db".to_string();
let db = AgenticDB::new(options)?;
(db, "Hash-based")
};
println!("Provider: {}\n", db.embedding_provider_name());
// Store some reflexion episodes
println!("--- Storing Reflexion Episodes ---");
let ep1 = db.store_episode(
"Fix Rust borrow checker error".to_string(),
vec![
"Identified lifetime issue".to_string(),
"Added explicit lifetime annotations".to_string(),
"Refactored to use references".to_string(),
],
vec!["Code compiles now".to_string()],
"Should explain borrow checker rules better".to_string(),
)?;
println!(
"✓ Stored episode: Fix Rust borrow checker error (ID: {})",
ep1
);
let ep2 = db.store_episode(
"Optimize Python data processing".to_string(),
vec![
"Profiled with cProfile".to_string(),
"Vectorized with NumPy".to_string(),
"Parallelized with multiprocessing".to_string(),
],
vec!["10x performance improvement".to_string()],
"Could have used Pandas for better readability".to_string(),
)?;
println!(
"✓ Stored episode: Optimize Python data processing (ID: {})",
ep2
);
let ep3 = db.store_episode(
"Debug JavaScript async issue".to_string(),
vec![
"Added console.log statements".to_string(),
"Used Chrome DevTools debugger".to_string(),
"Fixed Promise chain".to_string(),
],
vec!["Race condition resolved".to_string()],
"Should use async/await instead of callbacks".to_string(),
)?;
println!(
"✓ Stored episode: Debug JavaScript async issue (ID: {})\n",
ep3
);
// Create some skills
println!("--- Creating Skills ---");
let skill1 = db.create_skill(
"Memory Profiling".to_string(),
"Profile application memory usage to detect leaks and optimize allocation".to_string(),
Default::default(),
vec![
"valgrind".to_string(),
"massif".to_string(),
"heaptrack".to_string(),
],
)?;
println!("✓ Created skill: Memory Profiling (ID: {})", skill1);
let skill2 = db.create_skill(
"Async Programming".to_string(),
"Write asynchronous code using promises, async/await, or futures".to_string(),
Default::default(),
vec![
"Promise.all()".to_string(),
"async/await".to_string(),
"tokio".to_string(),
],
)?;
println!("✓ Created skill: Async Programming (ID: {})", skill2);
let skill3 = db.create_skill(
"Performance Optimization".to_string(),
"Profile and optimize code performance using profilers and benchmarks".to_string(),
Default::default(),
vec![
"perf".to_string(),
"criterion".to_string(),
"flamegraph".to_string(),
],
)?;
println!(
"✓ Created skill: Performance Optimization (ID: {})\n",
skill3
);
// Search episodes
println!("--- Searching Episodes ---");
let query = "memory problems in programming";
println!("Query: \"{}\"", query);
let episodes = db.retrieve_similar_episodes(query, 3)?;
println!("Found {} similar episodes:\n", episodes.len());
for (i, episode) in episodes.iter().enumerate() {
println!("{}. Task: {}", i + 1, episode.task);
println!(" Critique: {}", episode.critique);
println!(" Actions: {}", episode.actions.join(""));
println!();
}
if use_api {
println!(" With OpenAI embeddings, results are semantically similar!");
println!(" 'memory problems' should match 'Rust borrow checker' and 'memory profiling'");
} else {
println!("⚠️ Hash-based embeddings are NOT semantic!");
println!(" Results are based on character overlap, not meaning.");
println!(" Set OPENAI_API_KEY to see real semantic search.");
}
// Search skills
println!("\n--- Searching Skills ---");
let query = "handling asynchronous operations";
println!("Query: \"{}\"", query);
let skills = db.search_skills(query, 3)?;
println!("Found {} similar skills:\n", skills.len());
for (i, skill) in skills.iter().enumerate() {
println!("{}. {}", i + 1, skill.name);
println!(" Description: {}", skill.description);
println!(" Examples: {}", skill.examples.join(", "));
println!();
}
println!("=== Example Complete ===");
println!("\nTips:");
println!("- Use hash-based embeddings for testing/development");
println!("- Use API embeddings (OpenAI, Cohere, Voyage) for production");
println!("- Implement ONNX provider for offline/edge deployment");
println!("- See docs/EMBEDDINGS.md for full guide");
Ok(())
}

View File

@@ -0,0 +1,264 @@
//! Quick benchmark to compare NEON SIMD vs scalar performance on Apple Silicon
//!
//! Run with: cargo run --example neon_benchmark --release -p ruvector-core
use std::time::Instant;
fn main() {
println!("╔════════════════════════════════════════════════════════════╗");
println!("║ NEON SIMD Benchmark for Apple Silicon (M4 Pro) ║");
println!("╚════════════════════════════════════════════════════════════╝\n");
// Test parameters
let dimensions = 128; // Common embedding dimension
let num_vectors = 10_000;
let num_queries = 1_000;
// Generate test data
let vectors: Vec<Vec<f32>> = (0..num_vectors)
.map(|i| {
(0..dimensions)
.map(|j| ((i * j) % 1000) as f32 / 1000.0)
.collect()
})
.collect();
let queries: Vec<Vec<f32>> = (0..num_queries)
.map(|i| {
(0..dimensions)
.map(|j| ((i * j + 500) % 1000) as f32 / 1000.0)
.collect()
})
.collect();
println!("Configuration:");
println!(" - Dimensions: {}", dimensions);
println!(" - Vectors: {}", num_vectors);
println!(" - Queries: {}", num_queries);
println!(
" - Total distance calculations: {}\n",
num_vectors * num_queries
);
#[cfg(target_arch = "aarch64")]
println!("Platform: ARM64 (Apple Silicon) - NEON enabled ✓\n");
#[cfg(target_arch = "x86_64")]
println!("Platform: x86_64 - AVX2 detection enabled\n");
// Benchmark Euclidean distance (SIMD)
println!("═══════════════════════════════════════════════════════════════");
println!("Euclidean Distance:");
println!("═══════════════════════════════════════════════════════════════");
let start = Instant::now();
let mut simd_sum = 0.0f32;
for query in &queries {
for vec in &vectors {
simd_sum += euclidean_simd(query, vec);
}
}
let simd_time = start.elapsed();
println!(
" SIMD: {:>8.2} ms (checksum: {:.4})",
simd_time.as_secs_f64() * 1000.0,
simd_sum
);
let start = Instant::now();
let mut scalar_sum = 0.0f32;
for query in &queries {
for vec in &vectors {
scalar_sum += euclidean_scalar(query, vec);
}
}
let scalar_time = start.elapsed();
println!(
" Scalar: {:>8.2} ms (checksum: {:.4})",
scalar_time.as_secs_f64() * 1000.0,
scalar_sum
);
let speedup = scalar_time.as_secs_f64() / simd_time.as_secs_f64();
println!(" Speedup: {:.2}x\n", speedup);
// Benchmark Dot Product (SIMD)
println!("═══════════════════════════════════════════════════════════════");
println!("Dot Product:");
println!("═══════════════════════════════════════════════════════════════");
let start = Instant::now();
let mut simd_sum = 0.0f32;
for query in &queries {
for vec in &vectors {
simd_sum += dot_simd(query, vec);
}
}
let simd_time = start.elapsed();
println!(
" SIMD: {:>8.2} ms (checksum: {:.4})",
simd_time.as_secs_f64() * 1000.0,
simd_sum
);
let start = Instant::now();
let mut scalar_sum = 0.0f32;
for query in &queries {
for vec in &vectors {
scalar_sum += dot_scalar(query, vec);
}
}
let scalar_time = start.elapsed();
println!(
" Scalar: {:>8.2} ms (checksum: {:.4})",
scalar_time.as_secs_f64() * 1000.0,
scalar_sum
);
let speedup = scalar_time.as_secs_f64() / simd_time.as_secs_f64();
println!(" Speedup: {:.2}x\n", speedup);
// Benchmark Cosine Similarity (SIMD)
println!("═══════════════════════════════════════════════════════════════");
println!("Cosine Similarity:");
println!("═══════════════════════════════════════════════════════════════");
let start = Instant::now();
let mut simd_sum = 0.0f32;
for query in &queries {
for vec in &vectors {
simd_sum += cosine_simd(query, vec);
}
}
let simd_time = start.elapsed();
println!(
" SIMD: {:>8.2} ms (checksum: {:.4})",
simd_time.as_secs_f64() * 1000.0,
simd_sum
);
let start = Instant::now();
let mut scalar_sum = 0.0f32;
for query in &queries {
for vec in &vectors {
scalar_sum += cosine_scalar(query, vec);
}
}
let scalar_time = start.elapsed();
println!(
" Scalar: {:>8.2} ms (checksum: {:.4})",
scalar_time.as_secs_f64() * 1000.0,
scalar_sum
);
let speedup = scalar_time.as_secs_f64() / simd_time.as_secs_f64();
println!(" Speedup: {:.2}x\n", speedup);
println!("═══════════════════════════════════════════════════════════════");
println!("Benchmark complete!");
}
// SIMD implementations (use the crate's SIMD functions)
#[cfg(target_arch = "aarch64")]
use std::arch::aarch64::*;
/// Euclidean (L2) distance between `a` and `b`.
///
/// On aarch64 this processes 4 lanes per iteration with NEON intrinsics and
/// finishes the `len % 4` remainder with a scalar tail; on other targets it
/// uses a portable iterator implementation with identical results.
///
/// # Panics
/// Panics if `a` and `b` have different lengths.
#[inline]
fn euclidean_simd(a: &[f32], b: &[f32]) -> f32 {
    // The SIMD path reads `b` through raw pointers using `a.len()`, so a
    // shorter `b` would be an out-of-bounds read (UB). Check up front.
    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    #[cfg(target_arch = "aarch64")]
    // SAFETY: `a` and `b` are valid for `len` f32 reads (slices of that
    // length, verified equal above). Each `vld1q_f32` reads 4 f32s starting
    // at `idx = i * 4 <= len - 4`; the tail loop uses safe indexing.
    unsafe {
        let len = a.len();
        let mut sum = vdupq_n_f32(0.0);
        let chunks = len / 4;
        for i in 0..chunks {
            let idx = i * 4;
            let va = vld1q_f32(a.as_ptr().add(idx));
            let vb = vld1q_f32(b.as_ptr().add(idx));
            let diff = vsubq_f32(va, vb);
            // Fused multiply-add: sum += diff * diff, per lane.
            sum = vfmaq_f32(sum, diff, diff);
        }
        // Horizontal add of the 4 lanes, then scalar tail.
        let mut total = vaddvq_f32(sum);
        for i in (chunks * 4)..len {
            let diff = a[i] - b[i];
            total += diff * diff;
        }
        total.sqrt()
    }
    #[cfg(not(target_arch = "aarch64"))]
    {
        // Portable fallback, self-contained so this function does not depend
        // on a sibling definition.
        a.iter()
            .zip(b.iter())
            .map(|(x, y)| (x - y) * (x - y))
            .sum::<f32>()
            .sqrt()
    }
}
/// Scalar reference implementation of Euclidean (L2) distance.
#[inline]
fn euclidean_scalar(a: &[f32], b: &[f32]) -> f32 {
    let mut acc = 0.0f32;
    for (x, y) in a.iter().zip(b) {
        let d = x - y;
        acc += d * d;
    }
    acc.sqrt()
}
/// Dot product of `a` and `b`.
///
/// On aarch64 this accumulates 4 lanes per iteration with NEON FMA and adds
/// the `len % 4` remainder scalar-wise; elsewhere it uses a portable
/// iterator implementation.
///
/// # Panics
/// Panics if `a` and `b` have different lengths.
#[inline]
fn dot_simd(a: &[f32], b: &[f32]) -> f32 {
    // The SIMD path reads `b` through raw pointers using `a.len()`, so a
    // shorter `b` would be an out-of-bounds read (UB). Check up front.
    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    #[cfg(target_arch = "aarch64")]
    // SAFETY: `a` and `b` are valid for `len` f32 reads (slices of that
    // length, verified equal above). Each `vld1q_f32` reads 4 f32s starting
    // at `idx = i * 4 <= len - 4`; the tail loop uses safe indexing.
    unsafe {
        let len = a.len();
        let mut sum = vdupq_n_f32(0.0);
        let chunks = len / 4;
        for i in 0..chunks {
            let idx = i * 4;
            let va = vld1q_f32(a.as_ptr().add(idx));
            let vb = vld1q_f32(b.as_ptr().add(idx));
            // Fused multiply-add: sum += va * vb, per lane.
            sum = vfmaq_f32(sum, va, vb);
        }
        // Horizontal add of the 4 lanes, then scalar tail.
        let mut total = vaddvq_f32(sum);
        for i in (chunks * 4)..len {
            total += a[i] * b[i];
        }
        total
    }
    #[cfg(not(target_arch = "aarch64"))]
    {
        // Portable fallback, self-contained so this function does not depend
        // on a sibling definition.
        a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
    }
}
/// Scalar reference implementation of the dot product.
#[inline]
fn dot_scalar(a: &[f32], b: &[f32]) -> f32 {
    a.iter()
        .zip(b.iter())
        .fold(0.0, |acc, (x, y)| acc + x * y)
}
/// Cosine similarity between `a` and `b`, in [-1, 1].
///
/// Returns 0.0 when either vector has zero magnitude: the original 0/0
/// division produced NaN, and the benchmark's data generator emits an
/// all-zero vector (i = 0), which silently turned every accumulated
/// checksum into NaN.
///
/// On aarch64 the dot product and both squared norms are accumulated with
/// NEON FMA, 4 lanes per iteration, plus a scalar tail; elsewhere a portable
/// implementation with identical results is used.
///
/// # Panics
/// Panics if `a` and `b` have different lengths.
#[inline]
fn cosine_simd(a: &[f32], b: &[f32]) -> f32 {
    // The SIMD path reads `b` through raw pointers using `a.len()`, so a
    // shorter `b` would be an out-of-bounds read (UB). Check up front.
    assert_eq!(a.len(), b.len(), "input slices must have equal length");
    #[cfg(target_arch = "aarch64")]
    // SAFETY: `a` and `b` are valid for `len` f32 reads (slices of that
    // length, verified equal above). Each `vld1q_f32` reads 4 f32s starting
    // at `idx = i * 4 <= len - 4`; the tail loop uses safe indexing.
    unsafe {
        let len = a.len();
        let mut dot = vdupq_n_f32(0.0);
        let mut norm_a = vdupq_n_f32(0.0);
        let mut norm_b = vdupq_n_f32(0.0);
        let chunks = len / 4;
        for i in 0..chunks {
            let idx = i * 4;
            let va = vld1q_f32(a.as_ptr().add(idx));
            let vb = vld1q_f32(b.as_ptr().add(idx));
            dot = vfmaq_f32(dot, va, vb);
            norm_a = vfmaq_f32(norm_a, va, va);
            norm_b = vfmaq_f32(norm_b, vb, vb);
        }
        // Horizontal adds, then scalar tail for len % 4 elements.
        let mut dot_sum = vaddvq_f32(dot);
        let mut norm_a_sum = vaddvq_f32(norm_a);
        let mut norm_b_sum = vaddvq_f32(norm_b);
        for i in (chunks * 4)..len {
            dot_sum += a[i] * b[i];
            norm_a_sum += a[i] * a[i];
            norm_b_sum += b[i] * b[i];
        }
        // Guard the degenerate case: zero-magnitude input yields 0.0, not NaN.
        if norm_a_sum == 0.0 || norm_b_sum == 0.0 {
            return 0.0;
        }
        dot_sum / (norm_a_sum.sqrt() * norm_b_sum.sqrt())
    }
    #[cfg(not(target_arch = "aarch64"))]
    {
        // Portable fallback, self-contained so this function does not depend
        // on a sibling definition.
        let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
        let norm_a_sq: f32 = a.iter().map(|x| x * x).sum();
        let norm_b_sq: f32 = b.iter().map(|x| x * x).sum();
        // Same degenerate-input guard as the SIMD path.
        if norm_a_sq == 0.0 || norm_b_sq == 0.0 {
            return 0.0;
        }
        dot / (norm_a_sq.sqrt() * norm_b_sq.sqrt())
    }
}
/// Scalar reference implementation of cosine similarity, in [-1, 1].
///
/// Returns 0.0 when either vector has zero magnitude: the original 0/0
/// division produced NaN, and the benchmark's data generator emits an
/// all-zero vector (i = 0), which silently turned every accumulated
/// checksum into NaN.
#[inline]
fn cosine_scalar(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    // Degenerate-input guard: avoid NaN from 0/0.
    if norm_a == 0.0 || norm_b == 0.0 {
        return 0.0;
    }
    dot / (norm_a * norm_b)
}