Squashed 'vendor/ruvector/' content from commit b64c2172
git-subtree-dir: vendor/ruvector git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
204
crates/ruvector-cli/tests/cli_tests.rs
Normal file
204
crates/ruvector-cli/tests/cli_tests.rs
Normal file
@@ -0,0 +1,204 @@
|
||||
//! Integration tests for Ruvector CLI
|
||||
|
||||
use assert_cmd::Command;
|
||||
use predicates::prelude::*;
|
||||
use std::fs;
|
||||
use tempfile::tempdir;
|
||||
|
||||
/// `ruvector --version` exits 0 and prints the binary name.
#[test]
fn test_cli_version() {
    let mut cmd = Command::cargo_bin("ruvector").unwrap();
    cmd.args(["--version"]);
    let run = cmd.assert();
    run.success().stdout(predicate::str::contains("ruvector"));
}
|
||||
|
||||
/// `ruvector --help` exits 0 and shows the top-level description text.
#[test]
fn test_cli_help() {
    let mut cmd = Command::cargo_bin("ruvector").unwrap();
    cmd.args(["--help"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains(
            "High-performance Rust vector database",
        ));
}
|
||||
|
||||
/// `ruvector create` succeeds, reports success, and leaves a database file on disk.
#[test]
fn test_create_database() {
    let tmp = tempdir().unwrap();
    let db_file = tmp.path().join("test.db");

    let mut create = Command::cargo_bin("ruvector").unwrap();
    create.args([
        "create",
        "--path",
        db_file.to_str().unwrap(),
        "--dimensions",
        "128",
    ]);

    create
        .assert()
        .success()
        .stdout(predicate::str::contains("Database created successfully"));

    // The database file must now exist on disk.
    assert!(db_file.exists());
}
|
||||
|
||||
/// `ruvector info` on a freshly-created database reports statistics,
/// including the dimensions it was created with.
#[test]
fn test_info_command() {
    let tmp = tempdir().unwrap();
    let db_file = tmp.path().join("test.db");

    // Create a 64-dimension database first.
    let mut create = Command::cargo_bin("ruvector").unwrap();
    create.args([
        "create",
        "--path",
        db_file.to_str().unwrap(),
        "--dimensions",
        "64",
    ]);
    create.assert().success();

    // Then query it with `info`.
    let mut info = Command::cargo_bin("ruvector").unwrap();
    info.args(["info", "--db", db_file.to_str().unwrap()]);

    info.assert()
        .success()
        .stdout(predicate::str::contains("Database Statistics"))
        .stdout(predicate::str::contains("Dimensions: 64"));
}
|
||||
|
||||
/// `ruvector insert --format json` loads vectors from a JSON file and
/// reports how many were inserted.
#[test]
fn test_insert_from_json() {
    let tmp = tempdir().unwrap();
    let db_file = tmp.path().join("test.db");
    let input_file = tmp.path().join("vectors.json");

    // Write a two-vector JSON fixture for the insert command to consume.
    let fixture = r#"[
    {
        "id": "v1",
        "vector": [1.0, 2.0, 3.0],
        "metadata": {"label": "test1"}
    },
    {
        "id": "v2",
        "vector": [4.0, 5.0, 6.0],
        "metadata": {"label": "test2"}
    }
]"#;
    fs::write(&input_file, fixture).unwrap();

    // Create a 3-dimension database matching the fixture vectors.
    let mut create = Command::cargo_bin("ruvector").unwrap();
    create.args([
        "create",
        "--path",
        db_file.to_str().unwrap(),
        "--dimensions",
        "3",
    ]);
    create.assert().success();

    // Insert the fixture vectors.
    let mut insert = Command::cargo_bin("ruvector").unwrap();
    insert.args([
        "insert",
        "--db",
        db_file.to_str().unwrap(),
        "--input",
        input_file.to_str().unwrap(),
        "--format",
        "json",
        "--no-progress",
    ]);

    insert
        .assert()
        .success()
        .stdout(predicate::str::contains("Inserted 2 vectors"));
}
|
||||
|
||||
/// `ruvector search` over three orthogonal unit vectors returns the
/// identical vector (v1) for a matching query.
#[test]
fn test_search_command() {
    let tmp = tempdir().unwrap();
    let db_file = tmp.path().join("test.db");
    let input_file = tmp.path().join("vectors.json");

    // Three orthogonal unit vectors: querying [1,0,0] should rank v1 first.
    let fixture = r#"[
    {"id": "v1", "vector": [1.0, 0.0, 0.0]},
    {"id": "v2", "vector": [0.0, 1.0, 0.0]},
    {"id": "v3", "vector": [0.0, 0.0, 1.0]}
]"#;
    fs::write(&input_file, fixture).unwrap();

    // Create the database.
    let mut create = Command::cargo_bin("ruvector").unwrap();
    create.args([
        "create",
        "--path",
        db_file.to_str().unwrap(),
        "--dimensions",
        "3",
    ]);
    create.assert().success();

    // Populate it from the fixture.
    let mut insert = Command::cargo_bin("ruvector").unwrap();
    insert.args([
        "insert",
        "--db",
        db_file.to_str().unwrap(),
        "--input",
        input_file.to_str().unwrap(),
        "--format",
        "json",
        "--no-progress",
    ]);
    insert.assert().success();

    // Search for the vector identical to v1.
    let mut search = Command::cargo_bin("ruvector").unwrap();
    search.args([
        "search",
        "--db",
        db_file.to_str().unwrap(),
        "--query",
        "[1.0, 0.0, 0.0]",
        "--top-k",
        "2",
    ]);

    search
        .assert()
        .success()
        .stdout(predicate::str::contains("v1"));
}
|
||||
|
||||
/// `ruvector benchmark` runs against an empty database and prints its
/// results summary, including a queries-per-second figure.
#[test]
fn test_benchmark_command() {
    let tmp = tempdir().unwrap();
    let db_file = tmp.path().join("test.db");

    // A database must exist before benchmarking.
    let mut create = Command::cargo_bin("ruvector").unwrap();
    create.args([
        "create",
        "--path",
        db_file.to_str().unwrap(),
        "--dimensions",
        "128",
    ]);
    create.assert().success();

    // Run a 100-query benchmark.
    let mut bench = Command::cargo_bin("ruvector").unwrap();
    bench.args([
        "benchmark",
        "--db",
        db_file.to_str().unwrap(),
        "--queries",
        "100",
    ]);

    bench
        .assert()
        .success()
        .stdout(predicate::str::contains("Benchmark Results"))
        .stdout(predicate::str::contains("Queries per second"));
}
|
||||
|
||||
/// An unusable database path must make the CLI fail with an error message.
/// `/dev/null` is a device file, not a directory, so no database file can
/// ever exist inside it — the failure is guaranteed regardless of the
/// permissions of the user running the test.
#[test]
fn test_error_handling() {
    let mut cmd = Command::cargo_bin("ruvector").unwrap();
    cmd.args(["info", "--db", "/dev/null/db.db"]);

    let run = cmd.assert();
    run.failure().stderr(predicate::str::contains("Error"));
}
|
||||
312
crates/ruvector-cli/tests/gnn_performance_test.rs
Normal file
312
crates/ruvector-cli/tests/gnn_performance_test.rs
Normal file
@@ -0,0 +1,312 @@
|
||||
//! GNN Performance Optimization Tests
|
||||
//!
|
||||
//! Verifies that the GNN caching layer achieves the expected performance improvements:
|
||||
//! - Layer caching: ~250-500x faster (5-10ms vs ~2.5s)
|
||||
//! - Query caching: Instant results for repeated queries
|
||||
//! - Batch operations: Amortized overhead
|
||||
//!
|
||||
//! NOTE: These tests use relaxed thresholds for debug builds.
|
||||
//! Run with `cargo test --release` for production performance numbers.
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
// Import from the crate being tested
|
||||
mod gnn_cache_tests {
|
||||
use ruvector_gnn::layer::RuvectorLayer;
|
||||
use std::time::Instant;
|
||||
|
||||
// Debug builds are ~10-20x slower than release
|
||||
#[cfg(debug_assertions)]
|
||||
const LATENCY_MULTIPLIER: f64 = 20.0;
|
||||
#[cfg(not(debug_assertions))]
|
||||
const LATENCY_MULTIPLIER: f64 = 1.0;
|
||||
|
||||
/// Test that GNN layer creation has acceptable latency
|
||||
#[test]
|
||||
fn test_layer_creation_latency() {
|
||||
let start = Instant::now();
|
||||
let _layer = RuvectorLayer::new(128, 256, 4, 0.1).unwrap();
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
// Layer creation: 100ms in release, ~2000ms in debug
|
||||
let threshold_ms = 100.0 * LATENCY_MULTIPLIER;
|
||||
assert!(
|
||||
elapsed.as_millis() < threshold_ms as u128,
|
||||
"Layer creation took {}ms, expected <{}ms (debug={})",
|
||||
elapsed.as_millis(),
|
||||
threshold_ms,
|
||||
cfg!(debug_assertions)
|
||||
);
|
||||
|
||||
println!(
|
||||
"Layer creation latency: {:.3}ms (threshold: {:.0}ms)",
|
||||
elapsed.as_secs_f64() * 1000.0,
|
||||
threshold_ms
|
||||
);
|
||||
}
|
||||
|
||||
/// Test that forward pass has acceptable latency
|
||||
#[test]
|
||||
fn test_forward_pass_latency() {
|
||||
let layer = RuvectorLayer::new(128, 256, 4, 0.1).unwrap();
|
||||
let node = vec![0.5f32; 128];
|
||||
let neighbors = vec![vec![0.3f32; 128], vec![0.7f32; 128]];
|
||||
let weights = vec![0.5f32, 0.5f32];
|
||||
|
||||
// Warm up
|
||||
let _ = layer.forward(&node, &neighbors, &weights);
|
||||
|
||||
// Measure
|
||||
let start = Instant::now();
|
||||
let iterations = 100;
|
||||
for _ in 0..iterations {
|
||||
let _ = layer.forward(&node, &neighbors, &weights);
|
||||
}
|
||||
let elapsed = start.elapsed();
|
||||
let avg_ms = elapsed.as_secs_f64() * 1000.0 / iterations as f64;
|
||||
|
||||
// Forward pass: 5ms in release, ~100ms in debug
|
||||
let threshold_ms = 5.0 * LATENCY_MULTIPLIER;
|
||||
assert!(
|
||||
avg_ms < threshold_ms,
|
||||
"Average forward pass took {:.3}ms, expected <{:.0}ms",
|
||||
avg_ms,
|
||||
threshold_ms
|
||||
);
|
||||
|
||||
println!(
|
||||
"Average forward pass latency: {:.3}ms ({} iterations, threshold: {:.0}ms)",
|
||||
avg_ms, iterations, threshold_ms
|
||||
);
|
||||
}
|
||||
|
||||
/// Test batch operations performance
|
||||
#[test]
|
||||
fn test_batch_operations_performance() {
|
||||
let layer = RuvectorLayer::new(64, 128, 2, 0.1).unwrap();
|
||||
|
||||
// Create batch of operations
|
||||
let batch_size = 100;
|
||||
let nodes: Vec<Vec<f32>> = (0..batch_size).map(|_| vec![0.5f32; 64]).collect();
|
||||
let neighbors: Vec<Vec<Vec<f32>>> = (0..batch_size)
|
||||
.map(|_| vec![vec![0.3f32; 64], vec![0.7f32; 64]])
|
||||
.collect();
|
||||
let weights: Vec<Vec<f32>> = (0..batch_size).map(|_| vec![0.5f32, 0.5f32]).collect();
|
||||
|
||||
// Warm up
|
||||
let _ = layer.forward(&nodes[0], &neighbors[0], &weights[0]);
|
||||
|
||||
// Measure batch
|
||||
let start = Instant::now();
|
||||
for i in 0..batch_size {
|
||||
let _ = layer.forward(&nodes[i], &neighbors[i], &weights[i]);
|
||||
}
|
||||
let elapsed = start.elapsed();
|
||||
let total_ms = elapsed.as_secs_f64() * 1000.0;
|
||||
let avg_ms = total_ms / batch_size as f64;
|
||||
|
||||
// Batch: 500ms in release, ~10s in debug
|
||||
let threshold_ms = 500.0 * LATENCY_MULTIPLIER;
|
||||
println!(
|
||||
"Batch of {} operations: total={:.3}ms, avg={:.3}ms/op (threshold: {:.0}ms)",
|
||||
batch_size, total_ms, avg_ms, threshold_ms
|
||||
);
|
||||
|
||||
assert!(
|
||||
total_ms < threshold_ms,
|
||||
"Batch took {:.3}ms, expected <{:.0}ms",
|
||||
total_ms,
|
||||
threshold_ms
|
||||
);
|
||||
}
|
||||
|
||||
/// Test different layer sizes
|
||||
#[test]
|
||||
fn test_layer_size_scaling() {
|
||||
let sizes = [
|
||||
(64, 128, 2), // Small
|
||||
(128, 256, 4), // Medium
|
||||
(384, 768, 8), // Base (BERT-like)
|
||||
(768, 1024, 16), // Large
|
||||
];
|
||||
|
||||
println!("\nLayer size scaling test:");
|
||||
println!(
|
||||
"{:>10} {:>10} {:>8} {:>12} {:>12}",
|
||||
"Input", "Hidden", "Heads", "Create(ms)", "Forward(ms)"
|
||||
);
|
||||
|
||||
for (input, hidden, heads) in sizes {
|
||||
// Measure creation
|
||||
let start = Instant::now();
|
||||
let layer = RuvectorLayer::new(input, hidden, heads, 0.1).unwrap();
|
||||
let create_ms = start.elapsed().as_secs_f64() * 1000.0;
|
||||
|
||||
// Measure forward
|
||||
let node = vec![0.5f32; input];
|
||||
let neighbors = vec![vec![0.3f32; input], vec![0.7f32; input]];
|
||||
let weights = vec![0.5f32, 0.5f32];
|
||||
|
||||
// Warm up
|
||||
let _ = layer.forward(&node, &neighbors, &weights);
|
||||
|
||||
let start = Instant::now();
|
||||
let iterations = 10;
|
||||
for _ in 0..iterations {
|
||||
let _ = layer.forward(&node, &neighbors, &weights);
|
||||
}
|
||||
let forward_ms = start.elapsed().as_secs_f64() * 1000.0 / iterations as f64;
|
||||
|
||||
println!(
|
||||
"{:>10} {:>10} {:>8} {:>12.3} {:>12.3}",
|
||||
input, hidden, heads, create_ms, forward_ms
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Integration tests for the GNN cache system
#[cfg(test)]
mod gnn_cache_integration {
    use std::time::Instant;

    // Debug builds are ~10-20x slower than release
    #[cfg(debug_assertions)]
    const LATENCY_MULTIPLIER: f64 = 20.0;
    #[cfg(not(debug_assertions))]
    const LATENCY_MULTIPLIER: f64 = 1.0;

    /// Model the before/after scenario with fixed costs and check the
    /// arithmetic shows a meaningful speedup from caching.
    #[test]
    fn test_caching_benefit_simulation() {
        // "Before": every operation pays the full init cost. In reality this
        // would be ~2.5s; a smaller stand-in value keeps the test readable.
        let init_cost_ms = 50.0; // Represents the ~2.5s in real scenario
        let op_count = 10;
        let per_forward_ms = 2.0; // Actual forward pass cost

        // Before: each operation = init + forward.
        let total_without_cache = op_count as f64 * (init_cost_ms + per_forward_ms);
        // After: the init cost is paid once, then forward-only.
        let total_with_cache = init_cost_ms + (op_count as f64 * per_forward_ms);

        let speedup = total_without_cache / total_with_cache;

        println!("\nCaching benefit simulation:");
        println!("Operations: {}", op_count);
        println!("Before (no cache): {:.1}ms total", total_without_cache);
        println!("After (with cache): {:.1}ms total", total_with_cache);
        println!("Speedup: {:.1}x", speedup);

        assert!(
            speedup > 5.0,
            "Expected at least 5x speedup, got {:.1}x",
            speedup
        );
    }

    /// Measure actual repeated forward passes on a cached layer and check
    /// the per-op warm latency stays under the threshold.
    #[test]
    fn test_repeated_operations_speedup() {
        use ruvector_gnn::layer::RuvectorLayer;

        // Cold path: layer creation plus one forward pass.
        let cold_clock = Instant::now();
        let layer = RuvectorLayer::new(128, 256, 4, 0.1).unwrap();
        let node = vec![0.5f32; 128];
        let neighbors = vec![vec![0.3f32; 128], vec![0.7f32; 128]];
        let weights = vec![0.5f32, 0.5f32];
        let _ = layer.forward(&node, &neighbors, &weights);
        let cold_elapsed = cold_clock.elapsed();

        // Warm path: the layer already exists, only forward passes remain.
        let iterations = 50;
        let warm_clock = Instant::now();
        for _ in 0..iterations {
            let _ = layer.forward(&node, &neighbors, &weights);
        }
        let warm_elapsed = warm_clock.elapsed();
        let warm_avg_ms = warm_elapsed.as_secs_f64() * 1000.0 / iterations as f64;

        // Warm threshold: 5ms in release, ~100ms in debug.
        let warm_threshold_ms = 5.0 * LATENCY_MULTIPLIER;

        println!("\nRepeated operations test:");
        println!(
            "Cold start (create + forward): {:.3}ms",
            cold_elapsed.as_secs_f64() * 1000.0
        );
        println!(
            "Warm average ({} iterations): {:.3}ms/op (threshold: {:.0}ms)",
            iterations, warm_avg_ms, warm_threshold_ms
        );
        println!("Warm total: {:.3}ms", warm_elapsed.as_secs_f64() * 1000.0);

        assert!(
            warm_avg_ms < warm_threshold_ms,
            "Warm operations too slow: {:.3}ms (threshold: {:.0}ms)",
            warm_avg_ms,
            warm_threshold_ms
        );
    }

    /// Compare measured one-time creation cost against repeated forward cost
    /// to demonstrate that caching the layer is clearly worthwhile.
    #[test]
    fn test_caching_demonstrates_benefit() {
        use ruvector_gnn::layer::RuvectorLayer;

        // Create the layer exactly once and time it.
        let create_clock = Instant::now();
        let layer = RuvectorLayer::new(64, 128, 2, 0.1).unwrap();
        let create_elapsed = create_clock.elapsed();

        let node = vec![0.5f32; 64];
        let neighbors = vec![vec![0.3f32; 64]];
        let weights = vec![1.0f32];

        // Warm up.
        let _ = layer.forward(&node, &neighbors, &weights);

        // Time a run of forward passes.
        let iterations = 20;
        let forward_clock = Instant::now();
        for _ in 0..iterations {
            let _ = layer.forward(&node, &neighbors, &weights);
        }
        let forward_elapsed = forward_clock.elapsed();

        let creation_ms = create_elapsed.as_secs_f64() * 1000.0;
        let total_forward_ms = forward_elapsed.as_secs_f64() * 1000.0;
        let avg_forward_ms = total_forward_ms / iterations as f64;

        println!("\nCaching benefit demonstration:");
        println!("Layer creation: {:.3}ms (one-time cost)", creation_ms);
        println!(
            "Forward passes: {:.3}ms total for {} ops",
            total_forward_ms, iterations
        );
        println!("Average forward: {:.3}ms/op", avg_forward_ms);

        // Key insight: creation cost is paid once, forward is repeated.
        // Without caching, every operation would pay both costs.
        let without_caching = iterations as f64 * (creation_ms + avg_forward_ms);
        let with_caching = creation_ms + total_forward_ms;
        let benefit_ratio = without_caching / with_caching;

        println!("Without caching: {:.3}ms", without_caching);
        println!("With caching: {:.3}ms", with_caching);
        println!("Caching benefit: {:.1}x faster", benefit_ratio);

        assert!(
            benefit_ratio > 2.0,
            "Caching should provide at least 2x benefit, got {:.1}x",
            benefit_ratio
        );
    }
}
|
||||
298
crates/ruvector-cli/tests/hooks_tests.rs
Normal file
298
crates/ruvector-cli/tests/hooks_tests.rs
Normal file
@@ -0,0 +1,298 @@
|
||||
//! Unit tests for the hooks CLI commands
|
||||
|
||||
use assert_cmd::Command;
|
||||
use predicates::prelude::*;
|
||||
use std::fs;
|
||||
use tempfile::TempDir;
|
||||
|
||||
/// Helper to get the ruvector binary command
|
||||
fn ruvector_cmd() -> Command {
|
||||
Command::cargo_bin("ruvector").unwrap()
|
||||
}
|
||||
|
||||
/// `ruvector hooks --help` shows the hooks subsystem description.
#[test]
fn test_hooks_help() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "--help"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Self-learning intelligence hooks"));
}
|
||||
|
||||
/// `hooks stats` reports Q-learning pattern statistics.
#[test]
fn test_hooks_stats() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "stats"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Q-learning patterns"));
}
|
||||
|
||||
/// `hooks session-start` announces the intelligence layer.
#[test]
fn test_hooks_session_start() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "session-start"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Intelligence Layer Active"));
}
|
||||
|
||||
/// `hooks session-end` confirms the session was closed.
#[test]
fn test_hooks_session_end() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "session-end"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Session ended"));
}
|
||||
|
||||
/// `hooks pre-edit <file>` emits an intelligence analysis for the file.
#[test]
fn test_hooks_pre_edit() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "pre-edit", "src/main.rs"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Intelligence Analysis"));
}
|
||||
|
||||
/// `hooks post-edit --success <file>` records a successful edit outcome.
#[test]
fn test_hooks_post_edit_success() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "post-edit", "--success", "src/lib.rs"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Learning recorded"));
}
|
||||
|
||||
/// `hooks pre-command <cmd>` reports on the pending command.
#[test]
fn test_hooks_pre_command() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "pre-command", "cargo build"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Command"));
}
|
||||
|
||||
/// `hooks post-command --success <cmd> <args>` records the command outcome.
#[test]
fn test_hooks_post_command() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "post-command", "--success", "cargo", "test"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("recorded"));
}
|
||||
|
||||
/// `hooks remember` stores a typed memory entry and reports success.
#[test]
fn test_hooks_remember() {
    let mut cmd = ruvector_cmd();
    cmd.args([
        "hooks",
        "remember",
        "--memory-type",
        "test",
        "test content for memory",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("success"));
}
|
||||
|
||||
/// `hooks recall <query>` runs without error (output content not asserted).
#[test]
fn test_hooks_recall() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "recall", "test content"]);
    cmd.assert().success();
}
|
||||
|
||||
/// `hooks learn <state> <action> --reward <r>` records a Q-learning update.
#[test]
fn test_hooks_learn() {
    let mut cmd = ruvector_cmd();
    cmd.args([
        "hooks",
        "learn",
        "test-state",
        "test-action",
        "--reward",
        "0.8",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("success"));
}
|
||||
|
||||
/// `hooks suggest <state> --actions <list>` returns an action suggestion.
#[test]
fn test_hooks_suggest() {
    let mut cmd = ruvector_cmd();
    cmd.args([
        "hooks",
        "suggest",
        "edit-rs",
        "--actions",
        "coder,reviewer,tester",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("action"));
}
|
||||
|
||||
/// `hooks route <task>` produces a routing recommendation.
#[test]
fn test_hooks_route() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "route", "implement feature"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("recommended"));
}
|
||||
|
||||
/// `hooks should-test <file>` suggests running `cargo test` for a Rust file.
#[test]
fn test_hooks_should_test() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "should-test", "src/lib.rs"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("cargo test"));
}
|
||||
|
||||
/// `hooks suggest-next <file>` runs without error (output not asserted).
#[test]
fn test_hooks_suggest_next() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "suggest-next", "src/main.rs"]);
    cmd.assert().success();
}
|
||||
|
||||
/// `hooks record-error <cmd> <error>` echoes the recorded error code.
#[test]
fn test_hooks_record_error() {
    let mut cmd = ruvector_cmd();
    cmd.args([
        "hooks",
        "record-error",
        "cargo build",
        "error[E0308]: mismatched types",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("E0308"));
}
|
||||
|
||||
/// `hooks suggest-fix <code>` runs without error (output not asserted).
#[test]
fn test_hooks_suggest_fix() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "suggest-fix", "E0308"]);
    cmd.assert().success();
}
|
||||
|
||||
/// `hooks swarm-register <id> <role> --capabilities <list>` registers an agent.
#[test]
fn test_hooks_swarm_register() {
    let mut cmd = ruvector_cmd();
    cmd.args([
        "hooks",
        "swarm-register",
        "test-agent-1",
        "rust-developer",
        "--capabilities",
        "rust,testing",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("success"));
}
|
||||
|
||||
/// `hooks swarm-coordinate <a> <b> --weight <w>` links two agents.
#[test]
fn test_hooks_swarm_coordinate() {
    let mut cmd = ruvector_cmd();
    cmd.args([
        "hooks",
        "swarm-coordinate",
        "agent-1",
        "agent-2",
        "--weight",
        "0.8",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("success"));
}
|
||||
|
||||
/// `hooks swarm-optimize <tasks>` produces task assignments.
#[test]
fn test_hooks_swarm_optimize() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "swarm-optimize", "task1,task2,task3"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("assignments"));
}
|
||||
|
||||
/// `hooks swarm-recommend <topic>` runs without error (output not asserted).
#[test]
fn test_hooks_swarm_recommend() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "swarm-recommend", "rust development"]);
    cmd.assert().success();
}
|
||||
|
||||
/// `hooks swarm-heal <agent>` runs without error (output not asserted).
#[test]
fn test_hooks_swarm_heal() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "swarm-heal", "failed-agent"]);
    cmd.assert().success();
}
|
||||
|
||||
/// `hooks swarm-stats` reports on registered agents.
#[test]
fn test_hooks_swarm_stats() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "swarm-stats"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("agents"));
}
|
||||
|
||||
/// `hooks pre-compact` announces the pre-compact phase.
#[test]
fn test_hooks_pre_compact() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "pre-compact"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("Pre-compact"));
}
|
||||
|
||||
/// `hooks init` runs successfully. The actual config lands in
/// `~/.ruvector/`, not the current directory, so only the exit
/// status is checked here.
#[test]
fn test_hooks_init_creates_config() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "init"]);
    cmd.assert().success();
}
|
||||
|
||||
/// `hooks install` runs successfully (only the exit status is checked).
#[test]
fn test_hooks_install_runs() {
    let mut cmd = ruvector_cmd();
    cmd.args(["hooks", "install"]);
    cmd.assert().success();
}
|
||||
121
crates/ruvector-cli/tests/mcp_tests.rs
Normal file
121
crates/ruvector-cli/tests/mcp_tests.rs
Normal file
@@ -0,0 +1,121 @@
|
||||
//! Integration tests for Ruvector MCP Server
|
||||
|
||||
use serde_json::json;
|
||||
use tempfile::tempdir;
|
||||
|
||||
// Note: These are unit-style tests for MCP components
|
||||
// Full integration tests would require running the server
|
||||
|
||||
/// A JSON-RPC request round-trips through serde serialization unchanged.
#[test]
fn test_mcp_request_serialization() {
    use serde::{Deserialize, Serialize};

    // Local mirror of the MCP request shape (the real type is private
    // to the binary crate).
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct McpRequest {
        pub jsonrpc: String,
        pub id: Option<serde_json::Value>,
        pub method: String,
        pub params: Option<serde_json::Value>,
    }

    let original = McpRequest {
        jsonrpc: String::from("2.0"),
        id: Some(json!(1)),
        method: String::from("initialize"),
        params: None,
    };

    // Serialize to a wire string and check the method name survived.
    let wire = serde_json::to_string(&original).unwrap();
    assert!(wire.contains("initialize"));

    // Deserialize back and check the method field round-tripped.
    let parsed: McpRequest = serde_json::from_str(&wire).unwrap();
    assert_eq!(parsed.method, "initialize");
}
|
||||
|
||||
/// A successful JSON-RPC response serializes with a `result` and no `error`,
/// and round-trips through serde.
#[test]
fn test_mcp_response_serialization() {
    use serde::{Deserialize, Serialize};

    // Local mirror of the MCP response shape.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct McpResponse {
        pub jsonrpc: String,
        pub id: Option<serde_json::Value>,
        pub result: Option<serde_json::Value>,
        pub error: Option<serde_json::Value>,
    }

    impl McpResponse {
        // Build the success variant: result populated, error absent.
        fn success(id: Option<serde_json::Value>, result: serde_json::Value) -> Self {
            Self {
                jsonrpc: String::from("2.0"),
                id,
                result: Some(result),
                error: None,
            }
        }
    }

    let reply = McpResponse::success(Some(json!(1)), json!({"status": "ok"}));

    let wire = serde_json::to_string(&reply).unwrap();
    assert!(wire.contains("\"result\""));

    let parsed: McpResponse = serde_json::from_str(&wire).unwrap();
    assert!(parsed.result.is_some());
    assert!(parsed.error.is_none());
}
|
||||
|
||||
/// An error JSON-RPC response carries the error payload (with the standard
/// "method not found" code) and no result.
#[test]
fn test_mcp_error_response() {
    use serde::{Deserialize, Serialize};

    // Local mirrors of the MCP response/error shapes.
    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct McpResponse {
        pub jsonrpc: String,
        pub id: Option<serde_json::Value>,
        pub result: Option<serde_json::Value>,
        pub error: Option<McpError>,
    }

    #[derive(Debug, Clone, Serialize, Deserialize)]
    struct McpError {
        pub code: i32,
        pub message: String,
    }

    impl McpError {
        // Convenience constructor accepting anything convertible to String.
        fn new(code: i32, message: impl Into<String>) -> Self {
            Self {
                code,
                message: message.into(),
            }
        }
    }

    impl McpResponse {
        // Build the error variant: error populated, result absent.
        fn error(id: Option<serde_json::Value>, error: McpError) -> Self {
            Self {
                jsonrpc: String::from("2.0"),
                id,
                result: None,
                error: Some(error),
            }
        }
    }

    // JSON-RPC 2.0 reserved code for an unknown method.
    const METHOD_NOT_FOUND: i32 = -32601;

    let failure = McpError::new(METHOD_NOT_FOUND, "Method not found");
    let reply = McpResponse::error(Some(json!(1)), failure);

    assert!(reply.error.is_some());
    assert!(reply.result.is_none());
    assert_eq!(reply.error.unwrap().code, METHOD_NOT_FOUND);
}
|
||||
|
||||
// Note: Full MCP handler tests would require exposing the mcp module publicly
|
||||
// For now, we test the protocol serialization above
|
||||
// Integration tests would be run against the actual MCP server binary
|
||||
|
||||
// Note: Tests import from the binary crate via the test harness
|
||||
// The mcp module and config are not public in the binary, so we test via the public API
|
||||
Reference in New Issue
Block a user