Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,64 @@
//! Distributed Learning Example
//!
//! Demonstrates distributed Q-learning across multiple agents.
use ruvector_edge::prelude::*;
use ruvector_edge::IntelligenceSync;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Entry point for the distributed-learning demo: several simulated agents
/// feed (state, action, reward) experiences into one shared
/// `IntelligenceSync`, which is then queried for the merged best actions.
#[tokio::main]
async fn main() -> Result<()> {
    // Route tracing output to stdout at the "info" level.
    tracing_subscriber::fmt().with_env_filter("info").init();

    println!("Distributed Learning Example");
    println!("============================\n");

    // One shared intelligence store for the whole swarm, guarded for
    // concurrent access even though this demo runs sequentially.
    let sync = Arc::new(RwLock::new(IntelligenceSync::new("swarm-coordinator")));

    // Simulated experience tuples: (agent id, state, action, reward).
    // Note that "edit_code" appears twice — two agents learn the same state.
    let scenarios = [
        ("learner-001", "edit_code", "coder", 0.9),
        ("learner-001", "review_code", "reviewer", 0.85),
        ("learner-002", "test_code", "tester", 0.88),
        ("learner-002", "debug_error", "debugger", 0.92),
        ("learner-003", "deploy_app", "devops", 0.87),
        ("learner-003", "edit_code", "coder", 0.95), // Another agent learns edit_code
    ];

    println!("Distributed learning phase:");
    for &(agent, state, action, reward) in scenarios.iter() {
        // Take the write lock per experience; the guard drops at the end of
        // each iteration. NOTE(review): assumes `update_pattern` takes
        // `&self` (interior mutability) since the guard binding is not
        // mutable — confirm against IntelligenceSync's signature.
        let store = sync.write().await;
        store.update_pattern(state, action, reward);
        println!(" {} learned: {} -> {} ({:.2})", agent, state, action, reward);
    }

    // Read phase: hold a shared lock and query the aggregated knowledge.
    let store = sync.read().await;

    // Candidate actions offered for every query; built once up front.
    let candidates: Vec<String> = ["coder", "reviewer", "tester", "debugger", "devops"]
        .iter()
        .map(|role| role.to_string())
        .collect();

    println!("\nMerged intelligence queries:");
    for state in ["edit_code", "review_code", "test_code", "debug_error", "deploy_app"] {
        // Silently skip states the store has no recommendation for.
        if let Some((action, confidence)) = store.get_best_action(state, &candidates) {
            println!(" {} -> {} (confidence: {:.1}%)", state, action, confidence * 100.0);
        }
    }

    // Aggregate statistics over everything the swarm has learned.
    let stats = store.get_swarm_stats();
    println!("\nSwarm statistics:");
    println!(" Total patterns: {}", stats.total_patterns);
    println!(" Total visits: {}", stats.total_visits);
    println!(" Avg confidence: {:.1}%", stats.avg_confidence * 100.0);

    println!("\nDistributed learning example complete!");
    Ok(())
}

View File

@@ -0,0 +1,47 @@
//! Local Swarm Example
//!
//! Demonstrates local shared-memory swarm communication.
use ruvector_edge::prelude::*;
/// Entry point for the local-swarm demo: spins up a coordinator and a worker
/// agent in-process, records one learning experience on the worker, and asks
/// it for its best action.
#[tokio::main]
async fn main() -> Result<()> {
    // Route tracing output to stdout at the "info" level.
    tracing_subscriber::fmt().with_env_filter("info").init();

    println!("Local Swarm Example");
    println!("==================\n");

    // Coordinator agent: config is built inline and handed to the
    // async constructor.
    let coordinator = SwarmAgent::new(
        SwarmConfig::default()
            .with_agent_id("local-coordinator")
            .with_role(AgentRole::Coordinator),
    )
    .await?;
    println!("Created coordinator: {}", coordinator.id());

    // Worker agent, same construction pattern with the Worker role.
    let worker = SwarmAgent::new(
        SwarmConfig::default()
            .with_agent_id("local-worker-1")
            .with_role(AgentRole::Worker),
    )
    .await?;
    println!("Created worker: {}", worker.id());

    // Record a single simulated experience on the worker.
    worker.learn("local_task", "process_data", 0.9).await;
    println!("\nWorker learned: local_task -> process_data (0.9)");

    // Ask the worker to pick between the two candidate actions; nothing is
    // printed if it has no recommendation.
    let candidates = vec!["process_data".to_string(), "skip_data".to_string()];
    if let Some((action, confidence)) = worker.get_best_action("local_task", &candidates).await {
        println!("Best action: {} (confidence: {:.1}%)", action, confidence * 100.0);
    }

    println!("\nLocal swarm example complete!");
    Ok(())
}