Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,41 @@
# Manifest for the ruvector-robotics example programs.
# Each numbered example is its own [[bin]] target so it can be run
# individually with `cargo run --bin <name>`.
[package]
name = "ruvector-robotics-examples"
version = "0.1.0"
edition = "2021"
# Examples are internal teaching material; never publish to crates.io.
publish = false
[dependencies]
# The crate being demonstrated, referenced by path within the workspace.
ruvector-robotics = { path = "../../crates/ruvector-robotics" }
serde_json = "1.0"
rand = "0.8"
# One binary per example, numbered in suggested reading order.
[[bin]]
name = "01_basic_perception"
path = "src/bin/01_basic_perception.rs"
[[bin]]
name = "02_obstacle_avoidance"
path = "src/bin/02_obstacle_avoidance.rs"
[[bin]]
name = "03_scene_graph"
path = "src/bin/03_scene_graph.rs"
[[bin]]
name = "04_behavior_tree"
path = "src/bin/04_behavior_tree.rs"
[[bin]]
name = "05_cognitive_robot"
path = "src/bin/05_cognitive_robot.rs"
[[bin]]
name = "06_swarm_coordination"
path = "src/bin/06_swarm_coordination.rs"
[[bin]]
name = "07_skill_learning"
path = "src/bin/07_skill_learning.rs"
[[bin]]
name = "08_world_model"
path = "src/bin/08_world_model.rs"
[[bin]]
name = "09_mcp_tools"
path = "src/bin/09_mcp_tools.rs"
[[bin]]
name = "10_full_pipeline"
path = "src/bin/10_full_pipeline.rs"

View File

@@ -0,0 +1,95 @@
/// Example 01: Basic Perception - Point cloud processing and spatial search
///
/// Demonstrates:
/// - Creating point clouds from simulated room walls
/// - Spatial nearest-neighbour search (kNN)
/// - Radius search around a query point
/// - Using the SpatialIndex for efficient lookups
/// - Distance-based wall proximity analysis
use ruvector_robotics::bridge::{Point3D, PointCloud, SpatialIndex};
/// Generate a wall as a strip of `num_points` evenly spaced points along
/// the segment from `start` to `end` (inclusive of both endpoints).
fn generate_wall(start: [f32; 3], end: [f32; 3], num_points: usize) -> Vec<Point3D> {
    let mut wall = Vec::with_capacity(num_points);
    for i in 0..num_points {
        // Interpolation parameter in [0, 1]; the denominator is clamped to 1
        // so a single-point wall sits exactly at `start` instead of dividing
        // by zero.
        let t = i as f32 / (num_points - 1).max(1) as f32;
        let delta = [end[0] - start[0], end[1] - start[1], end[2] - start[2]];
        wall.push(Point3D::new(
            start[0] + t * delta[0],
            start[1] + t * delta[1],
            start[2] + t * delta[2],
        ));
    }
    wall
}
fn main() {
    println!("=== Example 01: Basic Perception ===");
    println!();
    // Step 1: Build a simulated room (4 walls, 5m x 5m).
    // Wall order is North (y=5), South (y=0), West (x=0), East (x=5);
    // the per-wall slicing in Step 6 depends on this exact order.
    let points_per_wall = 50;
    let mut all_points = Vec::new();
    all_points.extend(generate_wall([0.0, 5.0, 0.0], [5.0, 5.0, 0.0], points_per_wall));
    all_points.extend(generate_wall([0.0, 0.0, 0.0], [5.0, 0.0, 0.0], points_per_wall));
    all_points.extend(generate_wall([0.0, 0.0, 0.0], [0.0, 5.0, 0.0], points_per_wall));
    all_points.extend(generate_wall([5.0, 0.0, 0.0], [5.0, 5.0, 0.0], points_per_wall));
    // NOTE(review): the second argument (1000) is presumably a timestamp or
    // capacity hint — confirm against PointCloud::new's signature.
    let cloud = PointCloud::new(all_points, 1000);
    println!("[1] Room point cloud created: {} points from 4 walls", cloud.len());
    // Step 2: Insert into spatial index (3 = dimensionality of the points)
    let mut index = SpatialIndex::new(3);
    index.insert_point_cloud(&cloud);
    println!("[2] Spatial index built with {} points", index.len());
    // Step 3: Robot position in the center of the room
    let robot_pos = Point3D::new(2.5, 2.5, 0.0);
    println!("[3] Robot position: ({:.1}, {:.1}, {:.1})", robot_pos.x, robot_pos.y, robot_pos.z);
    // Step 4: kNN search (SpatialIndex uses f32 queries)
    let k = 5;
    let query = [robot_pos.x, robot_pos.y, robot_pos.z];
    match index.search_nearest(&query, k) {
        Ok(results) => {
            println!();
            println!("[4] {} nearest points to robot:", k);
            // Each result pairs an index into `cloud.points` with a distance.
            for (rank, (idx, dist)) in results.iter().enumerate() {
                let p = &cloud.points[*idx];
                println!(
                    " #{}: idx={}, ({:.2}, {:.2}, {:.2}), distance={:.3}m",
                    rank, idx, p.x, p.y, p.z, dist
                );
            }
        }
        Err(e) => println!("[4] Search error: {:?}", e),
    }
    // Step 5: Radius search — count everything within `radius` of the robot.
    let radius = 2.0_f32;
    match index.search_radius(&query, radius) {
        Ok(results) => {
            println!();
            println!("[5] Points within {:.1}m of robot: {}", radius, results.len());
        }
        Err(e) => println!("[5] Search error: {:?}", e),
    }
    // Step 6: Wall proximity analysis using direct distance computation.
    // Relies on the walls occupying contiguous, equally sized slices of the
    // cloud in the same order they were generated above.
    println!();
    println!("[6] Wall proximity analysis:");
    let wall_names = ["North", "South", "West", "East"];
    for (i, name) in wall_names.iter().enumerate() {
        let wall_start = i * points_per_wall;
        let wall_end = wall_start + points_per_wall;
        let min_dist = cloud.points[wall_start..wall_end]
            .iter()
            .map(|p| robot_pos.distance_to(p))
            .fold(f32::MAX, f32::min);
        println!(" {:>5} wall: closest point at {:.3}m", name, min_dist);
    }
    println!();
    println!("[done] Basic perception example complete.");
}

View File

@@ -0,0 +1,126 @@
/// Example 02: Obstacle Avoidance - Detect, classify, and compute safe distances
///
/// Demonstrates:
/// - Creating a point cloud with clustered obstacle points
/// - Detecting obstacles using the PerceptionPipeline
/// - Classifying obstacles by geometry (Static, Dynamic, Unknown)
/// - Computing distances and safety margins
use rand::Rng;
use ruvector_robotics::bridge::{Point3D, PointCloud};
use ruvector_robotics::perception::{PerceptionConfig, PerceptionPipeline};
fn main() {
    println!("=== Example 02: Obstacle Avoidance ===");
    println!();
    let mut rng = rand::thread_rng();
    // Robot sits at the origin; obstacle distances below are measured from it.
    let robot_pos = [0.0_f64, 0.0, 0.0];
    // Step 1: Generate a point cloud with several obstacle clusters
    let mut points = Vec::new();
    let obstacle_centers: Vec<[f32; 3]> = vec![
        [2.0, 1.0, 0.0], // near
        [4.0, -1.0, 0.0], // medium range
        [7.0, 3.0, 0.0], // far
        [1.5, -2.0, 0.0], // very near
    ];
    println!("[1] Generating obstacle clusters:");
    for (i, center) in obstacle_centers.iter().enumerate() {
        // Random cluster size; points are jittered around each center
        // (tighter in z to keep clusters roughly planar).
        let cluster_size = rng.gen_range(5..15);
        for _ in 0..cluster_size {
            points.push(Point3D::new(
                center[0] + rng.gen_range(-0.3..0.3),
                center[1] + rng.gen_range(-0.3..0.3),
                center[2] + rng.gen_range(-0.1..0.1),
            ));
        }
        // XY distance from the robot at the origin (z ignored).
        let dist = ((center[0] as f64).powi(2) + (center[1] as f64).powi(2)).sqrt();
        println!(
            " Cluster {}: center=({:.1}, {:.1}), ~{} points, dist={:.2}m",
            i, center[0], center[1], cluster_size, dist
        );
    }
    // Add some scattered noise points
    for _ in 0..20 {
        points.push(Point3D::new(
            rng.gen_range(-10.0..10.0),
            rng.gen_range(-10.0..10.0),
            rng.gen_range(-0.1..0.1),
        ));
    }
    // NOTE(review): second argument (1000) presumably a timestamp — confirm
    // against PointCloud::new's signature.
    let cloud = PointCloud::new(points, 1000);
    println!(" Total points: {} (including 20 noise)", cloud.len());
    println!();
    // Step 2: Run perception pipeline — yields detected obstacles plus a
    // scene graph of spatial relationships.
    let config = PerceptionConfig::default();
    let mut pipeline = PerceptionPipeline::new(config);
    let (obstacles, graph) = pipeline.process(&cloud, &robot_pos);
    println!("[2] Perception pipeline results:");
    println!(" Detected obstacles: {}", obstacles.len());
    println!(
        " Scene graph: {} objects, {} edges",
        graph.objects.len(),
        graph.edges.len()
    );
    println!();
    // Step 3: Report each obstacle in a fixed-width table.
    println!("[3] Obstacle details:");
    println!(
        " {:>3} {:>20} {:>8} {:>8} {:>6}",
        "#", "Center (x, y, z)", "Extent", "MinDist", "Points"
    );
    println!(" {}", "-".repeat(56));
    for (i, obs) in obstacles.iter().enumerate() {
        // Report the largest of the three axis extents.
        let max_extent = obs.extent.iter().fold(0.0_f64, |a, &b| a.max(b));
        println!(
            " {:>3} ({:>5.2}, {:>5.2}, {:>5.2}) {:>6.2}m {:>6.2}m {:>6}",
            i,
            obs.center[0],
            obs.center[1],
            obs.center[2],
            max_extent,
            obs.min_distance,
            obs.point_count,
        );
    }
    // Step 4: Classify obstacles (Static / Dynamic / Unknown by geometry).
    let classified = pipeline.classify(&obstacles);
    println!();
    println!("[4] Obstacle classification:");
    for (i, c) in classified.iter().enumerate() {
        println!(
            " Obstacle {}: class={:?}, confidence={:.2}, dist={:.2}m",
            i, c.class, c.confidence, c.obstacle.min_distance
        );
    }
    // Step 5: Safety assessment — anything closer than the margin is flagged.
    println!();
    let safety_margin = 1.5;
    let dangerous = obstacles
        .iter()
        .filter(|o| o.min_distance < safety_margin)
        .count();
    println!("[5] Safety assessment (margin={:.1}m):", safety_margin);
    println!(" Obstacles within safety margin: {}", dangerous);
    println!(
        " Status: {}",
        if dangerous > 0 {
            "CAUTION - obstacles nearby"
        } else {
            "CLEAR"
        }
    );
    println!();
    println!("[done] Obstacle avoidance example complete.");
}

View File

@@ -0,0 +1,106 @@
/// Example 03: Scene Graph - Build, inspect, and merge scene graphs
///
/// Demonstrates:
/// - Creating SceneObjects with positions and extents
/// - Building a SceneGraph from objects using SceneGraphBuilder
/// - Inspecting computed edges (spatial relationships)
/// - Merging two scene graphs into one
use ruvector_robotics::bridge::SceneObject;
use ruvector_robotics::perception::SceneGraphBuilder;
fn main() {
    println!("=== Example 03: Scene Graph ===");
    println!();
    // -- Step 1: Create scene objects --
    // Each object is built inline: SceneObject::new(id, center, extent),
    // then labeled and given a detection confidence.
    let objects = vec![
        {
            let mut o = SceneObject::new(0, [1.0, 1.0, 0.0], [0.3, 0.3, 0.5]);
            o.label = "table".into();
            o.confidence = 0.95;
            o
        },
        {
            let mut o = SceneObject::new(1, [1.5, 1.2, 0.0], [0.2, 0.2, 0.8]);
            o.label = "chair".into();
            o.confidence = 0.90;
            o
        },
        {
            let mut o = SceneObject::new(2, [5.0, 5.0, 0.0], [1.0, 0.1, 1.0]);
            o.label = "wall".into();
            o.confidence = 0.99;
            o
        },
        {
            let mut o = SceneObject::new(3, [5.5, 4.5, 0.0], [0.3, 0.3, 0.3]);
            o.label = "box".into();
            o.confidence = 0.85;
            o
        },
    ];
    println!("[1] Scene objects ({}):", objects.len());
    for obj in &objects {
        println!(
            " id={} '{}' at ({:.1}, {:.1}, {:.1}) conf={:.2}",
            obj.id, obj.label, obj.center[0], obj.center[1], obj.center[2], obj.confidence
        );
    }
    // -- Step 2: Build scene graph --
    // NOTE(review): assumed builder args are (max edge distance, capacity)
    // and the build() arg is a timestamp — confirm against
    // SceneGraphBuilder's API.
    let builder = SceneGraphBuilder::new(3.0, 256);
    let graph = builder.build(objects, 1000);
    println!();
    println!("[2] Scene graph: {} objects, {} edges", graph.objects.len(), graph.edges.len());
    for edge in &graph.edges {
        // Resolve edge endpoints back to labels for readable output.
        // unwrap() is safe here only because edges reference ids the builder
        // just created from these same objects.
        let from_label = &graph.objects.iter().find(|o| o.id == edge.from).unwrap().label;
        let to_label = &graph.objects.iter().find(|o| o.id == edge.to).unwrap().label;
        println!(
            " {} --[{}, {:.2}m]--> {}",
            from_label, edge.relation, edge.distance, to_label
        );
    }
    // -- Step 3: Build a second scene and merge --
    let objects_b = vec![
        {
            let mut o = SceneObject::new(0, [8.0, 1.0, 0.0], [0.4, 0.4, 0.4]);
            o.label = "robot_peer".into();
            o.confidence = 0.80;
            o
        },
        {
            let mut o = SceneObject::new(1, [8.5, 1.5, 0.0], [0.2, 0.2, 0.6]);
            o.label = "barrel".into();
            o.confidence = 0.88;
            o
        },
    ];
    let graph_b = builder.build(objects_b, 2000);
    println!();
    println!("[3] Second scene: {} objects, {} edges", graph_b.objects.len(), graph_b.edges.len());
    // A wider-range builder so cross-scene edges can form during the merge.
    let wide_builder = SceneGraphBuilder::new(10.0, 256);
    let merged = wide_builder.merge(&graph, &graph_b);
    println!();
    println!(
        "[4] Merged scene graph: {} objects, {} edges, timestamp={}",
        merged.objects.len(),
        merged.edges.len(),
        merged.timestamp
    );
    for obj in &merged.objects {
        println!(
            " id={} '{}' at ({:.1}, {:.1}, {:.1})",
            obj.id, obj.label, obj.center[0], obj.center[1], obj.center[2]
        );
    }
    println!();
    println!("[done] Scene graph example complete.");
}

View File

@@ -0,0 +1,151 @@
/// Example 04: Behavior Tree - Declarative robot control with status transitions
///
/// Demonstrates:
/// - Building a patrol behavior tree with Selector and Sequence nodes
/// - Setting conditions and action results dynamically
/// - Observing tick-by-tick status transitions
/// - Using decorators (Inverter, Repeat, Timeout)
///
/// Tree structure:
/// Root (Selector)
/// |-- Avoid (Sequence): [Condition("obstacle_near")] -> [Action("evade")]
/// |-- Patrol (Sequence): [Action("select_wp")] -> [Action("move")] -> [Action("wait")]
use ruvector_robotics::cognitive::{BehaviorNode, BehaviorStatus, BehaviorTree, DecoratorType};
fn main() {
    println!("=== Example 04: Behavior Tree ===");
    println!();
    // Step 1: Build the behavior tree.
    // Selector tries Avoid first; Patrol only runs when Avoid fails
    // (i.e. no obstacle is near).
    let avoid_subtree = BehaviorNode::Sequence(vec![
        BehaviorNode::Condition("obstacle_near".into()),
        BehaviorNode::Action("evade".into()),
    ]);
    let patrol_subtree = BehaviorNode::Sequence(vec![
        BehaviorNode::Action("select_waypoint".into()),
        BehaviorNode::Action("move_to_waypoint".into()),
        BehaviorNode::Action("wait_at_waypoint".into()),
    ]);
    let root = BehaviorNode::Selector(vec![avoid_subtree, patrol_subtree]);
    let mut tree = BehaviorTree::new(root);
    println!("[1] Behavior tree built:");
    println!(" Root (Selector)");
    println!(" Avoid (Sequence): [obstacle_near?] -> [evade]");
    println!(" Patrol (Sequence): [select_wp] -> [move] -> [wait]");
    println!();
    // Step 2: Simulate normal patrol (no obstacle).
    // Conditions and action results are injected; the tree itself performs
    // no real work in this example.
    println!("[2] Normal patrol (no obstacle):");
    tree.set_condition("obstacle_near", false);
    tree.set_action_result("select_waypoint", BehaviorStatus::Success);
    tree.set_action_result("move_to_waypoint", BehaviorStatus::Success);
    tree.set_action_result("wait_at_waypoint", BehaviorStatus::Success);
    for tick in 0..3 {
        let status = tree.tick();
        println!(
            " Tick {}: {:?} (tick_count={})",
            tick,
            status,
            tree.context().tick_count
        );
    }
    println!();
    // Step 3: Simulate obstacle detection — the Avoid branch takes over.
    println!("[3] Obstacle detected:");
    tree.set_condition("obstacle_near", true);
    tree.set_action_result("evade", BehaviorStatus::Running);
    let status = tree.tick();
    println!(" Tick: {:?} (evade is Running)", status);
    tree.set_action_result("evade", BehaviorStatus::Success);
    let status = tree.tick();
    println!(" Tick: {:?} (evade succeeded, obstacle handled)", status);
    // Clear obstacle
    tree.set_condition("obstacle_near", false);
    let status = tree.tick();
    println!(" Tick: {:?} (back to patrol)", status);
    println!();
    // Step 4: Movement failure scenario — a failing child fails the whole
    // Patrol sequence, which bubbles up through the Selector.
    println!("[4] Movement failure scenario:");
    tree.set_action_result("move_to_waypoint", BehaviorStatus::Failure);
    let status = tree.tick();
    println!(
        " Tick: {:?} (movement failed, Sequence returns Failure)",
        status
    );
    tree.set_action_result("move_to_waypoint", BehaviorStatus::Success);
    let status = tree.tick();
    println!(" Tick: {:?} (movement recovered)", status);
    println!();
    // Step 5: Decorator examples
    println!("[5] Decorator examples:");
    // Inverter: turn success into failure
    let inverted = BehaviorNode::Decorator(
        DecoratorType::Inverter,
        Box::new(BehaviorNode::Action("check".into())),
    );
    let mut inv_tree = BehaviorTree::new(inverted);
    inv_tree.set_action_result("check", BehaviorStatus::Success);
    let status = inv_tree.tick();
    println!(" Inverter(Success) = {:?}", status);
    // Repeat: run an action 3 times
    let repeated = BehaviorNode::Decorator(
        DecoratorType::Repeat(3),
        Box::new(BehaviorNode::Action("step".into())),
    );
    let mut rep_tree = BehaviorTree::new(repeated);
    rep_tree.set_action_result("step", BehaviorStatus::Success);
    let status = rep_tree.tick();
    println!(" Repeat(3, Success) = {:?}", status);
    // Timeout: fail after N ticks. The child stays Running, so ticks 0-1
    // report Running and later ticks show the timeout behavior.
    let timed = BehaviorNode::Decorator(
        DecoratorType::Timeout(2),
        Box::new(BehaviorNode::Action("slow_task".into())),
    );
    let mut time_tree = BehaviorTree::new(timed);
    time_tree.set_action_result("slow_task", BehaviorStatus::Running);
    println!(" Timeout(2):");
    for i in 0..4 {
        let status = time_tree.tick();
        println!(" Tick {}: {:?}", i, status);
    }
    // Step 6: Parallel node — succeeds when at least `threshold` children
    // succeed in the same tick.
    println!();
    println!("[6] Parallel node (threshold=2 of 3):");
    let parallel = BehaviorNode::Parallel(
        2,
        vec![
            BehaviorNode::Action("sensor_a".into()),
            BehaviorNode::Action("sensor_b".into()),
            BehaviorNode::Action("sensor_c".into()),
        ],
    );
    let mut par_tree = BehaviorTree::new(parallel);
    par_tree.set_action_result("sensor_a", BehaviorStatus::Success);
    par_tree.set_action_result("sensor_b", BehaviorStatus::Success);
    par_tree.set_action_result("sensor_c", BehaviorStatus::Failure);
    let status = par_tree.tick();
    println!(" [S, S, F] = {:?} (2 >= threshold 2)", status);
    par_tree.set_action_result("sensor_b", BehaviorStatus::Failure);
    let status = par_tree.tick();
    println!(" [S, F, F] = {:?} (1 < threshold 2)", status);
    println!();
    println!("[done] Behavior tree example complete.");
}

View File

@@ -0,0 +1,128 @@
/// Example 05: Cognitive Robot - Full perceive-think-act-learn loop
///
/// Demonstrates:
/// - Using CognitiveCore for autonomous decision making
/// - The perceive -> think -> act -> learn cycle
/// - Attention threshold adaptation from feedback
/// - Decision history and cumulative reward tracking
use ruvector_robotics::cognitive::{
ActionType, CognitiveConfig, CognitiveCore, CognitiveMode, Outcome, Percept,
};
fn main() {
    println!("=== Example 05: Cognitive Robot ===");
    println!();
    // Step 1: Initialize cognitive core.
    // attention_threshold filters low-confidence percepts; learning_rate
    // scales how strongly each Outcome adjusts internal parameters.
    let config = CognitiveConfig {
        mode: CognitiveMode::Reactive,
        attention_threshold: 0.5,
        learning_rate: 0.01,
        max_percepts: 100,
    };
    let mut core = CognitiveCore::new(config);
    println!("[1] Cognitive core initialized:");
    println!(" Mode: {:?}", core.mode());
    println!(" State: {:?}", core.state());
    println!();
    // Step 2: Simulate 10 cognitive cycles
    println!("[2] Running 10 cognitive cycles:");
    println!();
    // Scripted sensor readings: (source, data vector, confidence).
    let sensor_data: Vec<(&str, Vec<f64>, f64)> = vec![
        ("lidar", vec![2.0, 1.5, 0.0], 0.9),
        ("camera", vec![-1.0, 3.0, 0.5], 0.85),
        ("imu", vec![0.1, 0.2], 0.3), // below threshold -- will be dropped
        ("lidar", vec![4.0, 0.0, 0.0], 0.95),
        ("camera", vec![0.0, 5.0, 1.0], 0.7),
        ("sonar", vec![1.0, 1.0, 0.0], 0.6),
        ("camera", vec![-3.0, -2.0, 0.0], 0.88),
        ("lidar", vec![0.5, 0.5, 0.0], 0.92),
        ("depth", vec![2.5, 2.5, 1.0], 0.75),
        ("camera", vec![6.0, 0.0, 0.0], 0.4), // near threshold
    ];
    for (i, (source, data, confidence)) in sensor_data.iter().enumerate() {
        println!("--- Cycle {} ---", i);
        // PERCEIVE: feed one reading into the core's percept buffer.
        let percept = Percept {
            source: source.to_string(),
            data: data.clone(),
            confidence: *confidence,
            timestamp: (i * 100) as i64,
        };
        let state = core.perceive(percept);
        println!(
            " Perceive: source='{}', conf={:.2}, buffered={} [state={:?}]",
            source, confidence, core.percept_count(), state
        );
        // THINK: returns None when there is nothing worth reasoning about.
        if let Some(decision) = core.think() {
            // Render the chosen action for display; unhandled variants fall
            // back to their Debug form.
            let action_desc = match &decision.action.action {
                ActionType::Move(pos) => format!("Move({:.1}, {:.1}, {:.1})", pos[0], pos[1], pos[2]),
                ActionType::Wait(ms) => format!("Wait({}ms)", ms),
                _ => format!("{:?}", decision.action.action),
            };
            println!(
                " Think: {} (utility={:.2}, priority={})",
                decision.reasoning, decision.utility, decision.action.priority
            );
            // ACT: decision is consumed; the resulting command is unused here.
            let _command = core.act(decision);
            println!(" Act: {} [state={:?}]", action_desc, core.state());
            // LEARN: reward is derived from the (scripted) sensor confidence.
            let success = *confidence > 0.7;
            let reward = if success { 1.0 } else { -0.5 };
            core.learn(Outcome {
                success,
                reward,
                description: format!("cycle_{}", i),
            });
            println!(
                " Learn: success={}, reward={:.1}, cumulative={:.4}",
                success, reward, core.cumulative_reward()
            );
        } else {
            println!(" Think: no percepts to reason about");
        }
        println!();
    }
    // Step 3: Summary
    println!("[3] Cognitive summary after 10 cycles:");
    println!(" State: {:?}", core.state());
    println!(" Decisions made: {}", core.decision_count());
    println!(" Cumulative reward: {:.6}", core.cumulative_reward());
    println!(" Buffered percepts: {}", core.percept_count());
    // Step 4: Emergency mode demonstration — lower threshold, faster
    // learning, smaller buffer.
    println!();
    println!("[4] Emergency mode:");
    let mut emergency_core = CognitiveCore::new(CognitiveConfig {
        mode: CognitiveMode::Emergency,
        attention_threshold: 0.1,
        learning_rate: 0.05,
        max_percepts: 10,
    });
    emergency_core.perceive(Percept {
        source: "collision_sensor".into(),
        data: vec![0.0, 0.0, 0.0],
        confidence: 0.99,
        timestamp: 0,
    });
    if let Some(decision) = emergency_core.think() {
        println!(" Priority: {} (max for emergency)", decision.action.priority);
        println!(" Reasoning: {}", decision.reasoning);
    }
    println!();
    println!("[done] Cognitive robot example complete.");
}

View File

@@ -0,0 +1,167 @@
/// Example 06: Swarm Coordination - Multi-robot task assignment and formations
///
/// Demonstrates:
/// - Registering robots with capabilities in a SwarmCoordinator
/// - Assigning tasks based on capability matching
/// - Computing Line, Circle, and Grid formations
/// - Running consensus votes among swarm members
use ruvector_robotics::cognitive::{
Formation, FormationType, RobotCapabilities, SwarmConfig, SwarmCoordinator, SwarmTask,
};
fn main() {
    println!("=== Example 06: Swarm Coordination ===");
    println!();
    // Step 1: Create and register robots.
    // NOTE(review): units assumed — communication_range in meters,
    // consensus_threshold as a fraction of votes; confirm against SwarmConfig.
    let config = SwarmConfig {
        max_robots: 10,
        communication_range: 50.0,
        consensus_threshold: 0.5,
    };
    let mut coordinator = SwarmCoordinator::new(config);
    let robots = vec![
        RobotCapabilities {
            id: 0,
            max_speed: 1.5,
            payload: 10.0,
            sensors: vec!["lidar".into(), "camera".into()],
        },
        RobotCapabilities {
            id: 1,
            max_speed: 2.0,
            payload: 5.0,
            sensors: vec!["camera".into(), "imu".into()],
        },
        RobotCapabilities {
            id: 2,
            max_speed: 1.0,
            payload: 20.0,
            sensors: vec!["lidar".into(), "sonar".into()],
        },
        RobotCapabilities {
            id: 3,
            max_speed: 1.8,
            payload: 8.0,
            sensors: vec!["camera".into(), "depth".into(), "imu".into()],
        },
        RobotCapabilities {
            id: 4,
            max_speed: 1.2,
            payload: 15.0,
            sensors: vec!["lidar".into(), "camera".into(), "sonar".into()],
        },
    ];
    println!("[1] Registering {} robots:", robots.len());
    for robot in robots {
        // Clone because register_robot takes ownership but we still print
        // the robot's fields afterwards.
        let registered = coordinator.register_robot(robot.clone());
        println!(
            " Robot {}: speed={:.1}, payload={:.0}kg, sensors=[{}] -> {}",
            robot.id,
            robot.max_speed,
            robot.payload,
            robot.sensors.join(", "),
            if registered { "OK" } else { "REJECTED" }
        );
    }
    println!(" Total registered: {}", coordinator.robot_count());
    println!();
    // Step 2: Create and assign tasks.
    // Assignment matches each task's required_capabilities against the
    // registered robots' sensor lists.
    let tasks = vec![
        SwarmTask {
            id: 100,
            description: "Scan area A with lidar".into(),
            location: [3.0, 4.0, 0.0],
            required_capabilities: vec!["lidar".into()],
            priority: 8,
        },
        SwarmTask {
            id: 101,
            description: "Visual inspection of zone B".into(),
            location: [5.0, 0.0, 0.0],
            required_capabilities: vec!["camera".into()],
            priority: 5,
        },
        SwarmTask {
            id: 102,
            description: "Underwater sonar survey".into(),
            location: [0.0, 7.0, -2.0],
            required_capabilities: vec!["sonar".into()],
            priority: 10,
        },
        SwarmTask {
            id: 103,
            description: "Depth mapping of area C".into(),
            location: [8.0, 8.0, 0.0],
            required_capabilities: vec!["depth".into(), "camera".into()],
            priority: 6,
        },
    ];
    println!("[2] Assigning {} tasks:", tasks.len());
    let assignments = coordinator.assign_tasks(&tasks);
    for assignment in &assignments {
        // Safe lookup: assignments only reference ids from `tasks`.
        let task = tasks.iter().find(|t| t.id == assignment.task_id).unwrap();
        println!(
            " Task {} ('{}') -> Robot {} (est. {:.1}s)",
            assignment.task_id, task.description, assignment.robot_id,
            assignment.estimated_completion
        );
    }
    let unassigned = tasks.len() - assignments.len();
    if unassigned > 0 {
        println!(" {} tasks could not be assigned", unassigned);
    }
    println!();
    // Step 3: Formation control — compute target positions for each
    // registered robot under three formation shapes.
    println!("[3] Formation control:");
    let formations = vec![
        ("Line", FormationType::Line),
        ("Circle", FormationType::Circle),
        ("Grid", FormationType::Grid),
    ];
    for (name, ftype) in &formations {
        let formation = Formation {
            formation_type: ftype.clone(),
            spacing: 2.0,
            center: [5.0, 5.0, 0.0],
        };
        let positions = coordinator.compute_formation(&formation);
        println!(" {} formation (spacing=2.0m, center=(5,5)):", name);
        for (i, pos) in positions.iter().enumerate() {
            println!(
                " Robot {} -> ({:6.2}, {:6.2}, {:6.2})",
                i, pos[0], pos[1], pos[2]
            );
        }
    }
    println!();
    // Step 4: Consensus voting among swarm members.
    println!("[4] Consensus voting:");
    let proposals = vec![
        "explore-sector-alpha",
        "return-to-base",
        "form-defensive-perimeter",
    ];
    for proposal in &proposals {
        let result = coordinator.propose_consensus(proposal);
        println!(
            " '{}': for={}, against={}, {}",
            result.proposal,
            result.votes_for,
            result.votes_against,
            if result.accepted { "ACCEPTED" } else { "REJECTED" }
        );
    }
    println!();
    println!("[done] Swarm coordination example complete.");
}

View File

@@ -0,0 +1,129 @@
/// Example 07: Skill Learning - Learn from demos, execute, improve
///
/// Demonstrates:
/// - Recording expert demonstrations as 3D trajectories
/// - Learning a skill from multiple demonstrations (averaging)
/// - Executing the learned skill and tracking its trajectory
/// - Improving confidence through positive/negative feedback
/// - Using the SkillLibrary from ruvector_robotics::cognitive
use ruvector_robotics::cognitive::{Demonstration, SkillLibrary};
fn main() {
    println!("=== Example 07: Skill Learning ===");
    println!();
    let mut library = SkillLibrary::new();
    // -- Step 1: Record demonstrations for "reach" --
    // Three expert trajectories over the same 4 waypoints; learning averages
    // them into one canonical trajectory.
    let demos = vec![
        Demonstration {
            trajectory: vec![[0.0, 0.0, 0.0], [1.0, 0.5, 0.0], [2.0, 1.0, 0.0], [3.0, 1.5, 0.0]],
            timestamps: vec![0, 100, 200, 300],
            metadata: "expert_1".into(),
        },
        Demonstration {
            trajectory: vec![[0.0, 0.0, 0.0], [1.2, 0.4, 0.0], [2.1, 0.9, 0.0], [3.1, 1.6, 0.0]],
            timestamps: vec![0, 110, 210, 310],
            metadata: "expert_2".into(),
        },
        Demonstration {
            trajectory: vec![[0.0, 0.0, 0.0], [0.8, 0.6, 0.0], [1.9, 1.1, 0.0], [2.9, 1.4, 0.0]],
            timestamps: vec![0, 90, 190, 290],
            metadata: "expert_3".into(),
        },
    ];
    println!("[1] Recorded {} demonstrations for 'reach':", demos.len());
    for (i, demo) in demos.iter().enumerate() {
        // NOTE(review): the "us" unit in this message is assumed from the
        // timestamp magnitudes — confirm the intended time unit.
        println!(
            " Demo {} ({}): {} waypoints, duration={}us",
            i,
            demo.metadata,
            demo.trajectory.len(),
            demo.timestamps.last().unwrap_or(&0)
        );
    }
    // -- Step 2: Learn the skill --
    println!();
    let skill = library.learn_from_demonstration("reach", &demos);
    println!("[2] Learned skill 'reach':");
    println!(" Trajectory length: {} waypoints", skill.trajectory.len());
    println!(" Initial confidence: {:.3}", skill.confidence);
    println!(" Averaged trajectory:");
    for (i, pt) in skill.trajectory.iter().enumerate() {
        println!(" wp {}: ({:.2}, {:.2}, {:.2})", i, pt[0], pt[1], pt[2]);
    }
    // -- Step 3: Learn another skill with a single demo --
    println!();
    let wave_demo = Demonstration {
        trajectory: vec![[0.0, 0.0, 1.0], [0.5, 0.0, 1.5], [0.0, 0.0, 1.0], [-0.5, 0.0, 1.5]],
        timestamps: vec![0, 200, 400, 600],
        metadata: "single_demo".into(),
    };
    let wave_skill = library.learn_from_demonstration("wave", &[wave_demo]);
    println!("[3] Learned skill 'wave' from 1 demo:");
    println!(" Confidence: {:.3} (lower with fewer demos)", wave_skill.confidence);
    println!(" Library now has {} skills", library.len());
    // -- Step 4: Execute skills --
    // execute_skill returns Some(trajectory) and bumps the skill's
    // execution counter; None for unknown skills.
    println!();
    println!("[4] Executing skills:");
    for _ in 0..3 {
        if let Some(traj) = library.execute_skill("reach") {
            println!(" 'reach' executed: {} waypoints", traj.len());
        }
    }
    let reach = library.get("reach").unwrap();
    println!(" 'reach' execution count: {}", reach.execution_count);
    if let Some(traj) = library.execute_skill("wave") {
        println!(" 'wave' executed: {} waypoints", traj.len());
    }
    let wave = library.get("wave").unwrap();
    println!(" 'wave' execution count: {}", wave.execution_count);
    // Try non-existent skill — expected to print "exists: false".
    let missing = library.execute_skill("backflip");
    println!(" 'backflip' exists: {}", missing.is_some());
    // -- Step 5: Improve through feedback --
    // Positive deltas raise confidence, negative deltas lower it.
    println!();
    println!("[5] Improving 'reach' through feedback:");
    let before = library.get("reach").unwrap().confidence;
    println!(" Before: confidence={:.4}", before);
    // Positive feedback (5 successes)
    for _ in 0..5 {
        library.improve_skill("reach", 0.03);
    }
    let after_positive = library.get("reach").unwrap().confidence;
    println!(" After 5 successes: confidence={:.4} (+{:.4})", after_positive, after_positive - before);
    // Negative feedback (2 failures)
    for _ in 0..2 {
        library.improve_skill("reach", -0.05);
    }
    let after_negative = library.get("reach").unwrap().confidence;
    println!(" After 2 failures: confidence={:.4} ({:.4})", after_negative, after_negative - after_positive);
    // -- Step 6: Summary --
    println!();
    println!("[6] Skill library summary:");
    for name in &["reach", "wave"] {
        if let Some(skill) = library.get(name) {
            println!(
                " '{}': {} waypoints, confidence={:.3}, executed {} times",
                skill.name,
                skill.trajectory.len(),
                skill.confidence,
                skill.execution_count
            );
        }
    }
    println!();
    println!("[done] Skill learning example complete.");
}

View File

@@ -0,0 +1,144 @@
/// Example 08: World Model - Object tracking, occupancy grid, predictions
///
/// Demonstrates:
/// - Creating a WorldModel with a square occupancy grid
/// - Tracking objects with position, velocity, and confidence
/// - Predicting future states via constant-velocity extrapolation
/// - Updating occupancy cells and checking path clearance
/// - Removing stale objects by age threshold
use ruvector_robotics::cognitive::{TrackedObject, WorldModel};
/// Entry point for the world-model example: builds an occupancy-grid world,
/// tracks objects, predicts their motion, checks path clearance, prunes
/// stale tracks, and reports grid statistics.
fn main() {
    println!("=== Example 08: World Model ===");
    println!();
    // -- Step 1: Create world model --
    // 20x20 grid at 0.5 m/cell -> a 10m x 10m world.
    let mut world = WorldModel::new(20, 0.5);
    println!(
        "[1] WorldModel created: {}x{} grid, resolution={:.1}m, world extent={:.0}m x {:.0}m",
        world.grid_size(),
        world.grid_size(),
        world.grid_resolution(),
        world.grid_size() as f64 * world.grid_resolution(),
        world.grid_size() as f64 * world.grid_resolution(),
    );
    // -- Step 2: Track objects --
    // Two fresh tracks (last_seen=1000) and one deliberately old track
    // (last_seen=500) so Step 6 has something stale to remove.
    let objects = vec![
        TrackedObject {
            id: 1,
            position: [2.0, 3.0, 0.0],
            velocity: [0.5, 0.0, 0.0],
            last_seen: 1000,
            confidence: 0.95,
            label: "robot_peer".into(),
        },
        TrackedObject {
            id: 2,
            position: [7.0, 1.0, 0.0],
            velocity: [0.0, 0.3, 0.0],
            last_seen: 1000,
            confidence: 0.80,
            label: "moving_box".into(),
        },
        TrackedObject {
            id: 3,
            position: [5.0, 5.0, 0.0],
            velocity: [0.0, 0.0, 0.0],
            last_seen: 500, // old observation
            confidence: 0.60,
            label: "static_pillar".into(),
        },
    ];
    for obj in &objects {
        world.update_object(obj.clone());
    }
    println!();
    println!("[2] Tracked {} objects:", world.object_count());
    for obj in &objects {
        println!(
            " id={} '{}': pos=({:.1},{:.1},{:.1}), vel=({:.1},{:.1},{:.1}), conf={:.2}",
            obj.id, obj.label,
            obj.position[0], obj.position[1], obj.position[2],
            obj.velocity[0], obj.velocity[1], obj.velocity[2],
            obj.confidence
        );
    }
    // -- Step 3: Predict future positions --
    // Constant-velocity extrapolation for (object id, horizon seconds) pairs.
    println!();
    println!("[3] State predictions:");
    for &(id, dt) in &[(1, 2.0), (1, 5.0), (2, 3.0)] {
        if let Some(pred) = world.predict_state(id, dt) {
            // unwrap() is safe: predict_state only succeeded for ids that
            // are currently tracked.
            let label = &world.get_object(id).unwrap().label;
            println!(
                " '{}' at t+{:.0}s: pos=({:.1},{:.1},{:.1}), conf={:.3}",
                label, dt, pred.position[0], pred.position[1], pred.position[2], pred.confidence
            );
        }
    }
    // -- Step 4: Update occupancy grid --
    // Place an L-shaped wall: a horizontal run at y=10 and a vertical run
    // at x=14, each cell marked 0.9 occupied.
    println!();
    for x in 5..15 {
        world.update_occupancy(x, 10, 0.9);
    }
    for y in 5..10 {
        world.update_occupancy(14, y, 0.9);
    }
    println!("[4] Placed L-shaped wall in occupancy grid");
    // -- Step 5: Path clearance --
    // Straight-line queries in grid coordinates; the third case is expected
    // to intersect the wall.
    println!();
    println!("[5] Path clearance queries:");
    let queries = [
        ([0, 0], [19, 0], "Bottom row"),
        ([0, 0], [0, 19], "Left column"),
        ([0, 5], [19, 5], "Through wall"),
        ([0, 0], [19, 19], "Diagonal"),
    ];
    for (from, to, label) in &queries {
        let clear = world.is_path_clear(*from, *to);
        println!(
            " ({:>2},{:>2}) -> ({:>2},{:>2}) [{}]: {}",
            from[0], from[1], to[0], to[1], label,
            if clear { "CLEAR" } else { "BLOCKED" }
        );
    }
    // -- Step 6: Remove stale objects --
    // At now=1200 with max age 300, only id=3 (last_seen=500) qualifies.
    println!();
    let removed = world.remove_stale_objects(1200, 300);
    println!("[6] Removed {} stale objects (age > 300us)", removed);
    println!(" Remaining: {} objects", world.object_count());
    // BUG FIX: the original printed "was stale and removed" in BOTH branches.
    // When get_object(3) returns Some, the object is still tracked, so the
    // message was contradictory; report the actual status instead.
    if let Some(obj) = world.get_object(3) {
        println!(" (id=3 '{}' is still tracked -- not removed)", obj.label);
    } else {
        println!(" (id=3 'static_pillar' was stale and removed)");
    }
    // -- Step 7: Occupancy statistics --
    // Scan every cell; >= 0.5 counts as occupied, anything below as free.
    println!();
    let size = world.grid_size();
    let mut occupied = 0;
    let mut free = 0;
    for y in 0..size {
        for x in 0..size {
            if let Some(v) = world.get_occupancy(x, y) {
                if v >= 0.5 { occupied += 1; }
                else { free += 1; }
            }
        }
    }
    let total = size * size;
    println!("[7] Occupancy statistics:");
    println!(" Total cells: {}", total);
    println!(" Occupied: {} ({:.1}%)", occupied, 100.0 * occupied as f64 / total as f64);
    println!(" Free: {} ({:.1}%)", free, 100.0 * free as f64 / total as f64);
    println!();
    println!("[done] World model example complete.");
}

View File

@@ -0,0 +1,100 @@
/// Example 09: MCP Tools - Tool registry and JSON schema generation
///
/// Demonstrates:
/// - Creating a RoboticsToolRegistry with built-in tools
/// - Listing all registered tools
/// - Filtering tools by category
/// - Looking up a specific tool and inspecting its parameters
/// - Registering a custom tool
/// - Generating MCP-compatible JSON schema
use ruvector_robotics::mcp::{
ParamType, RoboticsToolRegistry, ToolCategory, ToolDefinition, ToolParameter,
};
fn main() {
    println!("=== Example 09: MCP Tools ===");
    println!();
    // -- Step 1: Create registry --
    let mut registry = RoboticsToolRegistry::new();
    println!("[1] Tool registry created: {} built-in tools", registry.list_tools().len());
    // -- Step 2: List all tools --
    // NOTE(review): borrowing `&t.name` from list_tools() assumes it returns
    // borrowed tool definitions (not an owned temporary) — confirm against
    // the registry API.
    println!();
    println!("[2] All registered tools:");
    let mut tools: Vec<_> = registry.list_tools().iter().map(|t| &t.name).collect();
    tools.sort();
    for name in &tools {
        let tool = registry.get_tool(name).unwrap();
        println!(
            " {:.<30} {:?} ({} params)",
            tool.name, tool.category, tool.parameters.len()
        );
    }
    // -- Step 3: Filter by category --
    println!();
    let categories = [
        ToolCategory::Perception,
        ToolCategory::Navigation,
        ToolCategory::Cognition,
        ToolCategory::Memory,
        ToolCategory::Planning,
        ToolCategory::Swarm,
    ];
    println!("[3] Tools by category:");
    for cat in &categories {
        let cat_tools = registry.list_by_category(*cat);
        println!(" {:?}: {} tools", cat, cat_tools.len());
        for tool in &cat_tools {
            println!(" - {}", tool.name);
        }
    }
    // -- Step 4: Inspect a specific tool --
    println!();
    if let Some(tool) = registry.get_tool("detect_obstacles") {
        println!("[4] Tool details for 'detect_obstacles':");
        println!(" Description: {}", tool.description);
        println!(" Category: {:?}", tool.category);
        println!(" Parameters:");
        for param in &tool.parameters {
            println!(
                " - {} ({:?}, required={}): {}",
                param.name, param.param_type, param.required, param.description
            );
        }
    }
    // -- Step 5: Register a custom tool --
    // ToolParameter::new(name, description, type, required)
    println!();
    let custom = ToolDefinition::new(
        "custom_slam",
        "Run SLAM algorithm on sensor data",
        vec![
            ToolParameter::new("point_cloud_json", "JSON-encoded point cloud", ParamType::String, true),
            ToolParameter::new("odometry_json", "JSON-encoded odometry data", ParamType::String, false),
            ToolParameter::new("resolution", "Map resolution in meters", ParamType::Number, false),
        ],
        ToolCategory::Perception,
    );
    registry.register_tool(custom);
    println!("[5] Registered custom tool 'custom_slam'. Total: {} tools", registry.list_tools().len());
    // -- Step 6: Generate MCP schema --
    // Print only the first 15 lines of the pretty JSON, then summarize the rest.
    println!();
    let schema = registry.to_mcp_schema();
    let json = serde_json::to_string_pretty(&schema).unwrap();
    println!("[6] Full MCP schema ({} bytes):", json.len());
    for (i, line) in json.lines().enumerate() {
        if i > 14 {
            println!(" ... ({} more lines)", json.lines().count() - i);
            break;
        }
        println!(" {}", line);
    }
    println!();
    println!("[done] MCP tools example complete.");
}

View File

@@ -0,0 +1,195 @@
/// Example 10: Full Pipeline - Sensor -> Perception -> Cognition -> Action
///
/// Combines all modules into an integrated simulation:
/// 1. Generate sensor point cloud
/// 2. Detect and classify obstacles via PerceptionPipeline
/// 3. Feed percepts into CognitiveCore (perceive-think-act-learn)
/// 4. Track objects in WorldModel
/// 5. Report comprehensive statistics
use rand::Rng;
use ruvector_robotics::bridge::{Point3D, PointCloud, SpatialIndex};
use ruvector_robotics::cognitive::{
CognitiveConfig, CognitiveCore, CognitiveMode, Outcome, Percept, TrackedObject, WorldModel,
};
use ruvector_robotics::mcp::RoboticsToolRegistry;
use ruvector_robotics::perception::{PerceptionConfig, PerceptionPipeline};
/// Entry point: runs a 30-step closed-loop simulation wiring sensing,
/// perception, cognition, world modelling, and actuation together, then
/// prints summary statistics for each subsystem.
fn main() {
    println!("============================================================");
    println!("  Example 10: Full Cognitive Robotics Pipeline");
    println!("============================================================");
    println!();
    let mut rng = rand::thread_rng();
    // -- Initialize modules --
    let mut pipeline = PerceptionPipeline::new(PerceptionConfig::default());
    let mut core = CognitiveCore::new(CognitiveConfig {
        mode: CognitiveMode::Reactive,
        attention_threshold: 0.4,
        learning_rate: 0.02,
        max_percepts: 50,
    });
    let mut world = WorldModel::new(20, 0.5);
    let mut index = SpatialIndex::new(3);
    let registry = RoboticsToolRegistry::new();
    // Robot pose (x, y, z) plus running telemetry accumulators.
    let mut robot_pos = [1.0_f64, 1.0, 0.0];
    let mut total_distance = 0.0_f64;
    let mut decisions_made = 0usize;
    println!("[1] Modules initialized:");
    println!("    PerceptionPipeline: default config");
    println!("    CognitiveCore: mode={:?}, state={:?}", core.mode(), core.state());
    println!("    WorldModel: {}x{} grid", world.grid_size(), world.grid_size());
    println!("    MCP registry: {} tools", registry.list_tools().len());
    println!();
    // -- Static obstacle points --
    let mut obstacle_pts = Vec::new();
    // Wall at y=4
    for i in 0..40 {
        obstacle_pts.push(Point3D::new(2.0 + i as f32 * 0.1, 4.0, 0.0));
    }
    // Box near (7, 2)
    for dx in 0..5 {
        for dy in 0..5 {
            obstacle_pts.push(Point3D::new(7.0 + dx as f32 * 0.1, 2.0 + dy as f32 * 0.1, 0.0));
        }
    }
    let total_steps = 30;
    println!("[2] Running {} simulation steps...", total_steps);
    println!();
    for step in 0..total_steps {
        // SENSOR: Build point cloud (static scene plus 5 random noise points)
        let mut pts = obstacle_pts.clone();
        for _ in 0..5 {
            pts.push(Point3D::new(
                rng.gen_range(0.0..10.0),
                rng.gen_range(0.0..10.0),
                0.0,
            ));
        }
        let cloud = PointCloud::new(pts, step as i64 * 100);
        // PERCEIVE: Run perception pipeline
        let (obstacles, _scene_graph) = pipeline.process(&cloud, &robot_pos);
        index.insert_point_cloud(&cloud);
        // Track obstacles in world model (frame-local index doubles as id)
        for (i, obs) in obstacles.iter().enumerate() {
            world.update_object(TrackedObject {
                id: i as u64,
                position: obs.center,
                velocity: [0.0, 0.0, 0.0],
                last_seen: step as i64 * 100,
                confidence: 0.9,
                label: format!("obs_{}", i),
            });
        }
        // THINK: Feed percept to cognitive core.
        // FIX: the obstacle list is not guaranteed to be sorted by range, so
        // take the true minimum over all obstacles instead of assuming the
        // first entry is the closest. Empty list still yields f64::MAX.
        let nearest_dist = obstacles
            .iter()
            .map(|o| o.min_distance)
            .fold(f64::MAX, f64::min);
        core.perceive(Percept {
            source: "perception_pipeline".into(),
            data: vec![robot_pos[0], robot_pos[1], nearest_dist],
            confidence: 0.85,
            timestamp: step as i64 * 100,
        });
        let action_label;
        if let Some(decision) = core.think() {
            decisions_made += 1;
            let _cmd = core.act(decision);
            action_label = if nearest_dist < 2.0 { "avoid" } else { "patrol" };
        } else {
            action_label = "idle";
        }
        // ACT: Move robot
        let old_pos = robot_pos;
        match action_label {
            "avoid" => {
                // FIX: retreat from the *nearest* obstacle — the one whose
                // distance triggered the avoid decision — not merely the
                // first detection in the list.
                let nearest = obstacles.iter().min_by(|a, b| {
                    a.min_distance
                        .partial_cmp(&b.min_distance)
                        .unwrap_or(std::cmp::Ordering::Equal)
                });
                if let Some(obs) = nearest {
                    let dx = robot_pos[0] - obs.center[0];
                    let dy = robot_pos[1] - obs.center[1];
                    // Clamp the norm away from zero to avoid a blow-up when
                    // the robot sits exactly on the obstacle centre.
                    let d = (dx * dx + dy * dy).sqrt().max(0.01);
                    robot_pos[0] += 0.3 * dx / d;
                    robot_pos[1] += 0.3 * dy / d;
                }
            }
            "patrol" => {
                // Random-walk patrol: fixed 0.25m step in a uniform heading.
                let angle: f64 = rng.gen_range(0.0..std::f64::consts::TAU);
                robot_pos[0] += 0.25 * angle.cos();
                robot_pos[1] += 0.25 * angle.sin();
            }
            _ => {}
        }
        // Keep the robot inside the 10x10 arena.
        robot_pos[0] = robot_pos[0].clamp(0.0, 10.0);
        robot_pos[1] = robot_pos[1].clamp(0.0, 10.0);
        let step_dist =
            ((robot_pos[0] - old_pos[0]).powi(2) + (robot_pos[1] - old_pos[1]).powi(2)).sqrt();
        total_distance += step_dist;
        // LEARN: Feedback — reward maintaining more than 1m of clearance
        let success = nearest_dist > 1.0;
        core.learn(Outcome {
            success,
            reward: if success { 1.0 } else { -0.5 },
            description: format!("step_{}", step),
        });
        if step % 5 == 0 {
            println!(
                "  Step {:>3}: pos=({:5.2},{:5.2}) action={:<8} obstacles={} state={:?}",
                step,
                robot_pos[0],
                robot_pos[1],
                action_label,
                obstacles.len(),
                core.state()
            );
        }
    }
    // -- Statistics --
    println!();
    println!("============================================================");
    println!("  Results ({} steps)", total_steps);
    println!("============================================================");
    println!();
    println!("[A] Movement:");
    println!("    Final position: ({:.2}, {:.2})", robot_pos[0], robot_pos[1]);
    println!("    Total distance: {:.2}m", total_distance);
    println!(
        "    Avg speed: {:.3}m/step",
        total_distance / total_steps as f64
    );
    println!();
    println!("[B] Perception:");
    println!("    Frames processed: {}", pipeline.frames_processed());
    println!("    Spatial index: {} points", index.len());
    println!();
    println!("[C] Cognition:");
    println!("    State: {:?}", core.state());
    println!("    Decisions made: {}", decisions_made);
    println!("    Cumulative reward: {:.4}", core.cumulative_reward());
    println!();
    println!("[D] World Model:");
    println!("    Tracked objects: {}", world.object_count());
    println!("    Grid size: {}x{}", world.grid_size(), world.grid_size());
    println!();
    println!("[E] MCP tools available: {}", registry.list_tools().len());
    println!();
    println!("[done] Full pipeline example complete.");
}

View File

@@ -0,0 +1,42 @@
/// RuVector Cognitive Robotics Examples
///
/// Each example demonstrates a distinct robotics capability built on top of
/// the unified ruvector-robotics crate.
///
/// Run any example with:
///
/// cargo run --bin <example_name>
/// Prints the catalogue of example binaries, grouped by difficulty tier,
/// followed by two sample `cargo run` invocations.
fn main() {
    // Horizontal rule reused above and below the banner title.
    const BAR: &str = "==========================================================";
    // Every output line in display order; an empty entry prints a blank
    // line, so looping over this table reproduces the output exactly.
    let catalogue = [
        BAR,
        "  RuVector Cognitive Robotics Examples",
        BAR,
        "",
        "Available examples:",
        "",
        "  PRACTICAL",
        "  ---------",
        "    01_basic_perception     Point cloud creation, kNN and radius search",
        "    02_obstacle_avoidance   Detect obstacles, classify, compute distances",
        "    03_scene_graph          Build scene graphs, compute edges, merge scenes",
        "",
        "  INTERMEDIATE",
        "  ------------",
        "    04_behavior_tree        Patrol behavior tree with status transitions",
        "    05_cognitive_robot      Perceive-think-act-learn cognitive loop",
        "    06_swarm_coordination   Multi-robot task assignment and formations",
        "",
        "  ADVANCED",
        "  --------",
        "    07_skill_learning       Learn skills from demos, execute, improve",
        "    08_world_model          Occupancy grid, object tracking, path clearance",
        "    09_mcp_tools            MCP tool registry and JSON schema generation",
        "",
        "  FULL SYSTEM",
        "  -----------",
        "    10_full_pipeline        Sensor -> Perception -> Cognition -> Action",
        "",
        "Run an example:",
        "  cargo run --bin 01_basic_perception",
        "  cargo run --bin 10_full_pipeline",
        "",
    ];
    for line in catalogue {
        println!("{line}");
    }
}