Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,28 @@
# Criterion benchmark crate for the agentic-robotics workspace.
[package]
name = "agentic-robotics-benchmarks"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
# Internal tooling only; never publish to crates.io.
publish = false

[dependencies]
# NOTE(review): the bench sources import `ros3_core`/`ros3_rt`; confirm these
# crates rename their libs accordingly (e.g. `[lib] name = "ros3_core"`),
# otherwise the benches cannot resolve those paths — TODO verify.
agentic-robotics-core = { path = "../agentic-robotics-core", version = "0.1.1" }
agentic-robotics-rt = { path = "../agentic-robotics-rt", version = "0.1.1" }
criterion = { version = "0.5", features = ["html_reports"] }
tokio = { version = "1.40", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# `futures::executor::block_on` is used by the pubsub_latency bench but was
# missing from the dependency list, which would fail compilation of that bench.
futures = "0.3"

# Each bench disables the default libtest harness so criterion's generated
# `main` (via `criterion_main!`) runs instead.
[[bench]]
name = "message_serialization"
harness = false

[[bench]]
name = "pubsub_latency"
harness = false

[[bench]]
name = "executor_performance"
harness = false

View File

@@ -0,0 +1,255 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId};
use ros3_rt::executor::{ROS3Executor, Priority, Deadline};
use ros3_rt::scheduler::PriorityScheduler;
use std::time::Duration;
/// Measures the cost of constructing a `ROS3Executor` from scratch.
fn benchmark_executor_creation(c: &mut Criterion) {
    let mut g = c.benchmark_group("Executor Creation");
    g.bench_function("create_executor", |b| {
        b.iter(|| black_box(ROS3Executor::new().unwrap()))
    });
    g.finish();
}
/// Measures per-task spawn overhead at both ends of the priority range,
/// reusing a single executor across iterations.
fn benchmark_task_spawning(c: &mut Criterion) {
    let mut g = c.benchmark_group("Task Spawning");
    let executor = ROS3Executor::new().unwrap();

    // High priority paired with a tight (100 µs) deadline.
    g.bench_function("spawn_high_priority", |b| {
        b.iter(|| {
            executor.spawn_rt(Priority::High, Deadline(Duration::from_micros(100)), async {
                // Trivial body: we are timing the spawn path, not the task.
                black_box(42);
            });
        })
    });

    // Low priority paired with a loose (100 ms) deadline.
    g.bench_function("spawn_low_priority", |b| {
        b.iter(|| {
            executor.spawn_rt(Priority::Low, Deadline(Duration::from_millis(100)), async {
                black_box(42);
            });
        })
    });

    g.finish();
}
/// Measures the raw decision cost of `PriorityScheduler::should_use_high_priority`
/// across the priority/deadline combinations the executor routes on.
fn benchmark_scheduler_overhead(c: &mut Criterion) {
    let mut g = c.benchmark_group("Scheduler Overhead");
    let scheduler = PriorityScheduler::new();

    // Low priority, loose deadline.
    g.bench_function("priority_low", |b| {
        b.iter(|| {
            scheduler.should_use_high_priority(
                black_box(Priority::Low),
                black_box(Deadline(Duration::from_millis(100))),
            );
        })
    });

    // High priority, tight deadline.
    g.bench_function("priority_high", |b| {
        b.iter(|| {
            scheduler.should_use_high_priority(
                black_box(Priority::High),
                black_box(Deadline(Duration::from_micros(100))),
            );
        })
    });

    // Medium priority: the deadline alone should drive the routing decision.
    g.bench_function("deadline_check_fast", |b| {
        b.iter(|| {
            scheduler.should_use_high_priority(
                black_box(Priority::Medium),
                black_box(Deadline(Duration::from_micros(500))),
            );
        })
    });

    g.bench_function("deadline_check_slow", |b| {
        b.iter(|| {
            scheduler.should_use_high_priority(
                black_box(Priority::Medium),
                black_box(Deadline(Duration::from_secs(1))),
            );
        })
    });

    g.finish();
}
/// Measures spawning mixed-priority batches of 10/100/1000 tasks onto a
/// freshly created executor per iteration.
fn benchmark_task_distribution(c: &mut Criterion) {
    let mut g = c.benchmark_group("Task Distribution");
    for num_tasks in [10, 100, 1000].iter() {
        g.bench_with_input(
            BenchmarkId::new("spawn_tasks", num_tasks),
            num_tasks,
            |b, &count| {
                b.iter(|| {
                    let executor = ROS3Executor::new().unwrap();
                    for i in 0..count {
                        // Cycle High → Medium → Low by task index.
                        let priority = match i % 3 {
                            0 => Priority::High,
                            1 => Priority::Medium,
                            _ => Priority::Low,
                        };
                        // Only high-priority work gets a tight deadline.
                        let deadline = if priority == Priority::High {
                            Deadline(Duration::from_micros(100))
                        } else {
                            Deadline(Duration::from_millis(10))
                        };
                        executor.spawn_rt(priority, deadline, async move {
                            black_box(i);
                        });
                    }
                    black_box(executor)
                })
            },
        );
    }
    g.finish();
}
/// Measures spawn-plus-body cost for a purely synchronous task versus one
/// that yields back to the executor once.
fn benchmark_async_task_execution(c: &mut Criterion) {
    let mut g = c.benchmark_group("Async Task Execution");
    g.sample_size(50);
    let executor = ROS3Executor::new().unwrap();

    // Task body is pure computation with no await points.
    g.bench_function("execute_sync_task", |b| {
        b.iter(|| {
            executor.spawn_rt(Priority::High, Deadline(Duration::from_micros(100)), async {
                let sum: i32 = (0..100).sum();
                black_box(sum)
            });
        })
    });

    // Task body hits exactly one await point via `yield_now`.
    g.bench_function("execute_with_yield", |b| {
        b.iter(|| {
            executor.spawn_rt(Priority::Medium, Deadline(Duration::from_millis(1)), async {
                tokio::task::yield_now().await;
                black_box(42)
            });
        })
    });

    g.finish();
}
/// Measures spawning one task at each priority level back-to-back on a
/// shared executor.
fn benchmark_priority_handling(c: &mut Criterion) {
    let mut g = c.benchmark_group("Priority Handling");
    let executor = ROS3Executor::new().unwrap();

    g.bench_function("mixed_priorities", |b| {
        b.iter(|| {
            // High, medium, then low priority in sequence.
            executor.spawn_rt(Priority::High, Deadline(Duration::from_micros(50)), async {
                black_box(1)
            });
            executor.spawn_rt(Priority::Medium, Deadline(Duration::from_millis(1)), async {
                black_box(2)
            });
            executor.spawn_rt(Priority::Low, Deadline(Duration::from_millis(100)), async {
                black_box(3)
            });
        })
    });

    g.finish();
}
/// Measures batched spawning with uniformly tight versus uniformly loose
/// deadlines, which should land on the high- and low-priority runtimes
/// respectively.
fn benchmark_deadline_distribution(c: &mut Criterion) {
    let mut g = c.benchmark_group("Deadline Distribution");
    let executor = ROS3Executor::new().unwrap();

    // Ten spawns per iteration, all with a 100 µs deadline.
    g.bench_function("tight_deadlines", |b| {
        b.iter(|| {
            (0..10).for_each(|_| {
                executor.spawn_rt(Priority::High, Deadline(Duration::from_micros(100)), async {
                    black_box(42)
                });
            });
        })
    });

    // Ten spawns per iteration, all with a 100 ms deadline.
    g.bench_function("loose_deadlines", |b| {
        b.iter(|| {
            (0..10).for_each(|_| {
                executor.spawn_rt(Priority::Low, Deadline(Duration::from_millis(100)), async {
                    black_box(42)
                });
            });
        })
    });

    g.finish();
}
// Register every executor benchmark in a single criterion group.
criterion_group!(
benches,
benchmark_executor_creation,
benchmark_task_spawning,
benchmark_scheduler_overhead,
benchmark_task_distribution,
benchmark_async_task_execution,
benchmark_priority_handling,
benchmark_deadline_distribution
);
// Expands to this bench binary's `main` (Cargo.toml sets `harness = false`).
criterion_main!(benches);

View File

@@ -0,0 +1,187 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId, Throughput};
use ros3_core::message::{RobotState, PointCloud, Pose};
use ros3_core::serialization::{serialize_cdr, deserialize_cdr, serialize_json, deserialize_json};
/// Measures CDR encoding throughput for a small (RobotState), medium (Pose)
/// and large (1000-point PointCloud) message.
fn benchmark_cdr_serialization(c: &mut Criterion) {
    let mut g = c.benchmark_group("CDR Serialization");

    let state = RobotState {
        position: [1.0, 2.0, 3.0],
        velocity: [0.1, 0.2, 0.3],
        timestamp: 123456789,
    };
    g.throughput(Throughput::Bytes(std::mem::size_of::<RobotState>() as u64));
    g.bench_function("RobotState", |b| {
        b.iter(|| black_box(serialize_cdr(black_box(&state)).unwrap()))
    });

    let pose = Pose {
        position: [1.0, 2.0, 3.0],
        orientation: [0.0, 0.0, 0.0, 1.0],
        frame_id: "world".to_string(),
        timestamp: 123456789,
    };
    // The +10 approximates the heap-allocated `frame_id` payload on top of
    // the struct's in-place size.
    g.throughput(Throughput::Bytes(std::mem::size_of::<Pose>() as u64 + 10));
    g.bench_function("Pose", |b| {
        b.iter(|| black_box(serialize_cdr(black_box(&pose)).unwrap()))
    });

    // Large payload: 1000 synthetic XYZ points.
    let cloud = PointCloud {
        points: (0..1000)
            .map(|i| [i as f32 * 0.01, i as f32 * 0.02, i as f32 * 0.03])
            .collect(),
        timestamp: 123456789,
        frame_id: "lidar".to_string(),
    };
    let payload_bytes = cloud.points.len() * std::mem::size_of::<[f32; 3]>();
    g.throughput(Throughput::Bytes(payload_bytes as u64));
    g.bench_function("PointCloud_1k", |b| {
        b.iter(|| black_box(serialize_cdr(black_box(&cloud)).unwrap()))
    });

    g.finish();
}
/// Measures CDR decoding speed from buffers serialized ahead of time.
fn benchmark_cdr_deserialization(c: &mut Criterion) {
    let mut g = c.benchmark_group("CDR Deserialization");

    // Pre-encode once so only the decode path is timed.
    let state_bytes = serialize_cdr(&RobotState {
        position: [1.0, 2.0, 3.0],
        velocity: [0.1, 0.2, 0.3],
        timestamp: 123456789,
    })
    .unwrap();
    g.throughput(Throughput::Bytes(state_bytes.len() as u64));
    g.bench_function("RobotState", |b| {
        b.iter(|| {
            let decoded: RobotState = deserialize_cdr(black_box(&state_bytes)).unwrap();
            black_box(decoded)
        })
    });

    let pose_bytes = serialize_cdr(&Pose {
        position: [1.0, 2.0, 3.0],
        orientation: [0.0, 0.0, 0.0, 1.0],
        frame_id: "world".to_string(),
        timestamp: 123456789,
    })
    .unwrap();
    g.throughput(Throughput::Bytes(pose_bytes.len() as u64));
    g.bench_function("Pose", |b| {
        b.iter(|| {
            let decoded: Pose = deserialize_cdr(black_box(&pose_bytes)).unwrap();
            black_box(decoded)
        })
    });

    g.finish();
}
/// Compares CDR and JSON round-trip cost for RobotState and prints a one-shot
/// encoded-size comparison (outside any timed section).
fn benchmark_json_vs_cdr(c: &mut Criterion) {
    let mut g = c.benchmark_group("JSON vs CDR");
    let state = RobotState {
        position: [1.0, 2.0, 3.0],
        velocity: [0.1, 0.2, 0.3],
        timestamp: 123456789,
    };

    g.bench_function("CDR_serialize", |b| {
        b.iter(|| black_box(serialize_cdr(black_box(&state)).unwrap()))
    });
    g.bench_function("JSON_serialize", |b| {
        b.iter(|| black_box(serialize_json(black_box(&state)).unwrap()))
    });

    // Pre-encode once for the deserialization benches and the size report.
    let cdr_bytes = serialize_cdr(&state).unwrap();
    let json_bytes = serialize_json(&state).unwrap();

    g.bench_function("CDR_deserialize", |b| {
        b.iter(|| {
            let decoded: RobotState = deserialize_cdr(black_box(&cdr_bytes)).unwrap();
            black_box(decoded)
        })
    });
    g.bench_function("JSON_deserialize", |b| {
        b.iter(|| {
            let decoded: RobotState = deserialize_json(black_box(&json_bytes)).unwrap();
            black_box(decoded)
        })
    });

    println!("\nSerialization size comparison for RobotState:");
    println!(" CDR: {} bytes", cdr_bytes.len());
    println!(" JSON: {} bytes", json_bytes.len());
    println!(" Ratio: {:.2}x", json_bytes.len() as f64 / cdr_bytes.len() as f64);
    g.finish();
}
/// Measures how CDR serialization throughput scales with point-cloud size.
fn benchmark_message_sizes(c: &mut Criterion) {
    let mut g = c.benchmark_group("Message Size Scaling");
    for &n in [100usize, 1000, 10000, 100000].iter() {
        // Synthetic cloud with n XYZ points.
        let cloud = PointCloud {
            points: (0..n)
                .map(|i| [i as f32 * 0.01, i as f32 * 0.02, i as f32 * 0.03])
                .collect(),
            timestamp: 123456789,
            frame_id: "lidar".to_string(),
        };
        let payload_bytes = cloud.points.len() * std::mem::size_of::<[f32; 3]>();
        g.throughput(Throughput::Bytes(payload_bytes as u64));
        g.bench_with_input(BenchmarkId::new("PointCloud", n), &cloud, |b, pc| {
            b.iter(|| black_box(serialize_cdr(black_box(pc)).unwrap()))
        });
    }
    g.finish();
}
// Register all serialization benchmarks in one criterion group.
criterion_group!(
benches,
benchmark_cdr_serialization,
benchmark_cdr_deserialization,
benchmark_json_vs_cdr,
benchmark_message_sizes
);
// Expands to this bench binary's `main` (Cargo.toml sets `harness = false`).
criterion_main!(benches);

View File

@@ -0,0 +1,193 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion, BenchmarkId};
use ros3_core::message::RobotState;
use ros3_core::publisher::Publisher;
use ros3_core::subscriber::Subscriber;
use ros3_core::serialization::Serializer;
use std::time::{Duration, Instant};
/// Measures the cost of constructing a CDR `Publisher`.
fn benchmark_publisher_creation(c: &mut Criterion) {
    let mut g = c.benchmark_group("Publisher Creation");
    g.bench_function("create_publisher", |b| {
        b.iter(|| {
            black_box(Publisher::<RobotState>::new(
                black_box("test_topic".to_string()),
                Serializer::Cdr,
            ))
        })
    });
    g.finish();
}
/// Measures the cost of constructing a CDR `Subscriber`.
fn benchmark_subscriber_creation(c: &mut Criterion) {
    let mut g = c.benchmark_group("Subscriber Creation");
    g.bench_function("create_subscriber", |b| {
        b.iter(|| {
            black_box(Subscriber::<RobotState>::new(
                black_box("test_topic".to_string()),
                Serializer::Cdr,
            ))
        })
    });
    g.finish();
}
/// Measures single-message publish latency on a CDR publisher, driving the
/// async publish future to completion with `block_on`.
fn benchmark_publish_latency(c: &mut Criterion) {
    let mut g = c.benchmark_group("Publish Latency");
    let publisher = Publisher::<RobotState>::new("bench_topic".to_string(), Serializer::Cdr);
    let msg = RobotState {
        position: [1.0, 2.0, 3.0],
        velocity: [0.1, 0.2, 0.3],
        timestamp: 123456789,
    };
    g.bench_function("single_publish", |b| {
        b.iter(|| black_box(futures::executor::block_on(publisher.publish(black_box(&msg)))))
    });
    g.finish();
}
/// Measures burst publishing of 10/100/1000 messages through one publisher.
fn benchmark_publish_throughput(c: &mut Criterion) {
    let mut g = c.benchmark_group("Publish Throughput");
    let publisher = Publisher::<RobotState>::new("bench_topic".to_string(), Serializer::Cdr);
    let msg = RobotState {
        position: [1.0, 2.0, 3.0],
        velocity: [0.1, 0.2, 0.3],
        timestamp: 123456789,
    };
    for batch_size in [10, 100, 1000].iter() {
        g.bench_with_input(
            BenchmarkId::new("batch_publish", batch_size),
            batch_size,
            |b, &size| {
                b.iter(|| {
                    // Publish errors are ignored; we only time the attempts.
                    for _ in 0..size {
                        futures::executor::block_on(publisher.publish(black_box(&msg))).ok();
                    }
                })
            },
        );
    }
    g.finish();
}
/// Measures publish time with a subscriber attached, using `iter_custom` so
/// the pub/sub pair is built once per timing batch rather than per message.
fn benchmark_end_to_end_latency(c: &mut Criterion) {
    let mut g = c.benchmark_group("End-to-End Latency");
    g.sample_size(100); // keep sampling bounded for the blocking async publishes
    g.bench_function("pubsub_roundtrip", |b| {
        b.iter_custom(|iters| {
            let publisher =
                Publisher::<RobotState>::new("latency_topic".to_string(), Serializer::Cdr);
            // Held alive so published messages have a receiver; never polled here.
            let _subscriber =
                Subscriber::<RobotState>::new("latency_topic".to_string(), Serializer::Cdr);
            let start = Instant::now();
            for i in 0..iters {
                let msg = RobotState {
                    position: [i as f64, i as f64, i as f64],
                    velocity: [0.1, 0.2, 0.3],
                    timestamp: i as i64,
                };
                futures::executor::block_on(publisher.publish(&msg)).ok();
            }
            start.elapsed()
        })
    });
    g.finish();
}
/// Measures publish cost under the CDR serializer versus the JSON serializer.
fn benchmark_serializer_comparison(c: &mut Criterion) {
    let mut g = c.benchmark_group("Serializer Comparison");
    let msg = RobotState {
        position: [1.0, 2.0, 3.0],
        velocity: [0.1, 0.2, 0.3],
        timestamp: 123456789,
    };

    let cdr_publisher = Publisher::<RobotState>::new("cdr_topic".to_string(), Serializer::Cdr);
    g.bench_function("CDR_publish", |b| {
        b.iter(|| {
            futures::executor::block_on(cdr_publisher.publish(black_box(&msg))).ok();
        })
    });

    let json_publisher = Publisher::<RobotState>::new("json_topic".to_string(), Serializer::Json);
    g.bench_function("JSON_publish", |b| {
        b.iter(|| {
            futures::executor::block_on(json_publisher.publish(black_box(&msg))).ok();
        })
    });

    g.finish();
}
/// Measures building N publishers and pushing one message through each.
/// Note: publishes run sequentially; "concurrent" refers to coexisting
/// publisher instances, not parallel execution.
fn benchmark_concurrent_publishers(c: &mut Criterion) {
    let mut g = c.benchmark_group("Concurrent Publishers");
    g.sample_size(50);
    for num_publishers in [1, 2, 4, 8].iter() {
        g.bench_with_input(
            BenchmarkId::new("concurrent", num_publishers),
            num_publishers,
            |b, &count| {
                b.iter(|| {
                    // One publisher per distinct topic name.
                    let publishers: Vec<_> = (0..count)
                        .map(|i| {
                            Publisher::<RobotState>::new(format!("topic_{}", i), Serializer::Cdr)
                        })
                        .collect();
                    let msg = RobotState {
                        position: [1.0, 2.0, 3.0],
                        velocity: [0.1, 0.2, 0.3],
                        timestamp: 123456789,
                    };
                    for publisher in &publishers {
                        futures::executor::block_on(publisher.publish(&msg)).ok();
                    }
                })
            },
        );
    }
    g.finish();
}
// Register all pub/sub latency benchmarks in one criterion group.
criterion_group!(
benches,
benchmark_publisher_creation,
benchmark_subscriber_creation,
benchmark_publish_latency,
benchmark_publish_throughput,
benchmark_end_to_end_latency,
benchmark_serializer_comparison,
benchmark_concurrent_publishers
);
// Expands to this bench binary's `main` (Cargo.toml sets `harness = false`).
criterion_main!(benches);