Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,330 @@
//! Swarm agent implementation
//!
//! Core agent that handles communication, learning sync, and task execution.
use crate::{
intelligence::IntelligenceSync,
memory::VectorMemory,
protocol::{MessagePayload, MessageType, SwarmMessage},
transport::{TransportConfig, TransportFactory, TransportHandle},
Result, SwarmConfig, SwarmError,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{mpsc, RwLock};
use tokio::time::{interval, Duration};
/// Agent roles in the swarm
///
/// Determines an agent's responsibility: coordination, task execution,
/// exploration, or domain expertise. `Worker` is the default role.
// Idiom: use `#[derive(Default)]` with the `#[default]` variant attribute
// instead of a hand-written `impl Default` (variant order unchanged).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum AgentRole {
    /// Coordinator manages the swarm
    Coordinator,
    /// Worker executes tasks (the default role)
    #[default]
    Worker,
    /// Scout explores and gathers information
    Scout,
    /// Specialist has domain expertise
    Specialist,
}
/// Peer agent info
///
/// Locally-tracked snapshot of another agent in the swarm. Entries are
/// inserted/removed by `SwarmAgent::handle_message` on Join/Leave and the
/// `last_seen` timestamp is refreshed on every message from the peer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerInfo {
/// Unique id of the peer agent.
pub agent_id: String,
/// Role the peer announced when joining (unknown strings map to Worker).
pub role: AgentRole,
/// Capability tags the peer announced (e.g. "learning", "memory").
pub capabilities: Vec<String>,
/// Last time a message was seen from this peer (unix millis).
pub last_seen: u64,
/// Whether the peer is currently considered connected.
pub connected: bool,
}
/// Swarm agent
///
/// Core node in the swarm: owns the (optional) transport connection, the
/// local learning state, a vector memory, and the table of known peers.
pub struct SwarmAgent {
// Static agent configuration (id, role, transport type, sync interval).
config: SwarmConfig,
// Active transport; `None` until `join_swarm` succeeds.
transport: Option<TransportHandle>,
// Shared learning/pattern state, synced with peers.
intelligence: Arc<IntelligenceSync>,
// Local vector store for shared memories (capacity set in `new`).
memory: Arc<VectorMemory>,
// Known peers keyed by agent id.
peers: Arc<RwLock<HashMap<String, PeerInfo>>>,
// Sender side of the internal message queue (used by the sync loop).
message_tx: mpsc::Sender<SwarmMessage>,
// Receiver side of the internal queue; wrapped for shared async access.
message_rx: Arc<RwLock<mpsc::Receiver<SwarmMessage>>>,
// Flag that keeps background loops alive; cleared by `leave_swarm`.
running: Arc<RwLock<bool>>,
}
impl SwarmAgent {
    /// Create new swarm agent
    ///
    /// Builds the local learning state, a vector memory capped at 10_000
    /// entries and a bounded internal message channel (1024 messages).
    /// The agent starts disconnected; call [`Self::join_swarm`] to attach
    /// a transport.
    pub async fn new(config: SwarmConfig) -> Result<Self> {
        let intelligence = Arc::new(IntelligenceSync::new(&config.agent_id));
        let memory = Arc::new(VectorMemory::new(&config.agent_id, 10000));
        let (message_tx, message_rx) = mpsc::channel(1024);
        Ok(Self {
            config,
            transport: None,
            intelligence,
            memory,
            peers: Arc::new(RwLock::new(HashMap::new())),
            message_tx,
            message_rx: Arc::new(RwLock::new(message_rx)),
            running: Arc::new(RwLock::new(false)),
        })
    }

    /// Get agent ID
    pub fn id(&self) -> &str {
        &self.config.agent_id
    }

    /// Get agent role
    pub fn role(&self) -> AgentRole {
        self.config.agent_role
    }

    /// Connect to swarm
    ///
    /// Creates the configured transport, announces this agent with a join
    /// message (role serialized via `Debug`, matching the parsing in
    /// `handle_message`) and marks the agent running.
    pub async fn join_swarm(&mut self, coordinator_url: &str) -> Result<()> {
        tracing::info!("Joining swarm at {}", coordinator_url);
        // Create transport
        let transport_config = TransportConfig {
            transport_type: self.config.transport,
            ..Default::default()
        };
        let transport = TransportFactory::create(&transport_config, Some(coordinator_url)).await?;
        self.transport = Some(transport);
        // Send join message
        let join_msg = SwarmMessage::join(
            &self.config.agent_id,
            &format!("{:?}", self.config.agent_role),
            vec!["learning".to_string(), "memory".to_string()],
        );
        self.send_message(join_msg).await?;
        *self.running.write().await = true;
        tracing::info!("Joined swarm successfully");
        Ok(())
    }

    /// Leave swarm gracefully
    ///
    /// Stops background loops (via the `running` flag), announces the
    /// leave to peers and drops the transport.
    pub async fn leave_swarm(&mut self) -> Result<()> {
        tracing::info!("Leaving swarm");
        *self.running.write().await = false;
        let leave_msg = SwarmMessage::leave(&self.config.agent_id);
        self.send_message(leave_msg).await?;
        self.transport = None;
        Ok(())
    }

    /// Send message to swarm
    ///
    /// Silently a no-op when no transport is attached (agent not joined).
    pub async fn send_message(&self, msg: SwarmMessage) -> Result<()> {
        if let Some(ref transport) = self.transport {
            let bytes = msg.to_bytes().map_err(|e| SwarmError::Serialization(e.to_string()))?;
            transport.send(bytes).await?;
        }
        Ok(())
    }

    /// Broadcast message to all peers
    ///
    /// Currently identical to [`Self::send_message`]; fan-out is left to
    /// the transport.
    pub async fn broadcast(&self, msg: SwarmMessage) -> Result<()> {
        self.send_message(msg).await
    }

    /// Sync learning patterns with swarm
    pub async fn sync_patterns(&self) -> Result<()> {
        let state = self.intelligence.get_state();
        let msg = SwarmMessage::sync_patterns(&self.config.agent_id, state);
        self.broadcast(msg).await
    }

    /// Request patterns from specific peer
    ///
    /// Asks `peer_id` for patterns newer than `since_version`, capped at
    /// 1000 entries per response.
    pub async fn request_patterns_from(&self, peer_id: &str, since_version: u64) -> Result<()> {
        let msg = SwarmMessage::directed(
            MessageType::RequestPatterns,
            &self.config.agent_id,
            peer_id,
            MessagePayload::Request(crate::protocol::RequestPayload {
                since_version,
                max_entries: 1000,
            }),
        );
        self.send_message(msg).await
    }

    /// Update learning pattern locally
    pub async fn learn(&self, state: &str, action: &str, reward: f64) {
        self.intelligence.update_pattern(state, action, reward);
    }

    /// Get best action for state
    ///
    /// Returns the chosen action and its score, or `None` if the
    /// intelligence layer has nothing to recommend.
    pub async fn get_best_action(&self, state: &str, actions: &[String]) -> Option<(String, f64)> {
        self.intelligence.get_best_action(state, actions)
    }

    /// Store vector in shared memory; returns the new entry's id.
    pub async fn store_memory(&self, content: &str, embedding: Vec<f32>) -> Result<String> {
        self.memory.store(content, embedding)
    }

    /// Search vector memory
    ///
    /// Returns up to `top_k` `(content, score)` pairs.
    pub async fn search_memory(&self, query: &[f32], top_k: usize) -> Vec<(String, f32)> {
        self.memory
            .search(query, top_k)
            .into_iter()
            .map(|(entry, score)| (entry.content, score))
            .collect()
    }

    /// Get connected peers
    pub async fn get_peers(&self) -> Vec<PeerInfo> {
        self.peers.read().await.values().cloned().collect()
    }

    /// Get swarm statistics
    ///
    /// Point-in-time snapshot combining learning, memory and peer-table
    /// counters.
    pub async fn get_stats(&self) -> AgentStats {
        let intelligence_stats = self.intelligence.get_swarm_stats();
        let memory_stats = self.memory.stats();
        let peers = self.peers.read().await;
        AgentStats {
            agent_id: self.config.agent_id.clone(),
            role: self.config.agent_role,
            connected_peers: peers.len(),
            total_patterns: intelligence_stats.total_patterns,
            total_memories: memory_stats.total_entries,
            avg_confidence: intelligence_stats.avg_confidence,
            is_running: *self.running.read().await,
        }
    }

    /// Start background sync loop
    ///
    /// Spawns a task that, while the agent is running and learning is
    /// enabled, queues a pattern-sync message on the internal channel
    /// every `sync_interval_ms`.
    pub async fn start_sync_loop(&self) {
        let intelligence = self.intelligence.clone();
        let config = self.config.clone();
        let running = self.running.clone();
        let message_tx = self.message_tx.clone();
        tokio::spawn(async move {
            let mut sync_interval = interval(Duration::from_millis(config.sync_interval_ms));
            while *running.read().await {
                sync_interval.tick().await;
                // Sync patterns periodically
                if config.enable_learning {
                    let state = intelligence.get_state();
                    let msg = SwarmMessage::sync_patterns(&config.agent_id, state);
                    // A full/closed channel is non-fatal for the loop.
                    let _ = message_tx.send(msg).await;
                }
            }
        });
    }

    /// Handle incoming message
    ///
    /// Dispatches on message type: tracks joins/leaves in the peer table,
    /// answers pings, merges incoming patterns and serves pattern
    /// requests. Always refreshes the sender's `last_seen` timestamp.
    pub async fn handle_message(&self, msg: SwarmMessage) -> Result<()> {
        match msg.message_type {
            MessageType::Join => {
                if let MessagePayload::Join(payload) = msg.payload {
                    let sender_id = msg.sender_id.clone();
                    let peer = PeerInfo {
                        agent_id: sender_id.clone(),
                        // The role arrives as its Debug string (see
                        // `join_swarm`); unknown strings default to Worker.
                        role: match payload.agent_role.as_str() {
                            "Coordinator" => AgentRole::Coordinator,
                            "Scout" => AgentRole::Scout,
                            "Specialist" => AgentRole::Specialist,
                            _ => AgentRole::Worker,
                        },
                        capabilities: payload.capabilities,
                        last_seen: chrono::Utc::now().timestamp_millis() as u64,
                        connected: true,
                    };
                    self.peers.write().await.insert(sender_id, peer);
                }
            }
            MessageType::Leave => {
                self.peers.write().await.remove(&msg.sender_id);
            }
            MessageType::Ping => {
                let pong = SwarmMessage::pong(&self.config.agent_id);
                self.send_message(pong).await?;
            }
            MessageType::SyncPatterns => {
                if let MessagePayload::Patterns(payload) = msg.payload {
                    // Bug fix: propagate serialization failures instead of
                    // panicking (this previously used `unwrap()`).
                    let state_bytes = serde_json::to_vec(&payload.state)
                        .map_err(|e| SwarmError::Serialization(e.to_string()))?;
                    self.intelligence
                        .merge_peer_state(&msg.sender_id, &state_bytes)?;
                }
            }
            MessageType::RequestPatterns => {
                if let MessagePayload::Request(payload) = msg.payload {
                    let delta = self.intelligence.get_delta(payload.since_version);
                    let response = SwarmMessage::sync_patterns(&self.config.agent_id, delta);
                    self.send_message(response).await?;
                }
            }
            _ => {}
        }
        // Update peer last_seen
        if let Some(peer) = self.peers.write().await.get_mut(&msg.sender_id) {
            peer.last_seen = chrono::Utc::now().timestamp_millis() as u64;
        }
        Ok(())
    }
}
/// Agent statistics
///
/// Point-in-time snapshot assembled by `SwarmAgent::get_stats`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentStats {
/// This agent's id.
pub agent_id: String,
/// Configured role of the agent.
pub role: AgentRole,
/// Number of peers currently tracked in the peer table.
pub connected_peers: usize,
/// Total learning patterns held locally.
pub total_patterns: usize,
/// Total entries stored in vector memory.
pub total_memories: usize,
/// Average confidence reported by the intelligence layer.
pub avg_confidence: f64,
/// Whether the agent's running flag is currently set.
pub is_running: bool,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Transport;
// Construction wires through the configured id, and the role defaults
// to Worker when not set explicitly.
#[tokio::test]
async fn test_agent_creation() {
let config = SwarmConfig::default()
.with_agent_id("test-agent")
.with_transport(Transport::SharedMemory);
let agent = SwarmAgent::new(config).await.unwrap();
assert_eq!(agent.id(), "test-agent");
assert!(matches!(agent.role(), AgentRole::Worker));
}
// After learning two actions for the same state, the higher-reward
// action ("coder", 0.8 > 0.6) must win best-action selection.
#[tokio::test]
async fn test_agent_learning() {
let config = SwarmConfig::default().with_agent_id("learning-agent");
let agent = SwarmAgent::new(config).await.unwrap();
agent.learn("edit_ts", "coder", 0.8).await;
agent.learn("edit_ts", "reviewer", 0.6).await;
let actions = vec!["coder".to_string(), "reviewer".to_string()];
let best = agent.get_best_action("edit_ts", &actions).await;
assert!(best.is_some());
assert_eq!(best.unwrap().0, "coder");
}
}

View File

@@ -0,0 +1,114 @@
//! Edge Agent Binary
//!
//! Run a single swarm agent that can connect to a coordinator.
use clap::Parser;
use ruvector_edge::prelude::*;
use ruvector_edge::Transport;
use std::time::Duration;
use tokio::signal;
use tokio::time::interval;
#[derive(Parser, Debug)]
#[command(name = "edge-agent")]
#[command(about = "RuVector Edge Swarm Agent")]
// CLI arguments for the edge-agent binary. The `///` doc comments are
// rendered by clap as `--help` text, so they are left byte-identical;
// review notes use plain `//` comments instead.
struct Args {
/// Agent ID (auto-generated if not provided)
#[arg(short, long)]
id: Option<String>,
// Parsed case-insensitively in main(); unknown values fall back to Worker.
/// Agent role: coordinator, worker, scout, specialist
#[arg(short, long, default_value = "worker")]
role: String,
// When absent, the agent runs standalone and never joins a swarm.
/// Coordinator URL to connect to
#[arg(short, long)]
coordinator: Option<String>,
// Unknown values fall back to shared-memory (see main()).
/// Transport type: websocket, shared-memory
#[arg(short, long, default_value = "shared-memory")]
transport: String,
/// Sync interval in milliseconds
#[arg(long, default_value = "1000")]
sync_interval: u64,
// Switches the tracing env filter from "info" to "debug".
/// Enable verbose logging
#[arg(short, long)]
verbose: bool,
}
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();

    // Logging: the --verbose flag switches the env filter to debug.
    tracing_subscriber::fmt()
        .with_env_filter(if args.verbose { "debug" } else { "info" })
        .init();

    // Map the CLI role string onto a typed role; anything unrecognized
    // becomes a Worker.
    let role = match args.role.to_lowercase().as_str() {
        "coordinator" => AgentRole::Coordinator,
        "scout" => AgentRole::Scout,
        "specialist" => AgentRole::Specialist,
        _ => AgentRole::Worker,
    };

    // Same for the transport; anything unrecognized uses shared memory.
    let transport = match args.transport.to_lowercase().as_str() {
        "websocket" | "ws" => Transport::WebSocket,
        _ => Transport::SharedMemory,
    };

    // Assemble the swarm configuration from the parsed arguments.
    let mut config = SwarmConfig::default()
        .with_role(role)
        .with_transport(transport);
    if let Some(id) = args.id {
        config = config.with_agent_id(id);
    }
    if let Some(url) = &args.coordinator {
        config = config.with_coordinator(url);
    }
    config.sync_interval_ms = args.sync_interval;

    let mut agent = SwarmAgent::new(config).await?;
    tracing::info!("Agent created: {} ({:?})", agent.id(), agent.role());

    // Join a swarm when a coordinator URL was given; otherwise a
    // coordinator-role agent just runs standalone.
    if let Some(ref url) = args.coordinator {
        tracing::info!("Connecting to coordinator: {}", url);
        agent.join_swarm(url).await?;
        agent.start_sync_loop().await;
    } else if role == AgentRole::Coordinator {
        tracing::info!("Running as standalone coordinator");
    }

    // Heartbeat: log a liveness line every 10 seconds.
    let agent_id = agent.id().to_string();
    tokio::spawn(async move {
        let mut ticker = interval(Duration::from_secs(10));
        loop {
            ticker.tick().await;
            tracing::info!("Agent {} heartbeat", agent_id);
        }
    });

    // Block until Ctrl+C, then leave the swarm cleanly.
    tracing::info!("Agent running. Press Ctrl+C to stop.");
    signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
    tracing::info!("Shutting down...");
    agent.leave_swarm().await?;
    Ok(())
}

View File

@@ -0,0 +1,93 @@
//! Edge Coordinator Binary
//!
//! Run a swarm coordinator that manages connected agents.
use clap::Parser;
use ruvector_edge::prelude::*;
use ruvector_edge::Transport;
use std::time::Duration;
use tokio::signal;
use tokio::time::interval;
#[derive(Parser, Debug)]
#[command(name = "edge-coordinator")]
#[command(about = "RuVector Edge Swarm Coordinator")]
// CLI arguments for the edge-coordinator binary. The `///` doc comments
// become clap `--help` text, so they are left byte-identical; review
// notes use plain `//` comments.
struct Args {
/// Coordinator ID
#[arg(short, long, default_value = "coordinator-001")]
id: String,
// NOTE(review): this value is parsed but not used in main() below —
// presumably intended for the WebSocket transport; confirm.
/// Listen address for WebSocket connections
#[arg(short, long, default_value = "0.0.0.0:8080")]
listen: String,
// Unknown values fall back to shared-memory (see main()).
/// Transport type: websocket, shared-memory
#[arg(short, long, default_value = "shared-memory")]
transport: String,
// Only printed at startup in main(); not enforced here.
/// Maximum connected agents
#[arg(long, default_value = "100")]
max_agents: usize,
// Switches the tracing env filter from "info" to "debug".
/// Enable verbose logging
#[arg(short, long)]
verbose: bool,
}
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();

    // Logging level follows the --verbose flag.
    let filter = if args.verbose { "debug" } else { "info" };
    tracing_subscriber::fmt().with_env_filter(filter).init();

    // Resolve the transport from its CLI name (unknown -> shared memory).
    let transport = match args.transport.to_lowercase().as_str() {
        "websocket" | "ws" => Transport::WebSocket,
        other if other.is_empty() => Transport::SharedMemory,
        _ => Transport::SharedMemory,
    };

    // Build the coordinator's swarm configuration.
    let config = SwarmConfig::default()
        .with_agent_id(&args.id)
        .with_role(AgentRole::Coordinator)
        .with_transport(transport);
    let agent = SwarmAgent::new(config).await?;

    // Startup banner.
    println!("🎯 RuVector Edge Coordinator");
    println!(" ID: {}", agent.id());
    println!(" Transport: {:?}", transport);
    println!(" Max Agents: {}", args.max_agents);
    println!();

    // Start sync loop for coordinator duties
    agent.start_sync_loop().await;

    // Periodic status heartbeat.
    let agent_id = agent.id().to_string();
    tokio::spawn(async move {
        let mut ticker = interval(Duration::from_secs(5));
        loop {
            ticker.tick().await;
            // In real implementation, would report actual peer stats
            tracing::info!("Coordinator {} status: healthy", agent_id);
        }
    });

    println!("✅ Coordinator running. Press Ctrl+C to stop.\n");

    // Block until Ctrl+C.
    signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
    println!("\n👋 Coordinator shutting down...");
    Ok(())
}

View File

@@ -0,0 +1,127 @@
//! Edge Swarm Demo
//!
//! Demonstrates distributed learning across multiple agents.
use ruvector_edge::prelude::*;
use ruvector_edge::Transport;
#[tokio::main]
async fn main() -> Result<()> {
    // Initialize tracing
    tracing_subscriber::fmt()
        .with_env_filter("info")
        .init();
    println!("🚀 RuVector Edge Swarm Demo\n");
    // Create coordinator agent
    let coordinator_config = SwarmConfig::default()
        .with_agent_id("coordinator-001")
        .with_role(AgentRole::Coordinator)
        .with_transport(Transport::SharedMemory);
    let coordinator = SwarmAgent::new(coordinator_config).await?;
    println!("✅ Coordinator created: {}", coordinator.id());
    // Create worker agents
    let mut workers = Vec::new();
    for i in 1..=3 {
        let config = SwarmConfig::default()
            .with_agent_id(format!("worker-{:03}", i))
            .with_role(AgentRole::Worker)
            .with_transport(Transport::SharedMemory);
        let worker = SwarmAgent::new(config).await?;
        println!("✅ Worker created: {}", worker.id());
        workers.push(worker);
    }
    println!("\n📚 Simulating distributed learning...\n");
    // Simulate learning across agents: (state, best action, reward).
    let learning_scenarios = vec![
        ("edit_ts", "typescript-developer", 0.9),
        ("edit_rs", "rust-developer", 0.95),
        ("edit_py", "python-developer", 0.85),
        ("test_run", "test-engineer", 0.8),
        ("review_pr", "reviewer", 0.88),
    ];
    for (i, worker) in workers.iter().enumerate() {
        // Each worker learns from different scenarios
        for (j, (state, action, reward)) in learning_scenarios.iter().enumerate() {
            // Distribute scenarios round-robin across the 3 workers
            if j % 3 == i {
                worker.learn(state, action, *reward).await;
                // Bug fix: the original format string used "{}{}", which
                // printed state and action fused together (e.g.
                // "edit_tstypescript-developer").
                println!(
                    " {} learned: {} -> {} (reward: {:.2})",
                    worker.id(),
                    state,
                    action,
                    reward
                );
            }
        }
    }
    println!("\n🔄 Syncing patterns across swarm...\n");
    // Simulate pattern sync (in real implementation, this goes over network)
    for worker in &workers {
        let state = worker.get_best_action("edit_ts", &["coder".to_string(), "typescript-developer".to_string()]).await;
        if let Some((action, confidence)) = state {
            println!(
                " {} best action for edit_ts: {} (confidence: {:.1}%)",
                worker.id(),
                action,
                confidence * 100.0
            );
        }
    }
    println!("\n💾 Storing vectors in shared memory...\n");
    // Store some vector memories
    let embeddings = vec![
        ("Authentication flow implementation", vec![0.1, 0.2, 0.8, 0.3]),
        ("Database connection pooling", vec![0.4, 0.1, 0.2, 0.9]),
        ("API rate limiting logic", vec![0.3, 0.7, 0.1, 0.4]),
    ];
    for (content, embedding) in embeddings {
        let id = coordinator.store_memory(content, embedding).await?;
        // Bug fix: `&id[..8]` panics on ids shorter than 8 bytes (or a
        // non-char boundary); fall back to the full id instead.
        println!(" Stored: {} (id: {})", content, id.get(..8).unwrap_or(id.as_str()));
    }
    // Search for similar vectors
    let query = vec![0.1, 0.2, 0.7, 0.4];
    let results = coordinator.search_memory(&query, 2).await;
    println!("\n🔍 Vector search results:");
    for (content, score) in results {
        println!(" - {} (score: {:.3})", content, score);
    }
    println!("\n📊 Swarm Statistics:\n");
    // Print stats for each agent
    let stats = coordinator.get_stats().await;
    println!(
        " Coordinator: {} patterns, {} memories",
        stats.total_patterns, stats.total_memories
    );
    for worker in &workers {
        let stats = worker.get_stats().await;
        println!(
            " {}: {} patterns, confidence: {:.1}%",
            worker.id(),
            stats.total_patterns,
            stats.avg_confidence * 100.0
        );
    }
    println!("\n✨ Demo complete!\n");
    Ok(())
}

View File

@@ -0,0 +1,306 @@
//! Tensor compression for efficient network transfer
//!
//! Uses LZ4 compression with optional quantization for vector data.
use crate::{Result, SwarmError};
use serde::{Deserialize, Serialize};
/// Compression level for tensor data
///
/// Controls the byte-level codec (`None`/`Fast`/`High` via LZ4) and, for
/// the `Quantized*` variants, the float quantization applied by
/// `TensorCodec::compress_tensor` before LZ4. `Fast` is the default.
// Idiom: `#[derive(Default)]` with the `#[default]` variant attribute
// replaces the hand-written `impl Default` (variant order unchanged).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum CompressionLevel {
    /// No compression (fastest)
    None,
    /// Fast LZ4 compression (default)
    #[default]
    Fast,
    /// High compression ratio
    High,
    /// Quantize to 8-bit then compress
    Quantized8,
    /// Quantize to 4-bit then compress
    Quantized4,
}
/// Tensor codec for compression/decompression
///
/// Stateless apart from the configured level; the same level must be
/// used for compression and decompression of the same payload.
pub struct TensorCodec {
// Compression strategy applied by all compress/decompress methods.
level: CompressionLevel,
}
impl TensorCodec {
    /// Create new codec with default compression (LZ4 fast).
    pub fn new() -> Self {
        Self {
            level: CompressionLevel::Fast,
        }
    }

    /// Create codec with specific compression level
    pub fn with_level(level: CompressionLevel) -> Self {
        Self { level }
    }

    /// Compress data
    ///
    /// `None` returns the bytes unchanged; every other level applies
    /// size-prepended LZ4.
    pub fn compress(&self, data: &[u8]) -> Result<Vec<u8>> {
        match self.level {
            CompressionLevel::None => Ok(data.to_vec()),
            CompressionLevel::Fast | CompressionLevel::High => {
                let compressed = lz4_flex::compress_prepend_size(data);
                Ok(compressed)
            }
            CompressionLevel::Quantized8 | CompressionLevel::Quantized4 => {
                // For quantized, just use LZ4 on the raw data
                // Real implementation would quantize floats first
                let compressed = lz4_flex::compress_prepend_size(data);
                Ok(compressed)
            }
        }
    }

    /// Decompress data
    ///
    /// Inverse of [`Self::compress`]: pass-through for `None`, otherwise
    /// size-prepended LZ4.
    pub fn decompress(&self, data: &[u8]) -> Result<Vec<u8>> {
        match self.level {
            CompressionLevel::None => Ok(data.to_vec()),
            _ => {
                lz4_flex::decompress_size_prepended(data)
                    .map_err(|e| SwarmError::Compression(e.to_string()))
            }
        }
    }

    /// Compress f32 tensor with quantization
    ///
    /// `Quantized8`/`Quantized4` quantize the floats first and always
    /// LZ4-compress the quantized bytes; all other levels serialize the
    /// raw little-endian f32 bytes through [`Self::compress`].
    pub fn compress_tensor(&self, tensor: &[f32]) -> Result<CompressedTensor> {
        match self.level {
            CompressionLevel::Quantized8 => {
                let (quantized, scale, zero_point) = quantize_8bit(tensor);
                let compressed = lz4_flex::compress_prepend_size(&quantized);
                Ok(CompressedTensor {
                    data: compressed,
                    original_len: tensor.len(),
                    quantization: Some(QuantizationParams {
                        bits: 8,
                        scale,
                        zero_point,
                    }),
                })
            }
            CompressionLevel::Quantized4 => {
                let (quantized, scale, zero_point) = quantize_4bit(tensor);
                let compressed = lz4_flex::compress_prepend_size(&quantized);
                Ok(CompressedTensor {
                    data: compressed,
                    original_len: tensor.len(),
                    quantization: Some(QuantizationParams {
                        bits: 4,
                        scale,
                        zero_point,
                    }),
                })
            }
            _ => {
                // No quantization, just compress raw little-endian bytes.
                let bytes: Vec<u8> = tensor
                    .iter()
                    .flat_map(|f| f.to_le_bytes())
                    .collect();
                let compressed = self.compress(&bytes)?;
                Ok(CompressedTensor {
                    data: compressed,
                    original_len: tensor.len(),
                    quantization: None,
                })
            }
        }
    }

    /// Decompress tensor back to f32
    ///
    /// Quantized payloads are always LZ4-compressed (see
    /// `compress_tensor`) and are decoded directly. Bug fix: the
    /// unquantized path now goes through [`Self::decompress`] so that a
    /// `CompressionLevel::None` codec round-trips — previously this
    /// always attempted LZ4 decompression, which fails on tensors that
    /// were stored uncompressed.
    pub fn decompress_tensor(&self, compressed: &CompressedTensor) -> Result<Vec<f32>> {
        match &compressed.quantization {
            Some(params) if params.bits == 8 => {
                let bytes = lz4_flex::decompress_size_prepended(&compressed.data)
                    .map_err(|e| SwarmError::Compression(e.to_string()))?;
                Ok(dequantize_8bit(&bytes, params.scale, params.zero_point))
            }
            Some(params) if params.bits == 4 => {
                let bytes = lz4_flex::decompress_size_prepended(&compressed.data)
                    .map_err(|e| SwarmError::Compression(e.to_string()))?;
                Ok(dequantize_4bit(&bytes, compressed.original_len, params.scale, params.zero_point))
            }
            _ => {
                // Raw f32 bytes; honor the codec's level (may be `None`).
                let bytes = self.decompress(&compressed.data)?;
                let tensor: Vec<f32> = bytes
                    .chunks_exact(4)
                    .map(|c| f32::from_le_bytes([c[0], c[1], c[2], c[3]]))
                    .collect();
                Ok(tensor)
            }
        }
    }

    /// Get compression ratio estimate for level
    ///
    /// Rough expected (compressed / original) size; not measured.
    pub fn estimated_ratio(&self) -> f32 {
        match self.level {
            CompressionLevel::None => 1.0,
            CompressionLevel::Fast => 0.5,
            CompressionLevel::High => 0.3,
            CompressionLevel::Quantized8 => 0.15,
            CompressionLevel::Quantized4 => 0.08,
        }
    }
}
impl Default for TensorCodec {
fn default() -> Self {
Self::new()
}
}
/// Compressed tensor with metadata
///
/// Produced by `TensorCodec::compress_tensor`; carries everything needed
/// to restore the original f32 values (up to quantization error).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompressedTensor {
/// Compressed payload (quantized bytes or raw little-endian f32 bytes).
pub data: Vec<u8>,
/// Element count of the original tensor (needed for 4-bit unpacking).
pub original_len: usize,
/// Present iff the payload was quantized before compression.
pub quantization: Option<QuantizationParams>,
}
/// Quantization parameters for dequantization
///
/// A quantized value `q` maps back to approximately
/// `q * scale + zero_point`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QuantizationParams {
/// Bit width of each quantized value (8 or 4 in this module).
pub bits: u8,
/// Step size between adjacent quantization levels.
pub scale: f32,
/// Value represented by quantized 0 (the input minimum).
pub zero_point: f32,
}
/// Quantize f32 values to 8-bit integers.
///
/// Returns `(values, scale, zero_point)` where each byte `q` maps back to
/// approximately `q * scale + zero_point`. An empty input yields an empty
/// vector with identity parameters; a constant input (zero range) maps
/// every value to 0.
fn quantize_8bit(tensor: &[f32]) -> (Vec<u8>, f32, f32) {
    if tensor.is_empty() {
        return (Vec::new(), 1.0, 0.0);
    }
    // Track the value range in a single pass.
    let mut min_val = f32::INFINITY;
    let mut max_val = f32::NEG_INFINITY;
    for &v in tensor {
        min_val = f32::min(min_val, v);
        max_val = f32::max(max_val, v);
    }
    let scale = (max_val - min_val) / 255.0;
    let zero_point = min_val;
    let quantize_one = |v: f32| -> u8 {
        if scale == 0.0 {
            0
        } else {
            ((v - zero_point) / scale).clamp(0.0, 255.0) as u8
        }
    };
    let quantized = tensor.iter().copied().map(quantize_one).collect();
    (quantized, scale, zero_point)
}
/// Map 8-bit quantized values back to f32 via `q * scale + zero_point`.
fn dequantize_8bit(quantized: &[u8], scale: f32, zero_point: f32) -> Vec<f32> {
    let mut out = Vec::with_capacity(quantized.len());
    for &q in quantized {
        out.push(f32::from(q) * scale + zero_point);
    }
    out
}
/// Quantize f32 values to packed 4-bit nibbles (two values per byte).
///
/// The first value of each pair occupies the high nibble. An odd-length
/// input pads the final low nibble with 0, so the caller must carry the
/// original element count separately (see `CompressedTensor`). Empty
/// input yields an empty vector with identity parameters; a constant
/// input (zero range) quantizes everything to 0.
fn quantize_4bit(tensor: &[f32]) -> (Vec<u8>, f32, f32) {
    if tensor.is_empty() {
        return (Vec::new(), 1.0, 0.0);
    }
    let min_val = tensor.iter().copied().fold(f32::INFINITY, f32::min);
    let max_val = tensor.iter().copied().fold(f32::NEG_INFINITY, f32::max);
    let scale = (max_val - min_val) / 15.0;
    let zero_point = min_val;
    let nibble = |v: f32| -> u8 {
        if scale == 0.0 {
            0
        } else {
            ((v - zero_point) / scale).clamp(0.0, 15.0) as u8
        }
    };
    // Pack two 4-bit values per byte, high nibble first.
    let packed = tensor
        .chunks(2)
        .map(|pair| {
            let hi = nibble(pair[0]);
            let lo = pair.get(1).map_or(0, |&v| nibble(v));
            (hi << 4) | lo
        })
        .collect();
    (packed, scale, zero_point)
}
/// Unpack 4-bit quantized values (two per byte, high nibble first) back
/// to f32, emitting at most `original_len` low-nibble values so the
/// odd-length padding nibble is dropped.
fn dequantize_4bit(packed: &[u8], original_len: usize, scale: f32, zero_point: f32) -> Vec<f32> {
    let mut result = Vec::with_capacity(original_len);
    for &byte in packed {
        let hi = byte >> 4;
        let lo = byte & 0x0F;
        result.push(f32::from(hi) * scale + zero_point);
        if result.len() < original_len {
            result.push(f32::from(lo) * scale + zero_point);
        }
    }
    result
}
#[cfg(test)]
mod tests {
use super::*;
// Byte-level LZ4 round trip at the Fast level.
#[test]
fn test_lz4_compression() {
let codec = TensorCodec::with_level(CompressionLevel::Fast);
let data = b"Hello, RuVector Edge! This is test data for compression.";
let compressed = codec.compress(data).unwrap();
let decompressed = codec.decompress(&compressed).unwrap();
assert_eq!(decompressed, data);
}
// 8-bit quantized round trip; values in [0, 1) with 1/255 step size,
// so per-element error stays below 0.01.
#[test]
fn test_8bit_quantization() {
let codec = TensorCodec::with_level(CompressionLevel::Quantized8);
let tensor: Vec<f32> = (0..100).map(|i| i as f32 / 100.0).collect();
let compressed = codec.compress_tensor(&tensor).unwrap();
let decompressed = codec.decompress_tensor(&compressed).unwrap();
// Check approximate equality (quantization introduces small errors)
for (orig, dec) in tensor.iter().zip(decompressed.iter()) {
assert!((orig - dec).abs() < 0.01);
}
}
// 4-bit quantized round trip: length must be restored exactly (the
// packed padding nibble is dropped) and error stays within 1/15 step.
#[test]
fn test_4bit_quantization() {
let codec = TensorCodec::with_level(CompressionLevel::Quantized4);
let tensor: Vec<f32> = (0..100).map(|i| i as f32 / 100.0).collect();
let compressed = codec.compress_tensor(&tensor).unwrap();
let decompressed = codec.decompress_tensor(&compressed).unwrap();
assert_eq!(decompressed.len(), tensor.len());
// 4-bit has more error, but should be within bounds
for (orig, dec) in tensor.iter().zip(decompressed.iter()) {
assert!((orig - dec).abs() < 0.1);
}
}
}

397
vendor/ruvector/examples/edge/src/gun.rs vendored Normal file
View File

@@ -0,0 +1,397 @@
//! GUN Decentralized Database Integration
//!
//! Provides decentralized P2P sync for swarm intelligence using GUN protocol.
//!
//! GUN Features:
//! - Offline-first: Works without internet
//! - Real-time sync: Changes propagate instantly
//! - Decentralized: No central server needed
//! - Graph database: Perfect for relationship data
//! - Conflict resolution: HAM (Hypothetical Amnesia Machine)
#[cfg(feature = "gun")]
use gundb::Node;
use crate::Result;
use crate::intelligence::{LearningState, Pattern};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
/// GUN-backed swarm state that syncs across all peers
///
/// Local mirror of the swarm's shared graph; mutated by the publish/
/// announce methods on `GunSync` and (with the `gun` feature) intended
/// to be synced through the GUN network.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GunSwarmState {
/// Swarm identifier
pub swarm_id: String,
/// Connected peers
pub peers: HashMap<String, GunPeerInfo>,
/// Shared learning patterns (synced via GUN), keyed "state|action"
pub patterns: HashMap<String, Pattern>,
/// Shared memories (synced via GUN)
pub memories: Vec<GunMemory>,
/// Global swarm config
pub config: GunSwarmConfig,
}
/// Peer entry as announced through GUN (see `GunSync::announce_peer`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GunPeerInfo {
/// Announcing agent's id.
pub agent_id: String,
/// SEA public key; currently always `None` (encryption not wired up).
pub public_key: Option<String>,
/// Announce time (unix millis).
pub last_seen: u64,
/// Number of patterns the peer held at announce time.
pub patterns_count: usize,
/// Number of memories the peer held at announce time.
pub memories_count: usize,
}
/// A memory entry shared through GUN (see `GunSync::publish_memory`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GunMemory {
/// Unique memory id; also used as the GUN path segment.
pub id: String,
/// Raw memory content.
pub content: String,
pub embedding_hash: String, // Hash of embedding for dedup
/// Agent id of the memory's creator.
pub owner: String,
// Creation time — presumably unix millis to match
// GunPeerInfo::last_seen; not set anywhere in this file, confirm.
pub timestamp: u64,
}
/// Settings for GUN-backed swarm sync; see `Default` for the defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GunSwarmConfig {
/// GUN relay peers to connect to
pub relays: Vec<String>,
/// Enable encryption (SEA)
pub encrypted: bool,
/// Sync interval in ms
pub sync_interval_ms: u64,
/// Max patterns to sync
pub max_patterns: usize,
}
impl Default for GunSwarmConfig {
    /// Defaults: two public Heroku relays, SEA encryption enabled,
    /// 1-second sync interval, at most 10000 patterns.
    fn default() -> Self {
        let relays = vec![
            "https://gun-manhattan.herokuapp.com/gun".to_string(),
            "https://gun-us.herokuapp.com/gun".to_string(),
        ];
        Self {
            relays,
            encrypted: true,
            sync_interval_ms: 1000,
            max_patterns: 10000,
        }
    }
}
/// GUN-powered decentralized swarm sync
///
/// Keeps a local mirror of the swarm's shared state and, when compiled
/// with the `gun` feature, holds a GUN node through which that state is
/// meant to be published. Without the feature it operates local-only.
pub struct GunSync {
// Identifier of the swarm this sync participates in.
swarm_id: String,
// This agent's id (used when announcing presence).
agent_id: String,
// Locally mirrored swarm state.
state: Arc<RwLock<GunSwarmState>>,
// GUN node handle; only present with the `gun` feature.
#[cfg(feature = "gun")]
node: Option<Node>,
// Relay / encryption / interval settings.
config: GunSwarmConfig,
}
impl GunSync {
    /// Create new GUN sync manager
    ///
    /// Starts with an empty local state; no network activity happens
    /// until [`Self::connect`] is called.
    pub fn new(swarm_id: &str, agent_id: &str, config: GunSwarmConfig) -> Self {
        let state = GunSwarmState {
            swarm_id: swarm_id.to_string(),
            peers: HashMap::new(),
            patterns: HashMap::new(),
            memories: Vec::new(),
            config: config.clone(),
        };
        Self {
            swarm_id: swarm_id.to_string(),
            agent_id: agent_id.to_string(),
            state: Arc::new(RwLock::new(state)),
            #[cfg(feature = "gun")]
            node: None,
            config,
        }
    }

    /// Connect to GUN network
    ///
    /// Creates the GUN node against the configured relays and subscribes
    /// to this swarm's path.
    #[cfg(feature = "gun")]
    pub async fn connect(&mut self) -> Result<()> {
        use gundb::Node;
        tracing::info!("Connecting to GUN network for swarm: {}", self.swarm_id);
        // Create GUN node
        let node = Node::new_with_config(gundb::Config {
            peers: self.config.relays.clone(),
            ..Default::default()
        });
        self.node = Some(node);
        // Subscribe to swarm updates
        self.subscribe_to_swarm().await?;
        tracing::info!("Connected to GUN network");
        Ok(())
    }

    /// Connect stub when the `gun` feature is disabled: all publish calls
    /// only update the in-memory state.
    #[cfg(not(feature = "gun"))]
    pub async fn connect(&mut self) -> Result<()> {
        tracing::warn!("GUN feature not enabled, using local-only mode");
        Ok(())
    }

    /// Subscribe to swarm data changes
    #[cfg(feature = "gun")]
    async fn subscribe_to_swarm(&self) -> Result<()> {
        if let Some(ref node) = self.node {
            let path = format!("swarms/{}", self.swarm_id);
            // In real implementation, set up GUN subscriptions
            // node.get(&path).on(|data| { ... });
            let _ = node; // placeholder until the subscription is wired up
            tracing::info!("Subscribed to GUN path: {}", path);
        }
        Ok(())
    }

    /// Publish pattern to GUN network
    ///
    /// Always updates the local pattern table (keyed "state|action");
    /// additionally pushes to the GUN graph when connected with the
    /// `gun` feature enabled.
    pub async fn publish_pattern(&self, pattern: &Pattern) -> Result<()> {
        let key = format!("{}|{}", pattern.state, pattern.action);
        // Update local state
        {
            let mut state = self.state.write().await;
            state.patterns.insert(key.clone(), pattern.clone());
        }
        // Publish to GUN (if connected)
        #[cfg(feature = "gun")]
        if let Some(ref node) = self.node {
            let path = format!("swarms/{}/patterns/{}", self.swarm_id, key);
            // Bug fix: `SwarmError` is not imported in this module, so the
            // unqualified name failed to compile under `--features gun`;
            // use the crate-qualified path.
            let data = serde_json::to_string(pattern)
                .map_err(|e| crate::SwarmError::Serialization(e.to_string()))?;
            // node.get(&path).put(&data);
            let _ = (node, data); // placeholder until the put is wired up
            tracing::debug!("Published pattern to GUN: {}", path);
        }
        Ok(())
    }

    /// Publish memory to GUN network
    ///
    /// Appends to the local memory list; pushes to GUN when available.
    pub async fn publish_memory(&self, memory: GunMemory) -> Result<()> {
        // Update local state
        {
            let mut state = self.state.write().await;
            state.memories.push(memory.clone());
        }
        // Publish to GUN
        #[cfg(feature = "gun")]
        if let Some(ref node) = self.node {
            let path = format!("swarms/{}/memories/{}", self.swarm_id, memory.id);
            // Bug fix: crate-qualified error path (SwarmError not imported).
            let data = serde_json::to_string(&memory)
                .map_err(|e| crate::SwarmError::Serialization(e.to_string()))?;
            // node.get(&path).put(&data);
            let _ = (node, data); // placeholder until the put is wired up
            tracing::debug!("Published memory to GUN: {}", path);
        }
        Ok(())
    }

    /// Announce peer presence
    ///
    /// Publishes this agent's id and its current pattern/memory counts.
    pub async fn announce_peer(&self) -> Result<()> {
        // Take both counts under a single read lock so the announced
        // snapshot is internally consistent (the original took two
        // separate locks).
        let (patterns_count, memories_count) = {
            let state = self.state.read().await;
            (state.patterns.len(), state.memories.len())
        };
        let peer_info = GunPeerInfo {
            agent_id: self.agent_id.clone(),
            public_key: None, // Would be set with SEA encryption
            last_seen: chrono::Utc::now().timestamp_millis() as u64,
            patterns_count,
            memories_count,
        };
        // Update local state
        {
            let mut state = self.state.write().await;
            state.peers.insert(self.agent_id.clone(), peer_info.clone());
        }
        // Publish to GUN
        #[cfg(feature = "gun")]
        if let Some(ref node) = self.node {
            let path = format!("swarms/{}/peers/{}", self.swarm_id, self.agent_id);
            // Bug fix: crate-qualified error path (SwarmError not imported).
            let data = serde_json::to_string(&peer_info)
                .map_err(|e| crate::SwarmError::Serialization(e.to_string()))?;
            // node.get(&path).put(&data);
            let _ = (node, data); // placeholder until the put is wired up
            tracing::debug!("Announced peer to GUN: {}", path);
        }
        Ok(())
    }

    /// Get all patterns from swarm (a clone of the local mirror).
    pub async fn get_patterns(&self) -> HashMap<String, Pattern> {
        self.state.read().await.patterns.clone()
    }

    /// Get all peers in swarm (a clone of the local mirror).
    pub async fn get_peers(&self) -> Vec<GunPeerInfo> {
        self.state.read().await.peers.values().cloned().collect()
    }

    /// Get swarm statistics from the local mirror and configuration.
    pub async fn get_stats(&self) -> GunSwarmStats {
        let state = self.state.read().await;
        GunSwarmStats {
            swarm_id: state.swarm_id.clone(),
            total_peers: state.peers.len(),
            total_patterns: state.patterns.len(),
            total_memories: state.memories.len(),
            relays: self.config.relays.len(),
            encrypted: self.config.encrypted,
        }
    }

    /// Sync learning state to GUN
    ///
    /// Publishes every pattern in `learning_state`; returns how many
    /// were published.
    pub async fn sync_learning_state(&self, learning_state: &LearningState) -> Result<usize> {
        let mut synced = 0;
        for (_key, pattern) in &learning_state.patterns {
            self.publish_pattern(pattern).await?;
            synced += 1;
        }
        Ok(synced)
    }

    /// Import patterns from GUN to local learning state
    ///
    /// New keys are inserted; for existing keys the incoming pattern wins
    /// only when it has more visits. Returns the number of patterns
    /// inserted or replaced.
    pub async fn import_to_learning_state(&self, learning_state: &mut LearningState) -> usize {
        let state = self.state.read().await;
        let mut imported = 0;
        for (key, pattern) in &state.patterns {
            if !learning_state.patterns.contains_key(key) {
                learning_state.patterns.insert(key.clone(), pattern.clone());
                imported += 1;
            } else {
                // Merge patterns (take the one with more visits)
                if let Some(existing) = learning_state.patterns.get_mut(key) {
                    if pattern.visits > existing.visits {
                        *existing = pattern.clone();
                        imported += 1;
                    }
                }
            }
        }
        imported
    }
}
/// GUN swarm statistics
///
/// Snapshot produced by `GunSync::get_stats` from the local mirror.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GunSwarmStats {
/// Swarm identifier.
pub swarm_id: String,
/// Peers currently tracked in the local mirror.
pub total_peers: usize,
/// Patterns currently tracked in the local mirror.
pub total_patterns: usize,
/// Memories currently tracked in the local mirror.
pub total_memories: usize,
/// Number of configured relay URLs.
pub relays: usize,
/// Whether SEA encryption is configured.
pub encrypted: bool,
}
/// Builder for GUN-enabled swarm
///
/// Fluent configuration for a `GunSync`; finalize with `build`.
pub struct GunSwarmBuilder {
// Swarm this sync will join.
swarm_id: String,
// Relay URLs; when empty, `build` falls back to the config defaults.
relays: Vec<String>,
// SEA encryption toggle (defaults to true).
encrypted: bool,
// Sync interval in milliseconds (defaults to 1000).
sync_interval_ms: u64,
}
impl GunSwarmBuilder {
    /// Start a builder for `swarm_id` with no relays, encryption enabled
    /// and a 1-second sync interval.
    pub fn new(swarm_id: &str) -> Self {
        Self {
            swarm_id: swarm_id.to_string(),
            relays: Vec::new(),
            encrypted: true,
            sync_interval_ms: 1000,
        }
    }

    /// Add a GUN relay peer
    pub fn with_relay(mut self, url: &str) -> Self {
        self.relays.push(url.to_string());
        self
    }

    /// Add default public relays
    pub fn with_public_relays(mut self) -> Self {
        for relay in [
            "https://gun-manhattan.herokuapp.com/gun",
            "https://gun-us.herokuapp.com/gun",
            "https://gun-eu.herokuapp.com/gun",
        ] {
            self.relays.push(relay.to_string());
        }
        self
    }

    /// Enable/disable encryption
    pub fn encrypted(mut self, enabled: bool) -> Self {
        self.encrypted = enabled;
        self
    }

    /// Set sync interval
    pub fn sync_interval(mut self, ms: u64) -> Self {
        self.sync_interval_ms = ms;
        self
    }

    /// Build GunSync instance
    ///
    /// An empty relay list falls back to `GunSwarmConfig::default()`'s
    /// public relays.
    pub fn build(self, agent_id: &str) -> GunSync {
        let relays = if self.relays.is_empty() {
            GunSwarmConfig::default().relays
        } else {
            self.relays
        };
        let config = GunSwarmConfig {
            relays,
            encrypted: self.encrypted,
            sync_interval_ms: self.sync_interval_ms,
            max_patterns: 10000,
        };
        GunSync::new(&self.swarm_id, agent_id, config)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Without the GUN feature, publish_pattern only mutates local state:
    // the pattern must be visible via get_patterns and counted in stats.
    #[tokio::test]
    async fn test_gun_sync_local() {
        let sync = GunSwarmBuilder::new("test-swarm")
            .encrypted(false)
            .build("agent-001");
        // Test pattern publishing (local only, no GUN feature)
        let pattern = Pattern::new("edit_ts", "typescript-developer");
        sync.publish_pattern(&pattern).await.unwrap();
        let patterns = sync.get_patterns().await;
        assert_eq!(patterns.len(), 1);
        // Test stats
        let stats = sync.get_stats().await;
        assert_eq!(stats.swarm_id, "test-swarm");
        assert_eq!(stats.total_patterns, 1);
    }

    // announce_peer should register the announcing agent itself in the
    // peer table.
    #[tokio::test]
    async fn test_gun_peer_announce() {
        let sync = GunSwarmBuilder::new("test-swarm")
            .build("agent-001");
        sync.announce_peer().await.unwrap();
        let peers = sync.get_peers().await;
        assert_eq!(peers.len(), 1);
        assert_eq!(peers[0].agent_id, "agent-001");
    }
}

View File

@@ -0,0 +1,319 @@
//! Distributed intelligence synchronization
//!
//! Sync Q-learning patterns, trajectories, and learning state across swarm agents.
use crate::{Result, SwarmError, compression::TensorCodec};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
/// Learning pattern with Q-value
///
/// One (state, action) cell of a Q-table, with bookkeeping used for
/// federated merging across agents.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Pattern {
    // Observed state identifier (free-form string).
    pub state: String,
    // Action taken in that state.
    pub action: String,
    // Current Q-value estimate for this (state, action) pair.
    pub q_value: f64,
    // How many times this pair has been updated; used as a merge weight.
    pub visits: u64,
    // Epoch-millisecond timestamp of the last update (0 = never updated).
    pub last_update: u64,
    // Confidence in [0, 1); grows toward 1 with visits (see update_pattern).
    pub confidence: f64,
}
impl Pattern {
    /// Create a zeroed pattern for a (state, action) pair.
    pub fn new(state: &str, action: &str) -> Self {
        Self {
            state: state.to_string(),
            action: action.to_string(),
            q_value: 0.0,
            visits: 0,
            last_update: 0,
            confidence: 0.0,
        }
    }

    /// Merge with another pattern (federated learning style)
    ///
    /// Combines Q-values as a visit-weighted average, with the remote
    /// contribution additionally scaled by `weight`; visits are summed.
    /// No-op when both sides have zero visits.
    ///
    /// NOTE(review): only the remote term is multiplied by `weight`, so for
    /// `weight != 1.0` the coefficients do not sum to 1 and the result is not
    /// a true weighted mean — confirm this damping is intentional.
    pub fn merge(&mut self, other: &Pattern, weight: f64) {
        let total_visits = self.visits + other.visits;
        if total_visits > 0 {
            // Weighted average based on visits
            let self_weight = self.visits as f64 / total_visits as f64;
            let other_weight = other.visits as f64 / total_visits as f64;
            self.q_value = self.q_value * self_weight + other.q_value * other_weight * weight;
            self.visits = total_visits;
            // Confidence: plain average of local and weight-scaled remote.
            self.confidence = (self.confidence + other.confidence * weight) / 2.0;
        }
    }
}
/// Learning trajectory for decision transformer
///
/// An ordered episode of steps with its aggregate outcome.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Trajectory {
    // Unique trajectory identifier.
    pub id: String,
    // Ordered sequence of (state, action, reward) steps.
    pub steps: Vec<TrajectoryStep>,
    // Sum of rewards over the episode.
    pub total_reward: f64,
    // Whether the episode reached its goal.
    pub success: bool,
}
/// One step of a `Trajectory`: the state seen, the action taken, the reward
/// received, and when it happened (epoch milliseconds).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrajectoryStep {
    pub state: String,
    pub action: String,
    pub reward: f64,
    pub timestamp: u64,
}
/// Complete learning state for sync
///
/// The unit of exchange between agents: everything one agent has learned,
/// versioned so peers can request deltas.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LearningState {
    // Owning agent's identifier.
    pub agent_id: String,
    // Q-table keyed by "state|action" (see IntelligenceSync::update_pattern).
    pub patterns: HashMap<String, Pattern>,
    // Recorded episodes.
    pub trajectories: Vec<Trajectory>,
    // Per-algorithm aggregate statistics, keyed by algorithm name.
    pub algorithm_stats: HashMap<String, AlgorithmStats>,
    // Monotonic counter bumped on every local mutation.
    pub version: u64,
    // Epoch-millisecond creation/update timestamp.
    pub timestamp: u64,
}
/// Aggregate statistics for one learning algorithm: update count, mean
/// reward, and a convergence measure.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AlgorithmStats {
    pub algorithm: String,
    pub updates: u64,
    pub avg_reward: f64,
    pub convergence: f64,
}
impl Default for LearningState {
    /// Empty learning state stamped with the current wall-clock time.
    /// `agent_id` is left blank for the caller to fill in.
    fn default() -> Self {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        Self {
            agent_id: String::new(),
            patterns: HashMap::new(),
            trajectories: Vec::new(),
            algorithm_stats: HashMap::new(),
            version: 0,
            timestamp: now,
        }
    }
}
/// Intelligence synchronization manager
///
/// Holds the agent's own learning state plus the last-seen state of every
/// peer, and mediates pattern updates, serialization and merging.
pub struct IntelligenceSync {
    // This agent's mutable learning state.
    local_state: Arc<RwLock<LearningState>>,
    // Last full state received from each peer, keyed by peer id.
    peer_states: Arc<RwLock<HashMap<String, LearningState>>>,
    // Compressor used for wire serialization.
    codec: TensorCodec,
    // Minimum absolute Q-value delta required before merging a peer pattern.
    merge_threshold: f64,
}
impl IntelligenceSync {
    /// Create new intelligence sync manager
    pub fn new(agent_id: &str) -> Self {
        let mut state = LearningState::default();
        state.agent_id = agent_id.to_string();
        Self {
            local_state: Arc::new(RwLock::new(state)),
            peer_states: Arc::new(RwLock::new(HashMap::new())),
            codec: TensorCodec::new(),
            // NOTE(review): merge_peer_state uses this as an ABSOLUTE
            // q-value delta, not a percentage — confirm the "10%" intent.
            merge_threshold: 0.1, // Only merge if delta > 10%
        }
    }

    /// Get local learning state
    ///
    /// Returns a full clone of the current state.
    pub fn get_state(&self) -> LearningState {
        self.local_state.read().clone()
    }

    /// Update local pattern
    ///
    /// Standard tabular Q-learning update with fixed learning rate 0.1.
    /// Creates the pattern on first use and bumps the state version.
    pub fn update_pattern(&self, state: &str, action: &str, reward: f64) {
        let mut local = self.local_state.write();
        // Patterns are keyed by "state|action".
        let key = format!("{}|{}", state, action);
        let pattern = local.patterns.entry(key).or_insert_with(|| Pattern::new(state, action));
        // Q-learning update
        let alpha = 0.1;
        pattern.q_value = pattern.q_value + alpha * (reward - pattern.q_value);
        pattern.visits += 1;
        pattern.last_update = chrono::Utc::now().timestamp_millis() as u64;
        // Confidence approaches 1 as visits grow: 1 - 1/(visits + 1).
        pattern.confidence = 1.0 - (1.0 / (pattern.visits as f64 + 1.0));
        local.version += 1;
    }

    /// Serialize state for network transfer
    ///
    /// JSON-encodes the whole local state and compresses it with the codec.
    pub fn serialize_state(&self) -> Result<Vec<u8>> {
        let state = self.local_state.read();
        let json = serde_json::to_vec(&*state)
            .map_err(|e| SwarmError::Serialization(e.to_string()))?;
        // Compress for transfer
        self.codec.compress(&json)
    }

    /// Deserialize and merge peer state
    ///
    /// Stores the peer's full state, then folds its patterns into the local
    /// table: unknown keys are adopted as-is; known keys are merged (with
    /// weight 0.5) only when the Q-value differs by more than
    /// `merge_threshold`. Bumps the local version once at the end.
    pub fn merge_peer_state(&self, peer_id: &str, data: &[u8]) -> Result<MergeResult> {
        // Decompress
        let json = self.codec.decompress(data)?;
        let peer_state: LearningState = serde_json::from_slice(&json)
            .map_err(|e| SwarmError::Serialization(e.to_string()))?;
        // Store peer state
        {
            let mut peers = self.peer_states.write();
            peers.insert(peer_id.to_string(), peer_state.clone());
        }
        // Merge patterns
        let mut local = self.local_state.write();
        let mut merged_count = 0;
        let mut new_count = 0;
        for (key, peer_pattern) in &peer_state.patterns {
            if let Some(local_pattern) = local.patterns.get_mut(key) {
                // Merge existing pattern only when the disagreement is large
                // enough to matter (absolute Q-value delta).
                let delta = (peer_pattern.q_value - local_pattern.q_value).abs();
                if delta > self.merge_threshold {
                    local_pattern.merge(peer_pattern, 0.5);
                    merged_count += 1;
                }
            } else {
                // New pattern from peer
                local.patterns.insert(key.clone(), peer_pattern.clone());
                new_count += 1;
            }
        }
        local.version += 1;
        Ok(MergeResult {
            peer_id: peer_id.to_string(),
            merged_patterns: merged_count,
            new_patterns: new_count,
            local_version: local.version,
        })
    }

    /// Get best action for state using aggregated knowledge
    ///
    /// Scans the candidate `actions`, picks the one with the highest local
    /// Q-value, and returns it with that pattern's confidence. Returns None
    /// when no candidate has a recorded pattern.
    ///
    /// NOTE(review): the returned f64 is the pattern's CONFIDENCE, not the
    /// winning q_value, even though selection is by q_value — confirm which
    /// one callers expect.
    pub fn get_best_action(&self, state: &str, actions: &[String]) -> Option<(String, f64)> {
        let local = self.local_state.read();
        let mut best_action = None;
        let mut best_q = f64::NEG_INFINITY;
        for action in actions {
            let key = format!("{}|{}", state, action);
            if let Some(pattern) = local.patterns.get(&key) {
                if pattern.q_value > best_q {
                    best_q = pattern.q_value;
                    best_action = Some((action.clone(), pattern.confidence));
                }
            }
        }
        best_action
    }

    /// Get sync delta (only changed patterns since version)
    ///
    /// NOTE(review): this compares `pattern.last_update` (an epoch-millisecond
    /// timestamp) against `since_version` (a small monotonic counter) — the
    /// two are on different scales, so in practice every pattern with any
    /// update will be included. Confirm whether the caller passes a timestamp
    /// here, or whether a per-pattern version stamp was intended.
    pub fn get_delta(&self, since_version: u64) -> LearningState {
        let local = self.local_state.read();
        let mut delta = LearningState {
            agent_id: local.agent_id.clone(),
            version: local.version,
            timestamp: chrono::Utc::now().timestamp_millis() as u64,
            ..Default::default()
        };
        // Only include patterns updated since version
        for (key, pattern) in &local.patterns {
            if pattern.last_update > since_version {
                delta.patterns.insert(key.clone(), pattern.clone());
            }
        }
        delta
    }

    /// Get aggregated stats across all peers
    ///
    /// Visits and confidence are computed over LOCAL patterns only; pattern
    /// counts add raw peer table sizes, so keys shared between local and peer
    /// tables are counted more than once.
    pub fn get_swarm_stats(&self) -> SwarmStats {
        let local = self.local_state.read();
        let peers = self.peer_states.read();
        let mut total_patterns = local.patterns.len();
        let mut total_visits = 0u64;
        let mut avg_confidence = 0.0;
        for pattern in local.patterns.values() {
            total_visits += pattern.visits;
            avg_confidence += pattern.confidence;
        }
        for peer in peers.values() {
            total_patterns += peer.patterns.len();
        }
        let pattern_count = local.patterns.len();
        if pattern_count > 0 {
            avg_confidence /= pattern_count as f64;
        }
        SwarmStats {
            // +1 counts this agent alongside its peers.
            total_agents: peers.len() + 1,
            total_patterns,
            total_visits,
            avg_confidence,
            local_version: local.version,
        }
    }
}
/// Result of merging peer state
///
/// Summary returned by `IntelligenceSync::merge_peer_state`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergeResult {
    // Peer whose state was merged.
    pub peer_id: String,
    // Existing patterns updated via Pattern::merge.
    pub merged_patterns: usize,
    // Patterns adopted wholesale because they were unknown locally.
    pub new_patterns: usize,
    // Local state version after the merge.
    pub local_version: u64,
}
/// Aggregated swarm statistics
///
/// Produced by `IntelligenceSync::get_swarm_stats`; visit and confidence
/// figures cover local patterns only.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SwarmStats {
    pub total_agents: usize,
    pub total_patterns: usize,
    pub total_visits: u64,
    pub avg_confidence: f64,
    pub local_version: u64,
}
#[cfg(test)]
mod tests {
    use super::*;

    // Two updates to the same (state, action) should accumulate visits and
    // move q_value above zero.
    #[test]
    fn test_pattern_update() {
        let sync = IntelligenceSync::new("test-agent");
        sync.update_pattern("edit_ts", "coder", 0.8);
        sync.update_pattern("edit_ts", "coder", 0.9);
        let state = sync.get_state();
        let pattern = state.patterns.get("edit_ts|coder").unwrap();
        assert!(pattern.q_value > 0.0);
        assert_eq!(pattern.visits, 2);
    }

    // The action with the higher reward history should be selected.
    #[test]
    fn test_best_action() {
        let sync = IntelligenceSync::new("test-agent");
        sync.update_pattern("edit_ts", "coder", 0.5);
        sync.update_pattern("edit_ts", "reviewer", 0.9);
        let actions = vec!["coder".to_string(), "reviewer".to_string()];
        let best = sync.get_best_action("edit_ts", &actions);
        assert!(best.is_some());
        assert_eq!(best.unwrap().0, "reviewer");
    }
}

193
vendor/ruvector/examples/edge/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,193 @@
//! # RuVector Edge - Distributed AI Swarm Communication
//!
//! Edge AI swarm communication using `ruv-swarm-transport` with RuVector intelligence.
//!
//! ## Features
//!
//! - **WebSocket Transport**: Remote swarm communication
//! - **SharedMemory Transport**: High-performance local IPC
//! - **WASM Support**: Run in browser/edge environments
//! - **Intelligence Sync**: Distributed Q-learning across agents
//! - **Memory Sharing**: Shared vector memory for RAG
//! - **Tensor Compression**: Efficient pattern transfer
//!
//! ## Quick Start
//!
//! ```rust,no_run
//! use ruvector_edge::{SwarmAgent, SwarmConfig, Transport};
//!
//! #[tokio::main]
//! async fn main() {
//! let config = SwarmConfig::default()
//! .with_transport(Transport::WebSocket)
//! .with_agent_id("agent-001");
//!
//! let mut agent = SwarmAgent::new(config).await.unwrap();
//! agent.join_swarm("ws://coordinator:8080").await.unwrap();
//!
//! // Sync learning patterns
//! agent.sync_patterns().await.unwrap();
//! }
//! ```
// Native-only modules (require tokio)
#[cfg(feature = "native")]
pub mod transport;
#[cfg(feature = "native")]
pub mod agent;
#[cfg(feature = "native")]
pub mod gun;
// Cross-platform modules
pub mod intelligence;
pub mod memory;
pub mod compression;
pub mod protocol;
pub mod p2p;
pub mod plaid;
// WASM bindings
#[cfg(feature = "wasm")]
pub mod wasm;
// Native re-exports
#[cfg(feature = "native")]
pub use agent::{SwarmAgent, AgentRole};
#[cfg(feature = "native")]
pub use transport::{Transport, TransportConfig};
#[cfg(feature = "native")]
pub use gun::{GunSync, GunSwarmBuilder, GunSwarmConfig, GunSwarmStats};
// Cross-platform re-exports
pub use intelligence::{IntelligenceSync, LearningState, Pattern};
pub use memory::{SharedMemory, VectorMemory};
pub use compression::{TensorCodec, CompressionLevel};
pub use protocol::{SwarmMessage, MessageType};
pub use p2p::{IdentityManager, CryptoV2, RelayManager, ArtifactStore};
pub use plaid::{
Transaction, SpendingPattern, CategoryPrediction,
AnomalyResult, BudgetRecommendation, FinancialLearningState,
};
#[cfg(feature = "native")]
pub use p2p::{P2PSwarmV2, SwarmStatus};
// WASM re-exports
#[cfg(feature = "wasm")]
pub use wasm::*;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Swarm configuration (native only - uses Transport)
///
/// All knobs a `SwarmAgent` needs; build with `Default` plus the `with_*`
/// builder methods.
#[cfg(feature = "native")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SwarmConfig {
    // Unique agent identifier (random UUID by default).
    pub agent_id: String,
    // Role this agent plays in the swarm.
    pub agent_role: AgentRole,
    // Transport backend used for swarm communication.
    pub transport: Transport,
    // Optional coordinator endpoint to join on startup.
    pub coordinator_url: Option<String>,
    // Interval between sync rounds, in milliseconds.
    pub sync_interval_ms: u64,
    // Compression level for pattern/memory transfer.
    pub compression_level: CompressionLevel,
    // Maximum number of simultaneously tracked peers.
    pub max_peers: usize,
    // Whether Q-learning pattern sync is active.
    pub enable_learning: bool,
    // Whether vector-memory sync is active.
    pub enable_memory_sync: bool,
}
#[cfg(feature = "native")]
impl Default for SwarmConfig {
    /// Worker agent with a fresh random UUID id, WebSocket transport, 1s
    /// sync interval, fast compression, up to 100 peers, and both learning
    /// and memory sync enabled.
    fn default() -> Self {
        let agent_id = Uuid::new_v4().to_string();
        Self {
            agent_id,
            agent_role: AgentRole::Worker,
            transport: Transport::WebSocket,
            coordinator_url: None,
            sync_interval_ms: 1000,
            compression_level: CompressionLevel::Fast,
            max_peers: 100,
            enable_learning: true,
            enable_memory_sync: true,
        }
    }
}
#[cfg(feature = "native")]
impl SwarmConfig {
    /// Select the transport backend.
    pub fn with_transport(self, transport: Transport) -> Self {
        Self { transport, ..self }
    }

    /// Override the auto-generated agent id.
    pub fn with_agent_id(self, id: impl Into<String>) -> Self {
        Self { agent_id: id.into(), ..self }
    }

    /// Set this agent's role in the swarm.
    pub fn with_role(self, role: AgentRole) -> Self {
        Self { agent_role: role, ..self }
    }

    /// Set the coordinator endpoint to join on startup.
    pub fn with_coordinator(self, url: impl Into<String>) -> Self {
        Self { coordinator_url: Some(url.into()), ..self }
    }
}
/// Error types for edge swarm operations
#[derive(Debug, thiserror::Error)]
pub enum SwarmError {
    /// Low-level transport failure (send/receive, buffer bounds, etc.).
    #[error("Transport error: {0}")]
    Transport(String),
    /// Failed to establish or maintain a connection.
    #[error("Connection failed: {0}")]
    Connection(String),
    /// Serialization or deserialization failure.
    #[error("Serialization error: {0}")]
    Serialization(String),
    /// Compression or decompression failure.
    #[error("Compression error: {0}")]
    Compression(String),
    /// Learning/memory state synchronization failure.
    #[error("Sync error: {0}")]
    Sync(String),
    /// Referenced agent id is unknown to this node.
    #[error("Agent not found: {0}")]
    AgentNotFound(String),
    /// Invalid or inconsistent configuration.
    #[error("Configuration error: {0}")]
    Config(String),
}
/// Crate-wide result alias using [`SwarmError`].
pub type Result<T> = std::result::Result<T, SwarmError>;
/// Prelude for convenient imports
///
/// `use ruvector_edge::prelude::*;` pulls in the common cross-platform
/// types; native-only types are gated behind the `native` feature.
pub mod prelude {
    // Available on all targets (including WASM).
    pub use crate::{
        SwarmError, Result, MessageType,
        IntelligenceSync, SharedMemory,
        CompressionLevel,
    };
    // Tokio-backed types, native builds only.
    #[cfg(feature = "native")]
    pub use crate::{
        SwarmAgent, SwarmConfig,
        Transport, AgentRole,
    };
}
#[cfg(all(test, feature = "native"))]
mod tests {
    use super::*;

    // The fluent builder methods should each override exactly one field of
    // the default configuration.
    #[test]
    fn test_config_builder() {
        let config = SwarmConfig::default()
            .with_agent_id("test-agent")
            .with_transport(Transport::SharedMemory)
            .with_role(AgentRole::Coordinator);
        assert_eq!(config.agent_id, "test-agent");
        assert!(matches!(config.transport, Transport::SharedMemory));
        assert!(matches!(config.agent_role, AgentRole::Coordinator));
    }
}

View File

@@ -0,0 +1,284 @@
//! Shared vector memory for distributed RAG
//!
//! Enables agents to share vector embeddings and semantic memories across the swarm.
use crate::{Result, SwarmError, compression::TensorCodec};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
/// Vector memory entry
///
/// One embedded document in the shared memory: raw text, its embedding,
/// and provenance/usage bookkeeping.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VectorEntry {
    // UUID assigned at store time.
    pub id: String,
    // Original text content.
    pub content: String,
    // Embedding vector; dimension is whatever the caller supplied.
    pub embedding: Vec<f32>,
    // Free-form key/value metadata.
    pub metadata: HashMap<String, String>,
    // Creation time, epoch milliseconds (used for LRU-style eviction).
    pub timestamp: u64,
    // Agent that created this entry.
    pub owner_agent: String,
    // Incremented on each search/get that touches this entry.
    pub access_count: u64,
}
impl VectorEntry {
    /// Create a fresh entry owned by `owner`, stamped with the current time
    /// and zero accesses.
    pub fn new(id: &str, content: &str, embedding: Vec<f32>, owner: &str) -> Self {
        Self {
            id: id.to_string(),
            content: content.to_string(),
            embedding,
            metadata: HashMap::new(),
            timestamp: chrono::Utc::now().timestamp_millis() as u64,
            owner_agent: owner.to_string(),
            access_count: 0,
        }
    }

    /// Cosine similarity between this entry's embedding and `query`.
    ///
    /// Returns 0.0 when the dimensions differ or either vector has zero norm.
    pub fn similarity(&self, query: &[f32]) -> f32 {
        if self.embedding.len() != query.len() {
            return 0.0;
        }
        let (mut dot, mut norm_a, mut norm_b) = (0.0f32, 0.0f32, 0.0f32);
        for (&a, &b) in self.embedding.iter().zip(query) {
            dot += a * b;
            norm_a += a * a;
            norm_b += b * b;
        }
        if norm_a == 0.0 || norm_b == 0.0 {
            0.0
        } else {
            dot / (norm_a.sqrt() * norm_b.sqrt())
        }
    }
}
/// Shared vector memory across swarm
///
/// In-process vector store with capacity-bounded eviction; entries can be
/// serialized for sync and merged from peers.
pub struct VectorMemory {
    // Entries keyed by their UUID.
    entries: Arc<RwLock<HashMap<String, VectorEntry>>>,
    // Id recorded as the owner of locally stored entries.
    agent_id: String,
    // Capacity; oldest-by-timestamp entry is evicted on overflow.
    max_entries: usize,
    // Compressor for sync serialization.
    codec: TensorCodec,
}
impl VectorMemory {
    /// Create new vector memory
    pub fn new(agent_id: &str, max_entries: usize) -> Self {
        Self {
            entries: Arc::new(RwLock::new(HashMap::new())),
            agent_id: agent_id.to_string(),
            max_entries,
            codec: TensorCodec::new(),
        }
    }

    /// Store a vector entry
    ///
    /// Assigns a fresh UUID, evicts the oldest entry (by timestamp) when at
    /// capacity, and returns the new entry's id.
    pub fn store(&self, content: &str, embedding: Vec<f32>) -> Result<String> {
        let id = uuid::Uuid::new_v4().to_string();
        let entry = VectorEntry::new(&id, content, embedding, &self.agent_id);
        let mut entries = self.entries.write();
        // Evict oldest if at capacity
        if entries.len() >= self.max_entries {
            if let Some(oldest_id) = entries
                .iter()
                .min_by_key(|(_, e)| e.timestamp)
                .map(|(id, _)| id.clone())
            {
                entries.remove(&oldest_id);
            }
        }
        entries.insert(id.clone(), entry);
        Ok(id)
    }

    /// Search for similar vectors
    ///
    /// Brute-force cosine scan; results sorted by descending score, truncated
    /// to `top_k`.
    ///
    /// NOTE(review): takes the WRITE lock and bumps access_count on every
    /// entry scanned — not just the returned top-k — and clones each entry.
    /// Confirm both are intended for large stores.
    pub fn search(&self, query: &[f32], top_k: usize) -> Vec<(VectorEntry, f32)> {
        let mut entries = self.entries.write();
        let mut results: Vec<_> = entries
            .values_mut()
            .map(|entry| {
                entry.access_count += 1;
                let score = entry.similarity(query);
                (entry.clone(), score)
            })
            .collect();
        // NaN scores compare as Equal and keep their position.
        results.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        results.truncate(top_k);
        results
    }

    /// Get entry by ID
    ///
    /// Also counts as an access (increments access_count).
    pub fn get(&self, id: &str) -> Option<VectorEntry> {
        let mut entries = self.entries.write();
        if let Some(entry) = entries.get_mut(id) {
            entry.access_count += 1;
            Some(entry.clone())
        } else {
            None
        }
    }

    /// Delete entry
    ///
    /// Returns true when the id existed.
    pub fn delete(&self, id: &str) -> bool {
        let mut entries = self.entries.write();
        entries.remove(id).is_some()
    }

    /// Serialize all entries for sync
    ///
    /// JSON-encodes every entry and compresses with the codec.
    pub fn serialize(&self) -> Result<Vec<u8>> {
        let entries = self.entries.read();
        let data: Vec<_> = entries.values().cloned().collect();
        let json = serde_json::to_vec(&data)
            .map_err(|e| SwarmError::Serialization(e.to_string()))?;
        self.codec.compress(&json)
    }

    /// Merge entries from peer
    ///
    /// Adds peer entries whose ids are unknown locally; entries arriving
    /// once the store is at capacity are silently dropped (no eviction).
    /// Returns the number actually inserted.
    pub fn merge(&self, data: &[u8]) -> Result<usize> {
        let json = self.codec.decompress(data)?;
        let peer_entries: Vec<VectorEntry> = serde_json::from_slice(&json)
            .map_err(|e| SwarmError::Serialization(e.to_string()))?;
        let mut entries = self.entries.write();
        let mut merged = 0;
        for entry in peer_entries {
            if !entries.contains_key(&entry.id) {
                if entries.len() < self.max_entries {
                    entries.insert(entry.id.clone(), entry);
                    merged += 1;
                }
            }
        }
        Ok(merged)
    }

    /// Get memory stats
    ///
    /// `memory_bytes` counts only embedding floats (4 bytes each); content
    /// strings and metadata are not included.
    pub fn stats(&self) -> MemoryStats {
        let entries = self.entries.read();
        let total_vectors = entries.len();
        let total_dims: usize = entries.values().map(|e| e.embedding.len()).sum();
        let avg_dims = if total_vectors > 0 {
            total_dims / total_vectors
        } else {
            0
        };
        let total_accesses: u64 = entries.values().map(|e| e.access_count).sum();
        MemoryStats {
            total_entries: total_vectors,
            avg_dimensions: avg_dims,
            total_accesses,
            memory_bytes: total_dims * 4, // f32 = 4 bytes
        }
    }
}
/// Memory statistics
///
/// Snapshot produced by `VectorMemory::stats`; `memory_bytes` covers
/// embedding storage only.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryStats {
    pub total_entries: usize,
    pub avg_dimensions: usize,
    pub total_accesses: u64,
    pub memory_bytes: usize,
}
/// Shared memory segment for high-performance local IPC
///
/// Currently an in-process stand-in: the "segment" is a heap buffer behind
/// a lock, shared only within this process.
pub struct SharedMemory {
    // Segment name (informational only in this implementation).
    name: String,
    // Fixed segment size in bytes.
    size: usize,
    // In real implementation, this would use mmap or shared memory
    buffer: Arc<RwLock<Vec<u8>>>,
}
impl SharedMemory {
    /// Create or attach to shared memory segment
    ///
    /// The segment is zero-initialized. (In this in-process stand-in,
    /// "attach" always allocates a fresh buffer.)
    pub fn new(name: &str, size: usize) -> Result<Self> {
        Ok(Self {
            name: name.to_string(),
            size,
            buffer: Arc::new(RwLock::new(vec![0u8; size])),
        })
    }

    /// Write `data` into the segment starting at `offset`.
    ///
    /// # Errors
    /// Returns `SwarmError::Transport` when the range `offset..offset+len`
    /// does not fit in the segment. `checked_add` guards against wrap-around
    /// of `offset + len` in release builds, which would otherwise let a huge
    /// offset slip past the bounds test.
    pub fn write(&self, offset: usize, data: &[u8]) -> Result<()> {
        let end = offset
            .checked_add(data.len())
            .filter(|&end| end <= self.size)
            .ok_or_else(|| SwarmError::Transport("Buffer overflow".into()))?;
        let mut buffer = self.buffer.write();
        buffer[offset..end].copy_from_slice(data);
        Ok(())
    }

    /// Read `len` bytes starting at `offset`.
    ///
    /// # Errors
    /// Returns `SwarmError::Transport` when the requested range exceeds the
    /// segment (including integer overflow of `offset + len`).
    pub fn read(&self, offset: usize, len: usize) -> Result<Vec<u8>> {
        let end = offset
            .checked_add(len)
            .filter(|&end| end <= self.size)
            .ok_or_else(|| SwarmError::Transport("Buffer underflow".into()))?;
        let buffer = self.buffer.read();
        Ok(buffer[offset..end].to_vec())
    }

    /// Get segment info
    pub fn info(&self) -> SharedMemoryInfo {
        SharedMemoryInfo {
            name: self.name.clone(),
            size: self.size,
        }
    }
}
/// Descriptive info for a `SharedMemory` segment: its name and size in bytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SharedMemoryInfo {
    pub name: String,
    pub size: usize,
}
#[cfg(test)]
mod tests {
    use super::*;

    // Store one embedding, search with the same vector (near-perfect cosine
    // score), and fetch it back by id.
    #[test]
    fn test_vector_memory() {
        let memory = VectorMemory::new("test-agent", 100);
        let embedding = vec![0.1, 0.2, 0.3, 0.4];
        let id = memory.store("test content", embedding.clone()).unwrap();
        let results = memory.search(&embedding, 5);
        assert!(!results.is_empty());
        assert!(results[0].1 > 0.99); // Should be almost identical
        let entry = memory.get(&id);
        assert!(entry.is_some());
        assert_eq!(entry.unwrap().content, "test content");
    }

    // Round-trip a byte write/read at offset 0.
    #[test]
    fn test_shared_memory() {
        let shm = SharedMemory::new("test-segment", 1024).unwrap();
        let data = b"Hello, Swarm!";
        shm.write(0, data).unwrap();
        let read = shm.read(0, data.len()).unwrap();
        assert_eq!(read, data);
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,356 @@
//! Artifact Store - Local CID-based Storage
//!
//! Security principles:
//! - LOCAL-ONLY mode by default (no gateway fetch)
//! - Real IPFS CID support requires multiformats (not yet implemented)
//! - LRU cache with configurable size limits
//! - Chunk-based storage for large artifacts
use crate::p2p::crypto::CryptoV2;
use crate::p2p::identity::IdentityManager;
use crate::p2p::envelope::{ArtifactPointer, ArtifactType};
use parking_lot::RwLock;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use lz4_flex;
/// Chunk size for large artifacts (256KB); only used to compute the `chunks`
/// count reported in `StoredArtifactInfo` — storage itself is not chunked here.
pub const CHUNK_SIZE: usize = 256 * 1024;
/// Maximum artifact size (10MB); `store` rejects anything larger.
pub const MAX_ARTIFACT_SIZE: usize = 10 * 1024 * 1024;
/// Default cache size (100MB) for the LRU artifact cache.
pub const DEFAULT_CACHE_SIZE: usize = 100 * 1024 * 1024;
/// Stored artifact with metadata
#[derive(Debug, Clone)]
struct StoredArtifact {
    // Payload bytes as cached (possibly LZ4-compressed).
    data: Vec<u8>,
    // True when `data` is lz4_flex size-prepended compressed bytes.
    compressed: bool,
    // Epoch-millisecond creation time.
    created_at: u64,
}
/// Artifact Store with LRU eviction
///
/// Content-addressed (local CID) in-memory cache. `cache`, `lru_order` and
/// `current_size` are guarded by separate locks and must be kept consistent
/// by the methods that touch more than one of them.
pub struct ArtifactStore {
    // CID -> stored artifact bytes + metadata.
    cache: Arc<RwLock<HashMap<String, StoredArtifact>>>,
    // CIDs in least-recently-used order (front = oldest).
    lru_order: Arc<RwLock<VecDeque<String>>>,
    // Sum of stored (possibly compressed) byte lengths.
    current_size: Arc<RwLock<usize>>,
    // Byte budget; eviction keeps total at or below this.
    max_cache_size: usize,
    // Per-artifact size limit enforced by `store`.
    max_artifact_size: usize,
    /// If true, allows gateway fetch (requires real IPFS CIDs)
    /// Default: false (local-only mode)
    enable_gateway_fetch: bool,
}
impl ArtifactStore {
    /// Create new artifact store with default settings (local-only)
    pub fn new() -> Self {
        Self {
            cache: Arc::new(RwLock::new(HashMap::new())),
            lru_order: Arc::new(RwLock::new(VecDeque::new())),
            current_size: Arc::new(RwLock::new(0)),
            max_cache_size: DEFAULT_CACHE_SIZE,
            max_artifact_size: MAX_ARTIFACT_SIZE,
            enable_gateway_fetch: false, // LOCAL-ONLY by default
        }
    }

    /// Create with custom settings
    pub fn with_config(max_cache_size: usize, max_artifact_size: usize) -> Self {
        Self {
            cache: Arc::new(RwLock::new(HashMap::new())),
            lru_order: Arc::new(RwLock::new(VecDeque::new())),
            current_size: Arc::new(RwLock::new(0)),
            max_cache_size,
            max_artifact_size,
            enable_gateway_fetch: false,
        }
    }

    /// Store artifact and return local CID
    ///
    /// Optionally LZ4-compresses payloads larger than 1KB (keeping the raw
    /// bytes if compression does not shrink them). The CID is derived from
    /// the ORIGINAL bytes, so identical payloads always map to one CID.
    ///
    /// # Errors
    /// Returns an error when `data` exceeds `max_artifact_size`.
    pub fn store(&self, data: &[u8], compress: bool) -> Result<StoredArtifactInfo, String> {
        if data.len() > self.max_artifact_size {
            return Err(format!(
                "Artifact too large: {} > {}",
                data.len(),
                self.max_artifact_size
            ));
        }
        let (stored_data, compressed) = if compress && data.len() > 1024 {
            // Compress if larger than 1KB
            match lz4_flex::compress_prepend_size(data) {
                compressed if compressed.len() < data.len() => (compressed, true),
                _ => (data.to_vec(), false),
            }
        } else {
            (data.to_vec(), false)
        };
        // Generate local CID (NOT a real IPFS CID); hash of original bytes,
        // not the compressed form.
        let cid = CryptoV2::generate_local_cid(data);
        let chunks = (data.len() + CHUNK_SIZE - 1) / CHUNK_SIZE;

        // Deduplicate: storing byte-identical data yields the same CID.
        // Re-inserting would double-count `current_size` and push a duplicate
        // CID onto the LRU queue (which `delete` would only remove once), so
        // report the already-cached copy instead.
        {
            let cache = self.cache.read();
            if let Some(existing) = cache.get(&cid) {
                return Ok(StoredArtifactInfo {
                    cid: cid.clone(),
                    original_size: data.len(),
                    stored_size: existing.data.len(),
                    compressed: existing.compressed,
                    chunks,
                });
            }
        }

        // Make room BEFORE inserting so the new artifact is never evicted.
        self.evict_if_needed(stored_data.len());

        let stored_size = stored_data.len();
        let artifact = StoredArtifact {
            data: stored_data,
            compressed,
            created_at: chrono::Utc::now().timestamp_millis() as u64,
        };
        // Insert cache entry, LRU slot and size atomically with respect to
        // other mutators. Lock order: cache -> size -> lru (same as `delete`
        // and `evict_if_needed`).
        {
            let mut cache = self.cache.write();
            let mut size = self.current_size.write();
            let mut lru = self.lru_order.write();
            cache.insert(cid.clone(), artifact);
            *size += stored_size;
            lru.push_back(cid.clone());
        }
        Ok(StoredArtifactInfo {
            cid,
            original_size: data.len(),
            stored_size,
            compressed,
            chunks,
        })
    }

    /// Retrieve artifact by CID (local-only)
    ///
    /// Returns the decompressed payload, or None when the CID is not cached
    /// (no gateway fetch in local-only mode). A hit refreshes the entry's
    /// LRU position.
    pub fn retrieve(&self, cid: &str) -> Option<Vec<u8>> {
        // Only check local cache in local-only mode
        let cache = self.cache.read();
        let artifact = cache.get(cid)?;
        // Update LRU (move to back)
        {
            let mut lru = self.lru_order.write();
            if let Some(pos) = lru.iter().position(|c| c == cid) {
                lru.remove(pos);
                lru.push_back(cid.to_string());
            }
        }
        // Decompress if needed
        if artifact.compressed {
            lz4_flex::decompress_size_prepended(&artifact.data).ok()
        } else {
            Some(artifact.data.clone())
        }
    }

    /// Check if artifact exists locally
    pub fn exists(&self, cid: &str) -> bool {
        self.cache.read().contains_key(cid)
    }

    /// Delete artifact
    ///
    /// Returns true when the CID was cached. Lock order: cache -> size -> lru.
    pub fn delete(&self, cid: &str) -> bool {
        let mut cache = self.cache.write();
        if let Some(artifact) = cache.remove(cid) {
            let mut size = self.current_size.write();
            *size = size.saturating_sub(artifact.data.len());
            let mut lru = self.lru_order.write();
            if let Some(pos) = lru.iter().position(|c| c == cid) {
                lru.remove(pos);
            }
            true
        } else {
            false
        }
    }

    /// Get cache statistics
    pub fn stats(&self) -> ArtifactStoreStats {
        let cache = self.cache.read();
        // Read the size once so count and utilization agree.
        let current = *self.current_size.read();
        ArtifactStoreStats {
            artifact_count: cache.len(),
            current_size: current,
            max_size: self.max_cache_size,
            utilization: current as f64 / self.max_cache_size as f64,
        }
    }

    /// Evict oldest artifacts until `new_size` more bytes fit in the budget.
    ///
    /// Lock order is cache -> size -> lru, matching `delete` and the insert
    /// path in `store`. (The original acquired size -> lru -> cache here,
    /// which could ABBA-deadlock against a concurrent `delete`.)
    fn evict_if_needed(&self, new_size: usize) {
        let mut cache = self.cache.write();
        let mut size = self.current_size.write();
        let mut lru = self.lru_order.write();
        while *size + new_size > self.max_cache_size && !lru.is_empty() {
            if let Some(oldest_cid) = lru.pop_front() {
                if let Some(artifact) = cache.remove(&oldest_cid) {
                    *size = size.saturating_sub(artifact.data.len());
                    tracing::debug!("Evicted artifact: {}", oldest_cid);
                }
            }
        }
    }

    /// Create artifact pointer for publishing
    ///
    /// Builds a signed `ArtifactPointer` for a cached artifact: checksum is
    /// the first 16 bytes of the payload hash, schema hash the first 8 bytes
    /// of the type-name hash. Returns None when the CID is not cached or the
    /// stored bytes fail to decompress.
    pub fn create_pointer(
        &self,
        artifact_type: ArtifactType,
        agent_id: &str,
        cid: &str,
        dimensions: &str,
        identity: &IdentityManager,
    ) -> Option<ArtifactPointer> {
        let cache = self.cache.read();
        let artifact = cache.get(cid)?;
        let data = if artifact.compressed {
            lz4_flex::decompress_size_prepended(&artifact.data).ok()?
        } else {
            artifact.data.clone()
        };
        // Compute checksum (first 16 bytes of hash)
        let hash = CryptoV2::hash(&data);
        let mut checksum = [0u8; 16];
        checksum.copy_from_slice(&hash[..16]);
        // Schema hash (first 8 bytes of type name hash)
        let type_name = format!("{:?}", artifact_type);
        let type_hash = CryptoV2::hash(type_name.as_bytes());
        let mut schema_hash = [0u8; 8];
        schema_hash.copy_from_slice(&type_hash[..8]);
        let mut pointer = ArtifactPointer {
            artifact_type,
            agent_id: agent_id.to_string(),
            cid: cid.to_string(),
            version: 1,
            schema_hash,
            dimensions: dimensions.to_string(),
            checksum,
            timestamp: chrono::Utc::now().timestamp_millis() as u64,
            signature: [0u8; 64],
        };
        // Sign the canonical form, then embed the signature.
        let canonical = pointer.canonical_for_signing();
        pointer.signature = identity.sign(canonical.as_bytes());
        Some(pointer)
    }

    /// Clear all artifacts
    pub fn clear(&self) {
        self.cache.write().clear();
        self.lru_order.write().clear();
        *self.current_size.write() = 0;
    }
}
impl Default for ArtifactStore {
    /// Same as [`ArtifactStore::new`]: default limits, local-only mode.
    fn default() -> Self {
        Self::new()
    }
}
/// Information about stored artifact
///
/// Returned by `ArtifactStore::store`.
#[derive(Debug, Clone)]
pub struct StoredArtifactInfo {
    // Local content id ("local:" + hash prefix); NOT a real IPFS CID.
    pub cid: String,
    // Size of the payload as given by the caller.
    pub original_size: usize,
    // Size actually cached (smaller when compressed).
    pub stored_size: usize,
    // Whether the cached bytes are LZ4-compressed.
    pub compressed: bool,
    // Number of CHUNK_SIZE chunks the original payload would span.
    pub chunks: usize,
}
/// Artifact store statistics
///
/// Snapshot from `ArtifactStore::stats`; `utilization` is
/// current_size / max_size.
#[derive(Debug, Clone)]
pub struct ArtifactStoreStats {
    pub artifact_count: usize,
    pub current_size: usize,
    pub max_size: usize,
    pub utilization: f64,
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip: stored bytes come back unchanged and the CID uses the
    // "local:" scheme.
    #[test]
    fn test_store_and_retrieve() {
        let store = ArtifactStore::new();
        let data = b"Hello, World!";
        let info = store.store(data, false).unwrap();
        assert!(info.cid.starts_with("local:"));
        let retrieved = store.retrieve(&info.cid).unwrap();
        assert_eq!(data.as_slice(), retrieved.as_slice());
    }

    // Highly compressible data should be stored compressed and still
    // round-trip exactly.
    #[test]
    fn test_compression() {
        let store = ArtifactStore::new();
        // Create data that compresses well
        let data = vec![0u8; 10000];
        let info = store.store(&data, true).unwrap();
        assert!(info.compressed);
        assert!(info.stored_size < info.original_size);
        let retrieved = store.retrieve(&info.cid).unwrap();
        assert_eq!(data, retrieved);
    }

    // Per-artifact size limit is enforced; under-limit payloads still store.
    #[test]
    fn test_size_limit() {
        // Set max_artifact_size to 100 bytes
        let store = ArtifactStore::with_config(1024, 100);
        // Store artifact too large should fail
        let data = vec![0u8; 200];
        let result = store.store(&data, false);
        assert!(result.is_err()); // Should fail due to max_artifact_size
        // Small artifact should succeed
        let small_data = vec![0u8; 50];
        let result = store.store(&small_data, false);
        assert!(result.is_ok());
    }

    // With a cache comfortably above the combined payload size, nothing
    // should be evicted.
    #[test]
    fn test_eviction() {
        // Small cache for testing eviction
        let store = ArtifactStore::with_config(200, 100);
        // Store multiple small artifacts
        let info1 = store.store(b"artifact 1", false).unwrap();
        let info2 = store.store(b"artifact 2", false).unwrap();
        let info3 = store.store(b"artifact 3", false).unwrap();
        // All should be retrievable (cache is large enough)
        assert!(store.exists(&info1.cid));
        assert!(store.exists(&info2.cid));
        assert!(store.exists(&info3.cid));
    }

    // Unknown CIDs must return None rather than attempting a gateway fetch.
    #[test]
    fn test_local_only_mode() {
        let store = ArtifactStore::new();
        // Non-existent CID should return None (no gateway fetch)
        let result = store.retrieve("local:nonexistent");
        assert!(result.is_none());
    }
}

View File

@@ -0,0 +1,246 @@
//! Cryptographic Primitives - AES-256-GCM + Canonical Serialization
//!
//! Security principles:
//! - AES-256-GCM authenticated encryption
//! - Canonical JSON with sorted keys (not relying on insertion order)
//! - Proper IV/nonce handling
use aes_gcm::{
aead::{Aead, KeyInit},
Aes256Gcm, Nonce,
};
use sha2::{Sha256, Digest};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use rand::RngCore;
/// Encrypted payload with IV and auth tag
///
/// AES-256-GCM output split into its parts: ciphertext, the 96-bit nonce
/// used for encryption, and the 128-bit authentication tag.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptedPayload {
    pub ciphertext: Vec<u8>,
    pub iv: [u8; 12],
    pub tag: [u8; 16],
}
/// Crypto primitives for P2P communication
///
/// Stateless namespace: all operations are associated functions.
pub struct CryptoV2;
impl CryptoV2 {
    /// Encrypt data with AES-256-GCM
    ///
    /// Generates a fresh random 96-bit nonce from the OS RNG for every call,
    /// then splits the cipher output into ciphertext and the trailing 16-byte
    /// auth tag.
    pub fn encrypt(data: &[u8], key: &[u8; 32]) -> Result<EncryptedPayload, String> {
        let cipher = Aes256Gcm::new_from_slice(key)
            .map_err(|e| format!("Invalid key: {}", e))?;
        // Generate random IV
        let mut iv = [0u8; 12];
        rand::rngs::OsRng.fill_bytes(&mut iv);
        let nonce = Nonce::from_slice(&iv);
        // Encrypt (GCM includes auth tag in output)
        let ciphertext_with_tag = cipher.encrypt(nonce, data)
            .map_err(|e| format!("Encryption failed: {}", e))?;
        // Split ciphertext and tag (tag is last 16 bytes). The subtraction
        // cannot underflow: AES-GCM output always ends with the 16-byte tag,
        // even for empty plaintext.
        let tag_start = ciphertext_with_tag.len() - 16;
        let ciphertext = ciphertext_with_tag[..tag_start].to_vec();
        let mut tag = [0u8; 16];
        tag.copy_from_slice(&ciphertext_with_tag[tag_start..]);
        Ok(EncryptedPayload { ciphertext, iv, tag })
    }

    /// Decrypt data with AES-256-GCM
    ///
    /// Re-joins ciphertext + tag and verifies authenticity; any tampering
    /// (or wrong key/nonce) yields an error, not garbage plaintext.
    pub fn decrypt(encrypted: &EncryptedPayload, key: &[u8; 32]) -> Result<Vec<u8>, String> {
        let cipher = Aes256Gcm::new_from_slice(key)
            .map_err(|e| format!("Invalid key: {}", e))?;
        let nonce = Nonce::from_slice(&encrypted.iv);
        // Reconstruct ciphertext with tag
        let mut ciphertext_with_tag = encrypted.ciphertext.clone();
        ciphertext_with_tag.extend_from_slice(&encrypted.tag);
        cipher.decrypt(nonce, ciphertext_with_tag.as_ref())
            .map_err(|_| "Decryption failed: authentication failed".to_string())
    }

    /// SHA-256 hash
    pub fn hash(data: &[u8]) -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(data);
        hasher.finalize().into()
    }

    /// SHA-256 hash as hex string
    pub fn hash_hex(data: &[u8]) -> String {
        hex::encode(Self::hash(data))
    }

    /// Generate a simplified CID (local-only, not real IPFS)
    /// For real IPFS, use multiformats library
    ///
    /// Format: "local:" + first 32 hex chars (128 bits) of the SHA-256 hash.
    pub fn generate_local_cid(data: &[u8]) -> String {
        let hash = Self::hash_hex(data);
        format!("local:{}", &hash[..32])
    }
}
/// Canonical JSON serialization with sorted keys
/// This is critical for signature verification
///
/// Stateless namespace: all operations are associated functions.
pub struct CanonicalJson;
impl CanonicalJson {
/// Serialize value to canonical JSON (sorted keys, no spaces)
pub fn stringify(value: &Value) -> String {
Self::stringify_value(value)
}
/// Parse and re-serialize to canonical form
pub fn canonicalize(json_str: &str) -> Result<String, serde_json::Error> {
let value: Value = serde_json::from_str(json_str)?;
Ok(Self::stringify(&value))
}
/// Serialize any struct to canonical JSON
pub fn serialize<T: Serialize>(value: &T) -> Result<String, serde_json::Error> {
let json_value = serde_json::to_value(value)?;
Ok(Self::stringify(&json_value))
}
fn stringify_value(value: &Value) -> String {
match value {
Value::Null => "null".to_string(),
Value::Bool(b) => if *b { "true" } else { "false" }.to_string(),
Value::Number(n) => n.to_string(),
Value::String(s) => Self::stringify_string(s),
Value::Array(arr) => Self::stringify_array(arr),
Value::Object(obj) => Self::stringify_object(obj),
}
}
fn stringify_string(s: &str) -> String {
// Proper JSON string escaping
let mut result = String::with_capacity(s.len() + 2);
result.push('"');
for c in s.chars() {
match c {
'"' => result.push_str("\\\""),
'\\' => result.push_str("\\\\"),
'\n' => result.push_str("\\n"),
'\r' => result.push_str("\\r"),
'\t' => result.push_str("\\t"),
c if c.is_control() => {
result.push_str(&format!("\\u{:04x}", c as u32));
}
c => result.push(c),
}
}
result.push('"');
result
}
fn stringify_array(arr: &[Value]) -> String {
let items: Vec<String> = arr.iter().map(Self::stringify_value).collect();
format!("[{}]", items.join(","))
}
fn stringify_object(obj: &serde_json::Map<String, Value>) -> String {
// Sort keys alphabetically for deterministic output
let mut keys: Vec<&String> = obj.keys().collect();
keys.sort();
let pairs: Vec<String> = keys.iter()
.filter_map(|k| obj.get(*k).map(|v| {
format!("{}:{}", Self::stringify_string(k), Self::stringify_value(v))
}))
.collect();
format!("{{{}}}", pairs.join(","))
}
}
/// SHA-256 hash of the canonical JSON representation of any serializable value.
///
/// # Errors
/// Propagates any `serde_json` error from converting `value` to a JSON tree.
pub fn canonical_hash<T: Serialize>(value: &T) -> Result<[u8; 32], serde_json::Error> {
    let json = CanonicalJson::serialize(value)?;
    Ok(CryptoV2::hash(json.as_bytes()))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trip: decrypt(encrypt(x)) == x under the same key.
    #[test]
    fn test_encrypt_decrypt() {
        let key = [1u8; 32];
        let data = b"Hello, World!";
        let encrypted = CryptoV2::encrypt(data, &key).unwrap();
        let decrypted = CryptoV2::decrypt(&encrypted, &key).unwrap();
        assert_eq!(data.as_slice(), decrypted.as_slice());
    }
    // GCM authentication must reject a ciphertext decrypted with the wrong key.
    #[test]
    fn test_decrypt_wrong_key_fails() {
        let key1 = [1u8; 32];
        let key2 = [2u8; 32];
        let data = b"Secret data";
        let encrypted = CryptoV2::encrypt(data, &key1).unwrap();
        let result = CryptoV2::decrypt(&encrypted, &key2);
        assert!(result.is_err());
    }
    // Object keys must come out in ascending lexicographic order.
    #[test]
    fn test_canonical_json_sorted_keys() {
        let json = serde_json::json!({
            "z": 1,
            "a": 2,
            "m": 3
        });
        let canonical = CanonicalJson::stringify(&json);
        assert_eq!(canonical, r#"{"a":2,"m":3,"z":1}"#);
    }
    // Sorting applies recursively to nested objects; array order is preserved.
    #[test]
    fn test_canonical_json_nested() {
        let json = serde_json::json!({
            "b": {
                "z": 1,
                "a": 2
            },
            "a": [3, 2, 1]
        });
        let canonical = CanonicalJson::stringify(&json);
        assert_eq!(canonical, r#"{"a":[3,2,1],"b":{"a":2,"z":1}}"#);
    }
    // Control characters inside strings must be escaped, not emitted raw.
    #[test]
    fn test_canonical_json_escaping() {
        let json = serde_json::json!({
            "text": "hello\nworld"
        });
        let canonical = CanonicalJson::stringify(&json);
        assert_eq!(canonical, r#"{"text":"hello\nworld"}"#);
    }
    // Two structurally identical values must hash identically.
    #[test]
    fn test_canonical_hash_deterministic() {
        #[derive(Serialize)]
        struct Data {
            z: u32,
            a: u32,
        }
        let data1 = Data { z: 1, a: 2 };
        let data2 = Data { z: 1, a: 2 };
        let hash1 = canonical_hash(&data1).unwrap();
        let hash2 = canonical_hash(&data2).unwrap();
        assert_eq!(hash1, hash2);
    }
}

View File

@@ -0,0 +1,479 @@
//! Message Envelopes - Signed and Encrypted Messages
//!
//! Security principles:
//! - All messages are signed with Ed25519
//! - Signatures cover canonical representation of all fields
//! - TaskReceipt includes full execution binding
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use crate::p2p::crypto::{EncryptedPayload, CanonicalJson, CryptoV2};
/// Signed message envelope for P2P communication
///
/// Design:
/// - Header fields are signed but not encrypted
/// - Payload is encrypted with swarm key (for broadcast) or session key (for direct)
/// - Sender identity verified via registry, NOT from this envelope
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignedEnvelope {
    // Header (signed but not encrypted)
    /// Unique message identifier (dedup / tracing).
    pub message_id: String,
    /// Pub/sub topic this message belongs to.
    pub topic: String,
    /// Sender wall-clock time in ms since the Unix epoch.
    pub timestamp: u64,
    /// Claimed sender id — must be verified against the signed registry.
    pub sender_id: String,
    /// SHA-256 of the plaintext payload (hex-encoded on the wire).
    #[serde(with = "hex::serde")]
    pub payload_hash: [u8; 32],
    /// Random per-message nonce for replay protection.
    pub nonce: String,
    /// Sender's monotonic send counter (replay / ordering protection).
    pub counter: u64,
    // Signature covers canonical representation of all header fields
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
    // Encrypted payload (swarm key or session key)
    pub encrypted: EncryptedPayload,
}
impl SignedEnvelope {
/// Create canonical representation of header for signing
/// Keys are sorted alphabetically for deterministic output
pub fn canonical_header(&self) -> String {
// Create a struct with sorted fields for canonical serialization
let header = serde_json::json!({
"counter": self.counter,
"message_id": self.message_id,
"nonce": self.nonce,
"payload_hash": hex::encode(self.payload_hash),
"sender_id": self.sender_id,
"timestamp": self.timestamp,
"topic": self.topic,
});
CanonicalJson::stringify(&header)
}
/// Create unsigned envelope (for signing externally)
pub fn new_unsigned(
message_id: String,
topic: String,
sender_id: String,
payload: &[u8],
nonce: String,
counter: u64,
encrypted: EncryptedPayload,
) -> Self {
Self {
message_id,
topic,
timestamp: chrono::Utc::now().timestamp_millis() as u64,
sender_id,
payload_hash: CryptoV2::hash(payload),
nonce,
counter,
signature: [0u8; 64],
encrypted,
}
}
}
/// Task execution envelope with resource budgets
///
/// Fully describes one unit of WASM work: code and input locations, output
/// schema, sandbox budgets, and a canonical hash used to bind receipts back
/// to this exact envelope.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskEnvelope {
    /// Unique task identifier.
    pub task_id: String,
    pub module_cid: String, // WASM module location
    pub entrypoint: String, // Function to call
    pub input_cid: String, // Input data location
    /// Hash of the schema the output must conform to.
    #[serde(with = "hex::serde")]
    pub output_schema_hash: [u8; 32],
    /// Resource budgets for sandbox
    pub budgets: TaskBudgets,
    /// Requester info
    pub requester: String,
    /// Deadline in ms since the Unix epoch.
    pub deadline: u64,
    /// Scheduling priority; ordering semantics not visible here — TODO confirm.
    pub priority: u8,
    /// Canonical hash of this envelope (for receipts)
    #[serde(with = "hex::serde")]
    pub envelope_hash: [u8; 32],
}
/// Resource limits enforced by the sandbox for a single task execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskBudgets {
    pub fuel_limit: u64, // Wasmtime fuel
    pub memory_mb: u32, // Max memory in MB
    pub timeout_ms: u64, // Max execution time
}
impl TaskEnvelope {
    /// Create task envelope with computed hash
    ///
    /// `envelope_hash` is filled in from the canonical hash of every other
    /// field, so receipts can later be bound to this exact task definition.
    pub fn new(
        task_id: String,
        module_cid: String,
        entrypoint: String,
        input_cid: String,
        output_schema_hash: [u8; 32],
        budgets: TaskBudgets,
        requester: String,
        deadline: u64,
        priority: u8,
    ) -> Self {
        let mut envelope = Self {
            task_id,
            module_cid,
            entrypoint,
            input_cid,
            output_schema_hash,
            budgets,
            requester,
            deadline,
            priority,
            // Placeholder; replaced immediately below.
            envelope_hash: [0u8; 32],
        };
        // Compute envelope hash (excluding envelope_hash field)
        envelope.envelope_hash = envelope.compute_hash();
        envelope
    }
    /// Compute canonical hash of envelope (excluding envelope_hash itself)
    ///
    /// Field keys are listed alphabetically (and re-sorted by `CanonicalJson`)
    /// so the hash is deterministic across nodes. Any change to this field
    /// list breaks compatibility with previously issued receipts.
    fn compute_hash(&self) -> [u8; 32] {
        let for_hash = serde_json::json!({
            "budgets": {
                "fuel_limit": self.budgets.fuel_limit,
                "memory_mb": self.budgets.memory_mb,
                "timeout_ms": self.budgets.timeout_ms,
            },
            "deadline": self.deadline,
            "entrypoint": self.entrypoint,
            "input_cid": self.input_cid,
            "module_cid": self.module_cid,
            "output_schema_hash": hex::encode(self.output_schema_hash),
            "priority": self.priority,
            "requester": self.requester,
            "task_id": self.task_id,
        });
        let canonical = CanonicalJson::stringify(&for_hash);
        CryptoV2::hash(canonical.as_bytes())
    }
}
/// Task result receipt with full execution binding
///
/// Security: Signature covers ALL fields to prevent tampering
/// Including binding to original TaskEnvelope for traceability
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskReceipt {
    // Core result
    pub task_id: String,
    /// Agent id of the executor that produced this receipt.
    pub executor: String,
    /// CID where the result data is stored.
    pub result_cid: String,
    pub status: TaskStatus,
    // Resource usage
    pub fuel_used: u64,
    pub memory_peak_mb: u32,
    pub execution_ms: u64,
    // Execution binding (proves this receipt is for this specific execution)
    #[serde(with = "hex::serde")]
    pub input_hash: [u8; 32],
    #[serde(with = "hex::serde")]
    pub output_hash: [u8; 32],
    #[serde(with = "hex::serde")]
    pub module_hash: [u8; 32],
    pub start_timestamp: u64,
    pub end_timestamp: u64,
    // TaskEnvelope binding (proves this receipt matches original task)
    pub module_cid: String,
    pub input_cid: String,
    pub entrypoint: String,
    #[serde(with = "hex::serde")]
    pub output_schema_hash: [u8; 32],
    #[serde(with = "hex::serde")]
    pub task_envelope_hash: [u8; 32],
    // Signature covers ALL fields above
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// Terminal outcome of a task execution.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum TaskStatus {
    /// Completed normally and produced output.
    Success,
    /// Execution failed with an error.
    Error,
    /// Exceeded the timeout budget.
    Timeout,
    /// Exceeded the memory budget.
    OutOfMemory,
}
impl TaskReceipt {
    /// Create canonical representation for signing
    /// Includes ALL fields for full execution binding
    ///
    /// Keys are listed alphabetically (and re-sorted by `CanonicalJson`);
    /// changing this field list invalidates existing signatures.
    pub fn canonical_for_signing(&self) -> String {
        let for_signing = serde_json::json!({
            "end_timestamp": self.end_timestamp,
            "entrypoint": self.entrypoint,
            "execution_ms": self.execution_ms,
            "executor": self.executor,
            "fuel_used": self.fuel_used,
            "input_cid": self.input_cid,
            "input_hash": hex::encode(self.input_hash),
            "memory_peak_mb": self.memory_peak_mb,
            "module_cid": self.module_cid,
            "module_hash": hex::encode(self.module_hash),
            "output_hash": hex::encode(self.output_hash),
            "output_schema_hash": hex::encode(self.output_schema_hash),
            "result_cid": self.result_cid,
            "start_timestamp": self.start_timestamp,
            "status": format!("{:?}", self.status),
            "task_envelope_hash": hex::encode(self.task_envelope_hash),
            "task_id": self.task_id,
        });
        CanonicalJson::stringify(&for_signing)
    }
    /// Create unsigned receipt (for signing externally)
    ///
    /// Copies the envelope-binding fields (module/input CIDs, entrypoint,
    /// schema hash, envelope hash) straight from the originating task so the
    /// receipt provably refers to that exact envelope.
    pub fn new_unsigned(
        task: &TaskEnvelope,
        executor: String,
        result_cid: String,
        status: TaskStatus,
        fuel_used: u64,
        memory_peak_mb: u32,
        execution_ms: u64,
        input_hash: [u8; 32],
        output_hash: [u8; 32],
        module_hash: [u8; 32],
        start_timestamp: u64,
        end_timestamp: u64,
    ) -> Self {
        Self {
            task_id: task.task_id.clone(),
            executor,
            result_cid,
            status,
            fuel_used,
            memory_peak_mb,
            execution_ms,
            input_hash,
            output_hash,
            module_hash,
            start_timestamp,
            end_timestamp,
            module_cid: task.module_cid.clone(),
            input_cid: task.input_cid.clone(),
            entrypoint: task.entrypoint.clone(),
            output_schema_hash: task.output_schema_hash,
            task_envelope_hash: task.envelope_hash,
            // Zeroed placeholder; the signer fills this in.
            signature: [0u8; 64],
        }
    }
}
/// Signaling message for WebRTC (via GUN)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalingMessage {
    /// Offer, answer, or ICE candidate.
    pub signal_type: SignalType,
    /// Sender agent id (must be verified against the registry).
    pub from: String,
    /// Target agent id.
    pub to: String,
    /// SDP / ICE payload text.
    pub payload: String,
    /// SHA-256 of `payload`; the signature covers this hash, not the raw text.
    #[serde(with = "hex::serde")]
    pub payload_hash: [u8; 32],
    /// Creation time, ms since the Unix epoch.
    pub timestamp: u64,
    /// Expiry time, ms since the Unix epoch.
    pub expires_at: u64,
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// WebRTC signaling phase carried by a `SignalingMessage`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum SignalType {
    /// SDP offer.
    Offer,
    /// SDP answer.
    Answer,
    /// ICE candidate.
    Ice,
}
impl SignalingMessage {
/// Create canonical representation for signing
/// Does NOT include sender_pubkey - verified via registry
pub fn canonical_for_signing(&self) -> String {
let for_signing = serde_json::json!({
"expires_at": self.expires_at,
"from": self.from,
"payload_hash": hex::encode(self.payload_hash),
"signal_type": format!("{:?}", self.signal_type),
"timestamp": self.timestamp,
"to": self.to,
});
CanonicalJson::stringify(&for_signing)
}
/// Create unsigned signaling message
pub fn new_unsigned(
signal_type: SignalType,
from: String,
to: String,
payload: String,
ttl_ms: u64,
) -> Self {
let now = chrono::Utc::now().timestamp_millis() as u64;
Self {
signal_type,
from,
to,
payload_hash: CryptoV2::hash(payload.as_bytes()),
payload,
timestamp: now,
expires_at: now + ttl_ms,
signature: [0u8; 64],
}
}
/// Check if signal is expired
pub fn is_expired(&self) -> bool {
let now = chrono::Utc::now().timestamp_millis() as u64;
now > self.expires_at
}
}
/// Artifact pointer (small metadata that goes to GUN)
///
/// Points at a larger artifact stored elsewhere (by CID); signed so peers can
/// trust the pointer without fetching the data first.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactPointer {
    pub artifact_type: ArtifactType,
    /// Owner/publisher of the artifact.
    pub agent_id: String,
    /// Content identifier of the artifact data.
    pub cid: String,
    /// Monotonic artifact version.
    pub version: u32,
    /// Short hash identifying the artifact's schema.
    #[serde(with = "hex::serde")]
    pub schema_hash: [u8; 8],
    /// Free-form dimensions description (format not visible here — TODO confirm).
    pub dimensions: String,
    /// Truncated checksum of the artifact contents.
    #[serde(with = "hex::serde")]
    pub checksum: [u8; 16],
    /// Publish time, ms since the Unix epoch.
    pub timestamp: u64,
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// Kind of artifact an `ArtifactPointer` refers to.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ArtifactType {
    /// Reinforcement-learning Q-table.
    QTable,
    /// Vector memory snapshot.
    MemoryVectors,
    /// Model weights.
    ModelWeights,
    /// Execution/learning trajectory.
    Trajectory,
}
impl ArtifactPointer {
    /// Create canonical representation for signing
    ///
    /// Keys are listed alphabetically (and re-sorted by `CanonicalJson`);
    /// the signature field itself is excluded. Changing this field list
    /// invalidates existing signatures.
    pub fn canonical_for_signing(&self) -> String {
        let for_signing = serde_json::json!({
            "agent_id": self.agent_id,
            "artifact_type": format!("{:?}", self.artifact_type),
            "checksum": hex::encode(self.checksum),
            "cid": self.cid,
            "dimensions": self.dimensions,
            "schema_hash": hex::encode(self.schema_hash),
            "timestamp": self.timestamp,
            "version": self.version,
        });
        CanonicalJson::stringify(&for_signing)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Header canonicalization must be deterministic and include every header field.
    #[test]
    fn test_envelope_canonical_header() {
        let encrypted = EncryptedPayload {
            ciphertext: vec![1, 2, 3],
            iv: [0u8; 12],
            tag: [0u8; 16],
        };
        let envelope = SignedEnvelope::new_unsigned(
            "msg-001".to_string(),
            "test-topic".to_string(),
            "sender-001".to_string(),
            b"test payload",
            "abc123".to_string(),
            1,
            encrypted,
        );
        let canonical1 = envelope.canonical_header();
        let canonical2 = envelope.canonical_header();
        // Must be deterministic
        assert_eq!(canonical1, canonical2);
        // Must contain all fields
        assert!(canonical1.contains("msg-001"));
        assert!(canonical1.contains("test-topic"));
        assert!(canonical1.contains("sender-001"));
    }
    // A receipt's signing input must bind both the execution artifacts and
    // the originating TaskEnvelope.
    #[test]
    fn test_task_receipt_includes_all_binding_fields() {
        let task = TaskEnvelope::new(
            "task-001".to_string(),
            "local:abc123".to_string(),
            "process".to_string(),
            "local:input123".to_string(),
            [0u8; 32],
            TaskBudgets {
                fuel_limit: 1000000,
                memory_mb: 128,
                timeout_ms: 30000,
            },
            "requester-001".to_string(),
            chrono::Utc::now().timestamp_millis() as u64 + 60000,
            1,
        );
        let receipt = TaskReceipt::new_unsigned(
            &task,
            "executor-001".to_string(),
            "local:result123".to_string(),
            TaskStatus::Success,
            500000,
            64,
            1500,
            [1u8; 32],
            [2u8; 32],
            [3u8; 32],
            1000,
            2500,
        );
        let canonical = receipt.canonical_for_signing();
        // Must include execution binding fields
        assert!(canonical.contains("input_hash"));
        assert!(canonical.contains("output_hash"));
        assert!(canonical.contains("module_hash"));
        // Must include task envelope binding
        assert!(canonical.contains("module_cid"));
        assert!(canonical.contains("input_cid"));
        assert!(canonical.contains("entrypoint"));
        assert!(canonical.contains("task_envelope_hash"));
    }
    // The signaling signature input must never embed a public key; keys come
    // only from the signed registry.
    #[test]
    fn test_signaling_message_no_pubkey_in_signature() {
        let signal = SignalingMessage::new_unsigned(
            SignalType::Offer,
            "alice".to_string(),
            "bob".to_string(),
            "sdp data here".to_string(),
            60000,
        );
        let canonical = signal.canonical_for_signing();
        // Should NOT contain any pubkey field
        assert!(!canonical.contains("pubkey"));
        assert!(!canonical.contains("public_key"));
    }
}

View File

@@ -0,0 +1,431 @@
//! Identity Manager - Ed25519 + X25519 Key Management
//!
//! Security principles:
//! - Ed25519 for signing (identity keys)
//! - X25519 for key exchange (session keys)
//! - Never trust pubkeys from envelopes - only from signed registry
//! - Per-sender nonce tracking with timestamps for expiry
//! - Separate send/receive counters
use ed25519_dalek::{SigningKey, VerifyingKey, Signature, Signer, Verifier};
use x25519_dalek::{StaticSecret, PublicKey as X25519PublicKey};
use hkdf::Hkdf;
use sha2::Sha256;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
/// Ed25519 key pair for identity/signing
#[derive(Clone)]
pub struct KeyPair {
    // Private signing half; never serialized or exposed.
    signing_key: SigningKey,
    // Cached public verification half derived from `signing_key`.
    verifying_key: VerifyingKey,
}
impl KeyPair {
    /// Generate a fresh random Ed25519 keypair from the OS RNG.
    pub fn generate() -> Self {
        let signing_key = SigningKey::generate(&mut OsRng);
        Self {
            verifying_key: signing_key.verifying_key(),
            signing_key,
        }
    }
    /// Raw 32-byte Ed25519 public key.
    pub fn public_key_bytes(&self) -> [u8; 32] {
        self.verifying_key.to_bytes()
    }
    /// Public key encoded with standard base64.
    pub fn public_key_base64(&self) -> String {
        let bytes = self.public_key_bytes();
        base64::Engine::encode(&base64::engine::general_purpose::STANDARD, bytes)
    }
    /// Sign a message with the private signing key.
    pub fn sign(&self, message: &[u8]) -> Signature {
        self.signing_key.sign(message)
    }
    /// Verify `signature` over `message` against a raw public key.
    ///
    /// Returns `false` (rather than erroring) for malformed keys or invalid
    /// signatures.
    pub fn verify(public_key: &[u8; 32], message: &[u8], signature: &[u8; 64]) -> bool {
        match VerifyingKey::from_bytes(public_key) {
            Ok(verifying_key) => {
                let sig = Signature::from_bytes(signature);
                verifying_key.verify(message, &sig).is_ok()
            }
            Err(_) => false,
        }
    }
}
/// Registered member in the swarm (from signed registry)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegisteredMember {
    /// Agent identifier this registration claims.
    pub agent_id: String,
    /// Identity/signing key (Ed25519).
    #[serde(with = "hex::serde")]
    pub ed25519_public_key: [u8; 32],
    /// Key-exchange key (X25519) for session key derivation.
    #[serde(with = "hex::serde")]
    pub x25519_public_key: [u8; 32],
    /// Advertised capability strings.
    pub capabilities: Vec<String>,
    /// Registration time, ms since the Unix epoch.
    pub joined_at: u64,
    /// Last heartbeat time, ms since the Unix epoch (NOT covered by signature).
    pub last_heartbeat: u64,
    /// Self-signature over `canonical_data()` under `ed25519_public_key`.
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
impl RegisteredMember {
    /// Check the self-signature over this registration.
    ///
    /// The signature must cover the canonical byte layout from
    /// `canonical_data` and verify under this member's own Ed25519 key.
    pub fn verify(&self) -> bool {
        KeyPair::verify(&self.ed25519_public_key, &self.canonical_data(), &self.signature)
    }
    /// Canonical byte layout the registration signature covers:
    /// agent_id, NUL separator, Ed25519 key, X25519 key, each capability
    /// followed by a NUL, then `joined_at` as a little-endian u64.
    pub fn canonical_data(&self) -> Vec<u8> {
        let mut buf = Vec::new();
        buf.extend_from_slice(self.agent_id.as_bytes());
        buf.push(0); // separator between id and key material
        buf.extend_from_slice(&self.ed25519_public_key);
        buf.extend_from_slice(&self.x25519_public_key);
        for capability in &self.capabilities {
            buf.extend_from_slice(capability.as_bytes());
            buf.push(0);
        }
        buf.extend_from_slice(&self.joined_at.to_le_bytes());
        buf
    }
}
/// Per-sender nonce tracking entry
struct NonceEntry {
    // Sender-supplied timestamp (ms since epoch) used for expiry cleanup.
    timestamp: u64,
}
/// Identity Manager with registry-based trust
#[derive(Default)]
pub struct IdentityManager {
    /// Our Ed25519 identity keypair
    identity_key: KeyPair,
    /// Our X25519 key for ECDH
    x25519_secret: StaticSecret,
    x25519_public: X25519PublicKey,
    /// Derived session keys per peer (cache)
    /// Keyed by "peer_id:swarm_id".
    session_keys: Arc<RwLock<HashMap<String, [u8; 32]>>>,
    /// Registry of verified members - THE SOURCE OF TRUTH
    /// Never trust keys from envelopes, only from here
    member_registry: Arc<RwLock<HashMap<String, RegisteredMember>>>,
    /// Per-sender nonce tracking: senderId -> (nonce -> entry)
    seen_nonces: Arc<RwLock<HashMap<String, HashMap<String, NonceEntry>>>>,
    /// Local monotonic send counter
    send_counter: Arc<RwLock<u64>>,
    /// Per-peer receive counters
    recv_counters: Arc<RwLock<HashMap<String, u64>>>,
    /// Max nonce age (5 minutes)
    max_nonce_age_ms: u64,
}
impl IdentityManager {
    /// Create a manager with freshly generated Ed25519 and X25519 keys and
    /// empty registry/replay-protection state.
    pub fn new() -> Self {
        let identity_key = KeyPair::generate();
        let x25519_secret = StaticSecret::random_from_rng(OsRng);
        let x25519_public = X25519PublicKey::from(&x25519_secret);
        Self {
            identity_key,
            x25519_secret,
            x25519_public,
            session_keys: Arc::new(RwLock::new(HashMap::new())),
            member_registry: Arc::new(RwLock::new(HashMap::new())),
            seen_nonces: Arc::new(RwLock::new(HashMap::new())),
            send_counter: Arc::new(RwLock::new(0)),
            recv_counters: Arc::new(RwLock::new(HashMap::new())),
            max_nonce_age_ms: 300_000, // 5 minutes
        }
    }
    /// Get our Ed25519 public key
    pub fn public_key(&self) -> [u8; 32] {
        self.identity_key.public_key_bytes()
    }
    /// Get our X25519 public key
    pub fn x25519_public_key(&self) -> [u8; 32] {
        self.x25519_public.to_bytes()
    }
    /// Sign data with our identity key
    pub fn sign(&self, data: &[u8]) -> [u8; 64] {
        self.identity_key.sign(data).to_bytes()
    }
    /// Verify signature using ONLY the registry key
    /// Never use a key from the message itself
    pub fn verify_from_registry(&self, sender_id: &str, data: &[u8], signature: &[u8; 64]) -> bool {
        let registry = self.member_registry.read();
        let Some(member) = registry.get(sender_id) else {
            tracing::warn!("verify_from_registry: sender not in registry: {}", sender_id);
            return false;
        };
        KeyPair::verify(&member.ed25519_public_key, data, signature)
    }
    /// Register a member from a signed registration
    /// Verifies the signature before adding
    pub fn register_member(&self, member: RegisteredMember) -> bool {
        if !member.verify() {
            tracing::warn!("register_member: invalid signature for {}", member.agent_id);
            return false;
        }
        let mut registry = self.member_registry.write();
        // If already registered, only accept if from same key (prevents
        // identity takeover by re-registering under a new keypair)
        if let Some(existing) = registry.get(&member.agent_id) {
            if existing.ed25519_public_key != member.ed25519_public_key {
                tracing::warn!("register_member: key mismatch for {}", member.agent_id);
                return false;
            }
        }
        registry.insert(member.agent_id.clone(), member);
        true
    }
    /// Get registered member by ID
    pub fn get_member(&self, agent_id: &str) -> Option<RegisteredMember> {
        self.member_registry.read().get(agent_id).cloned()
    }
    /// Update member heartbeat
    pub fn update_heartbeat(&self, agent_id: &str) {
        let mut registry = self.member_registry.write();
        if let Some(member) = registry.get_mut(agent_id) {
            member.last_heartbeat = chrono::Utc::now().timestamp_millis() as u64;
        }
    }
    /// Get active members (heartbeat within threshold)
    pub fn get_active_members(&self, heartbeat_threshold_ms: u64) -> Vec<RegisteredMember> {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        let registry = self.member_registry.read();
        registry.values()
            // saturating_sub: a heartbeat stamped marginally after `now`
            // (concurrent update, clock skew) must count as age 0, not
            // underflow-panic in debug builds / wrap huge in release.
            .filter(|m| now.saturating_sub(m.last_heartbeat) < heartbeat_threshold_ms)
            .cloned()
            .collect()
    }
    /// Create our registration (for publishing to registry)
    pub fn create_registration(&self, agent_id: &str, capabilities: Vec<String>) -> RegisteredMember {
        let joined_at = chrono::Utc::now().timestamp_millis() as u64;
        let mut member = RegisteredMember {
            agent_id: agent_id.to_string(),
            ed25519_public_key: self.public_key(),
            x25519_public_key: self.x25519_public_key(),
            capabilities,
            joined_at,
            last_heartbeat: joined_at,
            signature: [0u8; 64],
        };
        // Sign the canonical data
        let canonical = member.canonical_data();
        member.signature = self.sign(&canonical);
        member
    }
    /// Derive session key with peer using X25519 ECDH + HKDF
    /// Uses ONLY the X25519 key from the registry
    pub fn derive_session_key(&self, peer_id: &str, swarm_id: &str) -> Option<[u8; 32]> {
        let cache_key = format!("{}:{}", peer_id, swarm_id);
        // Check cache
        {
            let cache = self.session_keys.read();
            if let Some(key) = cache.get(&cache_key) {
                return Some(*key);
            }
        }
        // Get peer's X25519 key from registry ONLY
        let registry = self.member_registry.read();
        let peer = registry.get(peer_id)?;
        let peer_x25519 = X25519PublicKey::from(peer.x25519_public_key);
        // X25519 ECDH
        let shared_secret = self.x25519_secret.diffie_hellman(&peer_x25519);
        // HKDF with stable salt from both public keys
        let my_key = self.x25519_public.as_bytes();
        let peer_key = peer.x25519_public_key;
        // Salt = sha256(min(pubA, pubB) || max(pubA, pubB)) — ordering makes
        // the salt identical no matter which side derives the key
        use sha2::Digest;
        let salt = if my_key < &peer_key {
            let mut hasher = Sha256::new();
            hasher.update(my_key);
            hasher.update(&peer_key);
            hasher.finalize()
        } else {
            let mut hasher = Sha256::new();
            hasher.update(&peer_key);
            hasher.update(my_key);
            hasher.finalize()
        };
        // Info only includes swarm_id (salt already includes both parties' public keys for pair separation)
        // This ensures both parties derive the same key
        let info = format!("p2p-swarm-v2:{}", swarm_id);
        let hkdf = Hkdf::<Sha256>::new(Some(&salt), shared_secret.as_bytes());
        let mut session_key = [0u8; 32];
        hkdf.expand(info.as_bytes(), &mut session_key).ok()?;
        // Cache
        self.session_keys.write().insert(cache_key, session_key);
        Some(session_key)
    }
    /// Generate cryptographic nonce
    pub fn generate_nonce() -> String {
        let mut bytes = [0u8; 16];
        rand::RngCore::fill_bytes(&mut OsRng, &mut bytes);
        hex::encode(bytes)
    }
    /// Check nonce validity with per-sender tracking
    ///
    /// Accepts timestamps up to 60s in the future (clock skew); see
    /// `cleanup_nonces` which must tolerate such future-dated entries.
    pub fn check_nonce(&self, nonce: &str, timestamp: u64, sender_id: &str) -> bool {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        // Reject old messages
        if now.saturating_sub(timestamp) > self.max_nonce_age_ms {
            return false;
        }
        // Reject future timestamps (1 minute tolerance for clock skew)
        if timestamp > now + 60_000 {
            return false;
        }
        let mut nonces = self.seen_nonces.write();
        let sender_nonces = nonces.entry(sender_id.to_string()).or_insert_with(HashMap::new);
        // Reject replayed nonces
        if sender_nonces.contains_key(nonce) {
            return false;
        }
        // Record nonce
        sender_nonces.insert(nonce.to_string(), NonceEntry { timestamp });
        true
    }
    /// Cleanup expired nonces
    pub fn cleanup_nonces(&self) {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        let mut nonces = self.seen_nonces.write();
        for sender_nonces in nonces.values_mut() {
            // saturating_sub: check_nonce accepts timestamps up to 60s in the
            // future, so `now - timestamp` could underflow here (panic in
            // debug; in release it would wrap huge and evict a *fresh* nonce,
            // reopening the replay window). Future-dated entries get age 0.
            sender_nonces.retain(|_, entry| now.saturating_sub(entry.timestamp) < self.max_nonce_age_ms);
        }
        nonces.retain(|_, v| !v.is_empty());
    }
    /// Get next send counter
    pub fn next_send_counter(&self) -> u64 {
        let mut counter = self.send_counter.write();
        *counter += 1;
        *counter
    }
    /// Validate receive counter (must be > last seen from peer)
    pub fn validate_recv_counter(&self, peer_id: &str, counter: u64) -> bool {
        let mut counters = self.recv_counters.write();
        let last_seen = counters.get(peer_id).copied().unwrap_or(0);
        if counter <= last_seen {
            return false;
        }
        counters.insert(peer_id.to_string(), counter);
        true
    }
    /// Rotate session key for peer
    pub fn rotate_session_key(&self, peer_id: &str) {
        self.session_keys.write().retain(|k, _| !k.starts_with(peer_id));
    }
}
impl Default for IdentityManager {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // A signature made by a keypair must verify under its own public key.
    #[test]
    fn test_keypair_sign_verify() {
        let keypair = KeyPair::generate();
        let message = b"test message";
        let signature = keypair.sign(message);
        assert!(KeyPair::verify(
            &keypair.public_key_bytes(),
            message,
            &signature.to_bytes()
        ));
    }
    // A self-created registration must self-verify and be accepted.
    #[test]
    fn test_member_registration() {
        let identity = IdentityManager::new();
        let registration = identity.create_registration("test-agent", vec!["executor".to_string()]);
        assert!(registration.verify());
        assert!(identity.register_member(registration));
    }
    // ECDH is symmetric: both peers must derive the same session key.
    #[test]
    fn test_session_key_derivation() {
        let alice = IdentityManager::new();
        let bob = IdentityManager::new();
        // Register each other with valid capabilities
        let alice_reg = alice.create_registration("alice", vec!["worker".to_string()]);
        let bob_reg = bob.create_registration("bob", vec!["worker".to_string()]);
        // Register in each other's registry
        alice.register_member(bob_reg.clone());
        bob.register_member(alice_reg.clone());
        // Derive session keys - both should derive the same key
        let alice_key = alice.derive_session_key("bob", "test-swarm").unwrap();
        let bob_key = bob.derive_session_key("alice", "test-swarm").unwrap();
        // Keys should match (symmetric ECDH)
        assert_eq!(alice_key, bob_key, "Session keys should be symmetric");
    }
    // Nonces are tracked per sender: replays rejected, other senders unaffected.
    #[test]
    fn test_nonce_replay_protection() {
        let identity = IdentityManager::new();
        let nonce = IdentityManager::generate_nonce();
        let timestamp = chrono::Utc::now().timestamp_millis() as u64;
        // First use should succeed
        assert!(identity.check_nonce(&nonce, timestamp, "sender-1"));
        // Replay should fail
        assert!(!identity.check_nonce(&nonce, timestamp, "sender-1"));
        // Different sender can use same nonce (per-sender tracking)
        assert!(identity.check_nonce(&nonce, timestamp, "sender-2"));
    }
}

View File

@@ -0,0 +1,53 @@
//! P2P Swarm v2 - Production Grade Rust Implementation
//!
//! Features:
//! - Ed25519 identity keys + X25519 ephemeral keys for ECDH
//! - AES-256-GCM authenticated encryption
//! - Message replay protection (nonces, counters, timestamps)
//! - GUN-based signaling (no external PeerServer)
//! - IPFS CID pointers for large payloads
//! - Ed25519 signatures on all messages
//! - Relay health monitoring
//! - Task execution envelope with resource budgets
//! - WASM compatible
//! - Quantization (4-32x compression)
//! - Hyperdimensional Computing for pattern matching
// Core protocol modules
mod identity;
mod crypto;
mod relay;
mod artifact;
mod envelope;
// Full swarm coordinator needs a native (non-WASM) runtime
#[cfg(feature = "native")]
mod swarm;
mod advanced;
// Public crate surface: identity & trust
pub use identity::{IdentityManager, KeyPair, RegisteredMember};
// Encryption, hashing, canonical JSON
pub use crypto::{CryptoV2, EncryptedPayload, CanonicalJson};
// Relay health tracking
pub use relay::RelayManager;
// Content-addressed artifact storage
pub use artifact::ArtifactStore;
// Signed/encrypted message and task types
pub use envelope::{SignedEnvelope, TaskEnvelope, TaskReceipt, ArtifactPointer};
#[cfg(feature = "native")]
pub use swarm::{P2PSwarmV2, SwarmStatus};
pub use advanced::{
    // Quantization
    ScalarQuantized, BinaryQuantized, CompressedData,
    // Hyperdimensional Computing
    Hypervector, HdcMemory, HDC_DIMENSION,
    // Adaptive compression
    AdaptiveCompressor, NetworkCondition,
    // Pattern routing
    PatternRouter,
    // HNSW vector index
    HnswIndex,
    // Post-quantum crypto
    HybridKeyPair, HybridPublicKey, HybridSignature,
    // Spiking neural networks
    LIFNeuron, SpikingNetwork,
    // Semantic embeddings
    SemanticEmbedder, SemanticTaskMatcher,
    // Raft consensus
    RaftNode, RaftState, LogEntry,
    RaftVoteRequest, RaftVoteResponse,
    RaftAppendEntries, RaftAppendEntriesResponse,
};
View File

@@ -0,0 +1,219 @@
//! Relay Manager - GUN Relay Health Monitoring
//!
//! Tracks relay health, latency, and provides automatic failover
use std::collections::HashMap;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Relay health status
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelayStatus {
    /// Relay endpoint URL.
    pub url: String,
    /// Whether the relay is currently considered usable.
    pub healthy: bool,
    /// Last probe time, ms since the Unix epoch (0 = never checked).
    pub last_check: u64,
    /// Latency observed on the last successful probe.
    pub latency_ms: u32,
    /// Consecutive failure count; reset to 0 on success.
    pub failures: u8,
}
impl RelayStatus {
fn new(url: String) -> Self {
Self {
url,
healthy: true, // Assume healthy initially
last_check: 0,
latency_ms: 0,
failures: 0,
}
}
}
/// Bootstrap GUN relays (public infrastructure)
///
/// NOTE(review): these are Heroku-hosted community relays — verify they are
/// still reachable (Heroku's free tier was retired) and prefer operating your
/// own relays in production.
pub const BOOTSTRAP_RELAYS: &[&str] = &[
    "https://gun-manhattan.herokuapp.com/gun",
    "https://gun-us.herokuapp.com/gun",
    "https://gun-eu.herokuapp.com/gun",
];
/// Relay Manager for health tracking and failover
pub struct RelayManager {
    // Relay state keyed by URL; shared for concurrent read/write access.
    relays: Arc<RwLock<HashMap<String, RelayStatus>>>,
    // Consecutive failures before a relay is marked unhealthy.
    max_failures: u8,
}
impl RelayManager {
/// Create with bootstrap relays
pub fn new() -> Self {
Self::with_relays(BOOTSTRAP_RELAYS.iter().map(|s| s.to_string()).collect())
}
/// Create with custom relays
pub fn with_relays(relay_urls: Vec<String>) -> Self {
let mut relays = HashMap::new();
for url in relay_urls {
relays.insert(url.clone(), RelayStatus::new(url));
}
Self {
relays: Arc::new(RwLock::new(relays)),
max_failures: 3,
}
}
/// Get list of healthy relays
pub fn get_healthy_relays(&self) -> Vec<String> {
self.relays.read()
.values()
.filter(|r| r.healthy)
.map(|r| r.url.clone())
.collect()
}
/// Get all relays (for initial connection attempts)
pub fn get_all_relays(&self) -> Vec<String> {
self.relays.read()
.keys()
.cloned()
.collect()
}
/// Mark relay as successful
pub fn mark_success(&self, url: &str, latency_ms: u32) {
let mut relays = self.relays.write();
if let Some(relay) = relays.get_mut(url) {
relay.healthy = true;
relay.latency_ms = latency_ms;
relay.failures = 0;
relay.last_check = chrono::Utc::now().timestamp_millis() as u64;
}
}
/// Mark relay as failed
pub fn mark_failed(&self, url: &str) {
let mut relays = self.relays.write();
if let Some(relay) = relays.get_mut(url) {
relay.failures += 1;
relay.last_check = chrono::Utc::now().timestamp_millis() as u64;
if relay.failures >= self.max_failures {
relay.healthy = false;
tracing::warn!("Relay marked unhealthy: {} (failures: {})", url, relay.failures);
}
}
}
/// Add new relay
pub fn add_relay(&self, url: String) {
let mut relays = self.relays.write();
if !relays.contains_key(&url) {
relays.insert(url.clone(), RelayStatus::new(url));
}
}
/// Remove relay
pub fn remove_relay(&self, url: &str) {
self.relays.write().remove(url);
}
/// Get relay metrics
pub fn get_metrics(&self) -> RelayMetrics {
let relays = self.relays.read();
let healthy_relays: Vec<_> = relays.values().filter(|r| r.healthy).collect();
let avg_latency = if healthy_relays.is_empty() {
0
} else {
healthy_relays.iter().map(|r| r.latency_ms as u64).sum::<u64>() / healthy_relays.len() as u64
};
RelayMetrics {
total: relays.len(),
healthy: healthy_relays.len(),
avg_latency_ms: avg_latency as u32,
}
}
    /// Export working relay set (for persistence)
    ///
    /// Alias for `get_healthy_relays`: only relays that are currently healthy
    /// are worth persisting for the next session.
    pub fn export_working_set(&self) -> Vec<String> {
        self.get_healthy_relays()
    }
/// Import relay set
pub fn import_relays(&self, urls: Vec<String>) {
for url in urls {
self.add_relay(url);
}
}
/// Reset failed relay (give it another chance)
pub fn reset_relay(&self, url: &str) {
let mut relays = self.relays.write();
if let Some(relay) = relays.get_mut(url) {
relay.healthy = true;
relay.failures = 0;
}
}
/// Get relay by URL
pub fn get_relay(&self, url: &str) -> Option<RelayStatus> {
self.relays.read().get(url).cloned()
}
}
impl Default for RelayManager {
    // Delegates to `new()`, which seeds the manager with the bootstrap relays.
    fn default() -> Self {
        Self::new()
    }
}
/// Relay health metrics
///
/// Aggregate snapshot produced by `RelayManager::get_metrics`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelayMetrics {
    /// Total number of known relays, healthy or not.
    pub total: usize,
    /// Number of relays currently marked healthy.
    pub healthy: usize,
    /// Mean latency over healthy relays; 0 when none are healthy.
    pub avg_latency_ms: u32,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_relay_manager() {
        let manager = RelayManager::new();
        // Should start with bootstrap relays.
        let relays = manager.get_all_relays();
        assert!(!relays.is_empty());
        // All should be healthy initially.
        let healthy = manager.get_healthy_relays();
        assert_eq!(relays.len(), healthy.len());
    }

    #[test]
    fn test_relay_failure_tracking() {
        let manager = RelayManager::with_relays(vec!["http://test:8080".to_string()]);
        // Two failures keep the relay under the `max_failures = 3` threshold.
        manager.mark_failed("http://test:8080");
        manager.mark_failed("http://test:8080");
        // `assert_eq!` instead of `assert!(x == y)` for a useful failure message.
        assert_eq!(manager.get_healthy_relays().len(), 1); // Still healthy
        // Third failure crosses the threshold and demotes the relay.
        manager.mark_failed("http://test:8080");
        assert!(manager.get_healthy_relays().is_empty()); // Now unhealthy
    }

    #[test]
    fn test_relay_success_resets_failures() {
        let manager = RelayManager::with_relays(vec!["http://test:8080".to_string()]);
        manager.mark_failed("http://test:8080");
        manager.mark_failed("http://test:8080");
        manager.mark_success("http://test:8080", 50);
        // A success resets the failure counter, so the relay stays healthy;
        // assert the exact surviving set rather than just non-emptiness.
        assert_eq!(manager.get_healthy_relays(), vec!["http://test:8080".to_string()]);
    }
}

View File

@@ -0,0 +1,611 @@
//! P2P Swarm v2 - Production Grade Coordinator
//!
//! Features:
//! - Registry-based identity (never trust envelope keys)
//! - Membership watcher with heartbeats
//! - Task executor loop with claiming
//! - Artifact publishing and retrieval
use crate::p2p::{
identity::{IdentityManager, RegisteredMember},
crypto::{CryptoV2, CanonicalJson},
relay::RelayManager,
artifact::ArtifactStore,
envelope::{
SignedEnvelope, TaskEnvelope, TaskReceipt, TaskStatus, TaskBudgets,
SignalingMessage, SignalType, ArtifactPointer, ArtifactType,
},
};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc;
/// Heartbeat interval (30 seconds) — cadence of the background heartbeat loop.
pub const HEARTBEAT_INTERVAL_MS: u64 = 30_000;
/// Heartbeat timeout (2 minutes) — peers without a heartbeat for this long
/// are excluded from the active-member set.
pub const HEARTBEAT_TIMEOUT_MS: u64 = 120_000;
/// Task claim window (5 seconds) — an existing claim younger than this blocks
/// competing claims on the same task.
pub const TASK_CLAIM_WINDOW_MS: u64 = 5_000;
/// Swarm status
///
/// Point-in-time snapshot returned by `P2PSwarmV2::status`.
#[derive(Debug, Clone)]
pub struct SwarmStatus {
    pub connected: bool,
    /// Swarm ID: hex of the first 8 bytes of the swarm-key hash.
    pub swarm_id: String,
    pub agent_id: String,
    /// Our public key as reported by the identity manager.
    pub public_key: [u8; 32],
    /// Peers with a heartbeat within `HEARTBEAT_TIMEOUT_MS`.
    pub active_peers: usize,
    pub pending_tasks: usize,
    pub relay_metrics: crate::p2p::relay::RelayMetrics,
}
/// Message handler callback type: invoked with (decrypted payload, sender agent id).
pub type MessageHandler = Box<dyn Fn(&[u8], &str) + Send + Sync>;
/// Task claim record
///
/// Signed statement that `executor_id` intends to execute `task_id`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskClaim {
    pub task_id: String,
    pub executor_id: String,
    /// Claim creation time (unix millis); claims older than
    /// `TASK_CLAIM_WINDOW_MS` can be superseded.
    pub timestamp: u64,
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// Heartbeat message
///
/// Periodic liveness/capability announcement, signed by the sender over its
/// canonical form (see `canonical_for_signing`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Heartbeat {
    pub agent_id: String,
    pub timestamp: u64,
    pub capabilities: Vec<String>,
    pub load: f32, // 0.0 to 1.0
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
impl Heartbeat {
    /// Create canonical representation for signing
    ///
    /// Emits the signed fields in a fixed key order and runs them through
    /// `CanonicalJson::stringify` so signer and verifier hash identical
    /// bytes. The `signature` field itself is deliberately excluded.
    // NOTE(review): `load` is an f32 round-tripped through JSON — confirm the
    // float formatting is byte-identical on every peer platform, otherwise
    // heartbeat signatures will fail to verify.
    pub fn canonical_for_signing(&self) -> String {
        let for_signing = serde_json::json!({
            "agent_id": self.agent_id,
            "capabilities": self.capabilities,
            "load": self.load,
            "timestamp": self.timestamp,
        });
        CanonicalJson::stringify(&for_signing)
    }
}
/// P2P Swarm Coordinator
///
/// Owns identity, relay and artifact state for one agent's membership in a
/// single swarm. Envelope payloads are encrypted with the shared `swarm_key`;
/// sender identities are always verified against the local registry.
pub struct P2PSwarmV2 {
    /// Our identity manager (keys, registry, nonces, counters)
    identity: Arc<IdentityManager>,
    /// Relay manager
    relay_manager: Arc<RelayManager>,
    /// Artifact store (content-addressed blobs)
    artifact_store: Arc<ArtifactStore>,
    /// Swarm key (for envelope encryption)
    swarm_key: [u8; 32],
    /// Swarm ID (derived from swarm key hash — safe to share publicly)
    swarm_id: String,
    /// Our agent ID
    agent_id: String,
    /// Our capabilities
    capabilities: Vec<String>,
    /// Connection state
    connected: Arc<RwLock<bool>>,
    /// Message handlers by topic (one handler per topic)
    handlers: Arc<RwLock<HashMap<String, MessageHandler>>>,
    /// Pending tasks
    pending_tasks: Arc<RwLock<HashMap<String, TaskEnvelope>>>,
    /// Task claims (by task id)
    task_claims: Arc<RwLock<HashMap<String, TaskClaim>>>,
    /// Shutdown signal for the heartbeat loop
    shutdown_tx: Option<mpsc::Sender<()>>,
}
impl P2PSwarmV2 {
    /// Create new swarm coordinator
    ///
    /// `swarm_key` is the swarm's shared symmetric key; pass `None` to
    /// generate a fresh random key (i.e. start a brand-new swarm). The
    /// human-readable swarm id is derived from a hash of the key, so it can
    /// be shared without revealing the key itself.
    pub fn new(agent_id: &str, swarm_key: Option<[u8; 32]>, capabilities: Vec<String>) -> Self {
        let identity = Arc::new(IdentityManager::new());
        let relay_manager = Arc::new(RelayManager::new());
        let artifact_store = Arc::new(ArtifactStore::new());
        // Generate or use provided swarm key
        let swarm_key = swarm_key.unwrap_or_else(|| {
            let mut key = [0u8; 32];
            // OS CSPRNG — appropriate for key material.
            rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut key);
            key
        });
        // Derive swarm ID from key (first 8 bytes of its hash, hex-encoded)
        let swarm_id = hex::encode(&CryptoV2::hash(&swarm_key)[..8]);
        Self {
            identity,
            relay_manager,
            artifact_store,
            swarm_key,
            swarm_id,
            agent_id: agent_id.to_string(),
            capabilities,
            connected: Arc::new(RwLock::new(false)),
            handlers: Arc::new(RwLock::new(HashMap::new())),
            pending_tasks: Arc::new(RwLock::new(HashMap::new())),
            task_claims: Arc::new(RwLock::new(HashMap::new())),
            shutdown_tx: None,
        }
    }
    /// Get swarm key (for sharing with peers)
    ///
    /// Anyone holding this key can decrypt swarm traffic — share it only
    /// over a trusted out-of-band channel.
    pub fn swarm_key(&self) -> [u8; 32] {
        self.swarm_key
    }
    /// Get swarm key as base64 (standard alphabet, for copy/paste sharing)
    pub fn swarm_key_base64(&self) -> String {
        base64::Engine::encode(&base64::engine::general_purpose::STANDARD, self.swarm_key)
    }
    /// Connect to swarm
    ///
    /// Self-registers with the identity registry, flips the connected flag
    /// and starts the background heartbeat loop. Currently infallible; the
    /// `Result` return leaves room for real transport setup to fail later.
    pub async fn connect(&mut self) -> Result<(), String> {
        tracing::info!("Connecting to swarm: {}", self.swarm_id);
        // Register ourselves
        let registration = self.identity.create_registration(&self.agent_id, self.capabilities.clone());
        self.identity.register_member(registration);
        *self.connected.write() = true;
        // Start heartbeat loop
        self.start_heartbeat_loop();
        tracing::info!("Connected to swarm {} as {}", self.swarm_id, self.agent_id);
        Ok(())
    }
/// Disconnect from swarm
pub fn disconnect(&mut self) {
*self.connected.write() = false;
if let Some(tx) = self.shutdown_tx.take() {
let _ = tx.try_send(());
}
tracing::info!("Disconnected from swarm {}", self.swarm_id);
}
    /// Start heartbeat loop
    ///
    /// Spawns a background task that signs and "publishes" a heartbeat every
    /// `HEARTBEAT_INTERVAL_MS`, piggybacking nonce-cache cleanup on the same
    /// tick. The loop exits either when the connected flag drops or when a
    /// shutdown signal arrives on the channel stored in `shutdown_tx`.
    fn start_heartbeat_loop(&mut self) {
        let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1);
        self.shutdown_tx = Some(shutdown_tx);
        // Clone the shared handles the task needs; it owns them from here on.
        let identity = self.identity.clone();
        let agent_id = self.agent_id.clone();
        let capabilities = self.capabilities.clone();
        let connected = self.connected.clone();
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(HEARTBEAT_INTERVAL_MS));
            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        if !*connected.read() {
                            break;
                        }
                        // Create and sign heartbeat. The signature is computed
                        // over the canonical form, so the zero placeholder is
                        // never part of the signed bytes.
                        let mut heartbeat = Heartbeat {
                            agent_id: agent_id.clone(),
                            timestamp: chrono::Utc::now().timestamp_millis() as u64,
                            capabilities: capabilities.clone(),
                            load: 0.5, // TODO: Measure actual load
                            signature: [0u8; 64],
                        };
                        let canonical = heartbeat.canonical_for_signing();
                        heartbeat.signature = identity.sign(canonical.as_bytes());
                        // In real implementation, publish to GUN
                        tracing::debug!("Published heartbeat for {}", agent_id);
                        // Cleanup expired nonces
                        identity.cleanup_nonces();
                    }
                    _ = shutdown_rx.recv() => {
                        tracing::debug!("Heartbeat loop shutting down");
                        break;
                    }
                }
            }
        });
    }
    /// Register a verified member
    ///
    /// Strict validation before the registry is touched: the member must
    /// present a non-zero X25519 key, at least one capability and a non-zero
    /// join timestamp. Returns `false` when rejected (with a warning log).
    pub fn register_member(&self, member: RegisteredMember) -> bool {
        // Strict validation: require all fields
        if member.x25519_public_key == [0u8; 32] {
            tracing::warn!("Rejecting member {}: missing x25519_public_key", member.agent_id);
            return false;
        }
        if member.capabilities.is_empty() {
            tracing::warn!("Rejecting member {}: empty capabilities", member.agent_id);
            return false;
        }
        if member.joined_at == 0 {
            tracing::warn!("Rejecting member {}: invalid joined_at", member.agent_id);
            return false;
        }
        self.identity.register_member(member)
    }
    /// Resolve member from registry (returns full member, not just key)
    ///
    /// The registry is the only trusted source of peer keys — keys carried
    /// inside envelopes or signals are never used for verification.
    pub fn resolve_member(&self, agent_id: &str) -> Option<RegisteredMember> {
        self.identity.get_member(agent_id)
    }
    /// Subscribe to topic with handler
    ///
    /// One handler per topic: subscribing again replaces the previous handler.
    pub fn subscribe(&self, topic: &str, handler: MessageHandler) {
        self.handlers.write().insert(topic.to_string(), handler);
    }
    /// Publish message to topic
    ///
    /// Pipeline: allocate a fresh nonce and monotonic send counter, encrypt
    /// the payload with the shared swarm key, wrap it in an envelope, then
    /// sign the envelope's canonical header. Returns the generated message id.
    /// Fails when not connected or when encryption fails.
    pub fn publish(&self, topic: &str, payload: &[u8]) -> Result<String, String> {
        if !*self.connected.read() {
            return Err("Not connected".to_string());
        }
        let nonce = IdentityManager::generate_nonce();
        let counter = self.identity.next_send_counter();
        // Encrypt payload with swarm key
        let encrypted = CryptoV2::encrypt(payload, &self.swarm_key)?;
        // Create envelope
        let mut envelope = SignedEnvelope::new_unsigned(
            format!("{}:{}", self.swarm_id, uuid::Uuid::new_v4()),
            topic.to_string(),
            self.agent_id.clone(),
            payload,
            nonce,
            counter,
            encrypted,
        );
        // Sign canonical header
        let canonical = envelope.canonical_header();
        envelope.signature = self.identity.sign(canonical.as_bytes());
        // In real implementation, publish to GUN
        tracing::debug!("Published message {} to topic {}", envelope.message_id, topic);
        Ok(envelope.message_id)
    }
    /// Process incoming envelope (with full verification)
    ///
    /// The verification order is deliberate: cheap replay/ordering checks run
    /// before any cryptography, and the sender's key is always resolved from
    /// the local registry — never taken from the envelope itself.
    /// Returns the decrypted payload after dispatching it to any handler
    /// subscribed to the envelope's topic.
    pub fn process_envelope(&self, envelope: &SignedEnvelope) -> Result<Vec<u8>, String> {
        // 1. Check nonce and timestamp (replay protection)
        if !self.identity.check_nonce(&envelope.nonce, envelope.timestamp, &envelope.sender_id) {
            return Err("Replay detected or expired".to_string());
        }
        // 2. Check counter (ordering)
        if !self.identity.validate_recv_counter(&envelope.sender_id, envelope.counter) {
            return Err("Invalid counter".to_string());
        }
        // 3. Resolve sender from registry (NOT from envelope)
        let _member = self.resolve_member(&envelope.sender_id)
            .ok_or_else(|| format!("Sender {} not in registry", envelope.sender_id))?;
        // 4. Verify signature using REGISTRY key
        let canonical = envelope.canonical_header();
        if !self.identity.verify_from_registry(&envelope.sender_id, canonical.as_bytes(), &envelope.signature) {
            return Err("Invalid signature".to_string());
        }
        // 5. Decrypt with swarm key
        let payload = CryptoV2::decrypt(&envelope.encrypted, &self.swarm_key)?;
        // 6. Verify payload hash
        let payload_hash = CryptoV2::hash(&payload);
        if payload_hash != envelope.payload_hash {
            return Err("Payload hash mismatch".to_string());
        }
        // 7. Dispatch to handler
        if let Some(handler) = self.handlers.read().get(&envelope.topic) {
            handler(&payload, &envelope.sender_id);
        }
        Ok(payload)
    }
    /// Create signaling message (WebRTC offer/answer/ice)
    ///
    /// Signed over the canonical form, which deliberately excludes any public
    /// key material — receivers must resolve the sender via the registry.
    pub fn create_signaling(&self, signal_type: SignalType, to: &str, payload: String, ttl_ms: u64) -> SignalingMessage {
        let mut signal = SignalingMessage::new_unsigned(
            signal_type,
            self.agent_id.clone(),
            to.to_string(),
            payload,
            ttl_ms,
        );
        // Sign using canonical (no pubkey in signed data)
        let canonical = signal.canonical_for_signing();
        signal.signature = self.identity.sign(canonical.as_bytes());
        signal
    }
    /// Verify signaling message (using REGISTRY key only)
    ///
    /// Checks in order: TTL expiry, payload-hash integrity, then the
    /// signature against the key registered for `signal.from`. Any failure —
    /// including an unknown sender — yields `false`.
    pub fn verify_signaling(&self, signal: &SignalingMessage) -> bool {
        // Check expiry
        if signal.is_expired() {
            return false;
        }
        // Verify payload hash
        let payload_hash = CryptoV2::hash(signal.payload.as_bytes());
        if payload_hash != signal.payload_hash {
            return false;
        }
        // Verify signature using REGISTRY key (not from signal)
        let canonical = signal.canonical_for_signing();
        self.identity.verify_from_registry(&signal.from, canonical.as_bytes(), &signal.signature)
    }
    /// Store artifact and get CID
    ///
    /// Content-addressed: the returned CID is derived by the artifact store
    /// from the (optionally compressed) bytes.
    pub fn store_artifact(&self, data: &[u8], compress: bool) -> Result<String, String> {
        let info = self.artifact_store.store(data, compress)?;
        Ok(info.cid)
    }
    /// Retrieve artifact by CID
    ///
    /// Local store lookup only; `None` when the CID is unknown here.
    pub fn retrieve_artifact(&self, cid: &str) -> Option<Vec<u8>> {
        self.artifact_store.retrieve(cid)
    }
    /// Create and sign artifact pointer
    ///
    /// Delegates to the artifact store, passing our identity so the pointer
    /// carries a verifiable publisher signature.
    pub fn create_artifact_pointer(&self, artifact_type: ArtifactType, cid: &str, dimensions: &str) -> Option<ArtifactPointer> {
        self.artifact_store.create_pointer(artifact_type, &self.agent_id, cid, dimensions, &self.identity)
    }
/// Submit task
pub fn submit_task(&self, task: TaskEnvelope) -> Result<String, String> {
let task_id = task.task_id.clone();
self.pending_tasks.write().insert(task_id.clone(), task.clone());
// Publish to tasks topic
let task_bytes = serde_json::to_vec(&task)
.map_err(|e| e.to_string())?;
self.publish("tasks", &task_bytes)?;
Ok(task_id)
}
/// Claim task for execution
pub fn claim_task(&self, task_id: &str) -> Result<TaskClaim, String> {
// Check if task exists
if !self.pending_tasks.read().contains_key(task_id) {
return Err("Task not found".to_string());
}
// Check if already claimed within window
let now = chrono::Utc::now().timestamp_millis() as u64;
if let Some(existing) = self.task_claims.read().get(task_id) {
if now - existing.timestamp < TASK_CLAIM_WINDOW_MS {
return Err(format!("Task already claimed by {}", existing.executor_id));
}
}
// Create claim
let mut claim = TaskClaim {
task_id: task_id.to_string(),
executor_id: self.agent_id.clone(),
timestamp: now,
signature: [0u8; 64],
};
// Sign claim
let canonical = serde_json::json!({
"executor_id": claim.executor_id,
"task_id": claim.task_id,
"timestamp": claim.timestamp,
});
let canonical_str = CanonicalJson::stringify(&canonical);
claim.signature = self.identity.sign(canonical_str.as_bytes());
// Store claim
self.task_claims.write().insert(task_id.to_string(), claim.clone());
Ok(claim)
}
    /// Create signed task receipt
    ///
    /// Builds a receipt binding the task, executor, result CID, status,
    /// resource usage, and the input/output/module hashes, then signs the
    /// FULL canonical receipt so none of the binding fields can be swapped
    /// after the fact.
    ///
    /// `started_at` is approximated as `now - execution_ms`.
    // NOTE(review): assumes execution_ms <= now (unix millis) — a pathological
    // value would underflow the u64 subtraction below.
    pub fn create_receipt(
        &self,
        task: &TaskEnvelope,
        result_cid: String,
        status: TaskStatus,
        fuel_used: u64,
        memory_peak_mb: u32,
        execution_ms: u64,
        input_hash: [u8; 32],
        output_hash: [u8; 32],
        module_hash: [u8; 32],
    ) -> TaskReceipt {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        let mut receipt = TaskReceipt::new_unsigned(
            task,
            self.agent_id.clone(),
            result_cid,
            status,
            fuel_used,
            memory_peak_mb,
            execution_ms,
            input_hash,
            output_hash,
            module_hash,
            now - execution_ms,
            now,
        );
        // Sign the FULL canonical receipt (all binding fields)
        let canonical = receipt.canonical_for_signing();
        receipt.signature = self.identity.sign(canonical.as_bytes());
        receipt
    }
/// Get swarm status
pub fn status(&self) -> SwarmStatus {
SwarmStatus {
connected: *self.connected.read(),
swarm_id: self.swarm_id.clone(),
agent_id: self.agent_id.clone(),
public_key: self.identity.public_key(),
active_peers: self.identity.get_active_members(HEARTBEAT_TIMEOUT_MS).len(),
pending_tasks: self.pending_tasks.read().len(),
relay_metrics: self.relay_manager.get_metrics(),
}
}
/// Get active peer IDs
pub fn active_peers(&self) -> Vec<String> {
self.identity.get_active_members(HEARTBEAT_TIMEOUT_MS)
.into_iter()
.map(|m| m.agent_id)
.collect()
}
    /// Derive session key for direct channel with peer
    ///
    /// Delegates to the identity manager using the peer id and swarm id;
    /// returns `None` when no key can be derived (presumably because the
    /// peer is not registered — confirm in `IdentityManager::derive_session_key`).
    pub fn derive_session_key(&self, peer_id: &str) -> Option<[u8; 32]> {
        self.identity.derive_session_key(peer_id, &self.swarm_id)
    }
}
impl Drop for P2PSwarmV2 {
    // Best-effort cleanup: makes sure the heartbeat loop is signalled to
    // stop even when the owner never calls `disconnect()` explicitly.
    fn drop(&mut self) {
        self.disconnect();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[tokio::test]
    async fn test_swarm_connect() {
        let mut swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        swarm.connect().await.unwrap();
        let status = swarm.status();
        assert!(status.connected);
        assert_eq!(status.agent_id, "test-agent");
    }
    #[tokio::test]
    async fn test_member_registration() {
        let swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        // Create another identity (simulates a remote peer with its own keys)
        let other_identity = IdentityManager::new();
        let registration = other_identity.create_registration("peer-1", vec!["worker".to_string()]);
        // Should succeed with valid registration
        assert!(swarm.register_member(registration));
        // Should be able to resolve
        let member = swarm.resolve_member("peer-1");
        assert!(member.is_some());
    }
    #[tokio::test]
    async fn test_strict_member_validation() {
        let swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        // Registration with missing x25519 key should fail
        let mut bad_registration = RegisteredMember {
            agent_id: "bad-peer".to_string(),
            ed25519_public_key: [0u8; 32],
            x25519_public_key: [0u8; 32], // Invalid: all zeros
            capabilities: vec!["worker".to_string()],
            joined_at: chrono::Utc::now().timestamp_millis() as u64,
            last_heartbeat: 0,
            signature: [0u8; 64],
        };
        assert!(!swarm.register_member(bad_registration.clone()));
        // Empty capabilities should also fail
        bad_registration.x25519_public_key = [1u8; 32];
        bad_registration.capabilities = vec![];
        assert!(!swarm.register_member(bad_registration));
    }
    #[test]
    fn test_artifact_store_and_pointer() {
        let swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        let data = b"test artifact data";
        // Round-trip through the content-addressed store.
        let cid = swarm.store_artifact(data, true).unwrap();
        let retrieved = swarm.retrieve_artifact(&cid).unwrap();
        assert_eq!(data.as_slice(), retrieved.as_slice());
        let pointer = swarm.create_artifact_pointer(ArtifactType::QTable, &cid, "100x10");
        assert!(pointer.is_some());
    }
    #[tokio::test]
    async fn test_task_claim() {
        let mut swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        swarm.connect().await.unwrap();
        let task = TaskEnvelope::new(
            "task-001".to_string(),
            "local:module".to_string(),
            "process".to_string(),
            "local:input".to_string(),
            [0u8; 32],
            TaskBudgets {
                fuel_limit: 1000000,
                memory_mb: 128,
                timeout_ms: 30000,
            },
            "requester".to_string(),
            chrono::Utc::now().timestamp_millis() as u64 + 60000,
            1,
        );
        // Submit task (inserted directly, bypassing the publish transport)
        swarm.pending_tasks.write().insert(task.task_id.clone(), task.clone());
        // Claim should succeed
        let claim = swarm.claim_task("task-001").unwrap();
        assert_eq!(claim.task_id, "task-001");
        assert_eq!(claim.executor_id, "test-agent");
        // Second claim should fail (within window)
        assert!(swarm.claim_task("task-001").is_err());
    }
}

View File

@@ -0,0 +1,302 @@
//! Plaid API Integration with Browser-Local Learning
//!
//! This module provides privacy-preserving financial data analysis that runs entirely
//! in the browser. No financial data, learning patterns, or AI models ever leave the
//! client device.
//!
//! ## Modules
//!
//! - `zkproofs` - Zero-knowledge proofs for financial statements
//! - `wasm` - WASM bindings for browser integration
//! - `zk_wasm` - WASM bindings for ZK proofs
pub mod zkproofs;
pub mod zkproofs_prod;
#[cfg(feature = "wasm")]
pub mod wasm;
#[cfg(feature = "wasm")]
pub mod zk_wasm;
#[cfg(feature = "wasm")]
pub mod zk_wasm_prod;
// Re-export demo ZK types (for backward compatibility)
pub use zkproofs::{
ZkProof, ProofType, VerificationResult, Commitment,
FinancialProofBuilder, RentalApplicationProof,
};
// Re-export production ZK types
pub use zkproofs_prod::{
PedersenCommitment, ZkRangeProof, ProofMetadata,
VerificationResult as ProdVerificationResult,
FinancialProver, FinancialVerifier, RentalApplicationBundle,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Financial transaction from Plaid
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Transaction {
pub transaction_id: String,
pub account_id: String,
pub amount: f64,
pub date: String,
pub name: String,
pub merchant_name: Option<String>,
pub category: Vec<String>,
pub pending: bool,
pub payment_channel: String,
}
/// Spending pattern learned from transactions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SpendingPattern {
pub pattern_id: String,
pub category: String,
pub avg_amount: f64,
pub frequency_days: f32,
pub confidence: f64,
pub last_seen: u64,
}
/// Category prediction result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CategoryPrediction {
pub category: String,
pub confidence: f64,
pub similar_transactions: Vec<String>,
}
/// Anomaly detection result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnomalyResult {
pub is_anomaly: bool,
pub anomaly_score: f64,
pub reason: String,
pub expected_amount: f64,
}
/// Budget recommendation from learning
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BudgetRecommendation {
pub category: String,
pub recommended_limit: f64,
pub current_avg: f64,
pub trend: String, // "increasing", "stable", "decreasing"
pub confidence: f64,
}
/// Local learning state for financial patterns
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FinancialLearningState {
pub version: u64,
pub patterns: HashMap<String, SpendingPattern>,
/// Category embeddings - HashMap prevents unbounded growth (was Vec which leaked memory)
pub category_embeddings: HashMap<String, Vec<f32>>,
pub q_values: HashMap<String, f64>, // state|action -> Q-value
pub temporal_weights: Vec<f32>, // Day-of-week weights (7 days: Sun-Sat)
pub monthly_weights: Vec<f32>, // Day-of-month weights (31 days)
/// Maximum embeddings to store (LRU eviction when exceeded)
#[serde(default = "default_max_embeddings")]
pub max_embeddings: usize,
}
fn default_max_embeddings() -> usize {
10_000 // ~400KB at 10 floats per embedding
}
impl Default for FinancialLearningState {
    /// Fresh, untrained state: version 0, empty tables, uniform temporal weights.
    fn default() -> Self {
        Self {
            version: 0,
            patterns: HashMap::new(),
            category_embeddings: HashMap::new(),
            q_values: HashMap::new(),
            temporal_weights: vec![1.0; 7], // 7 days (uniform prior)
            monthly_weights: vec![1.0; 31], // 31 days (uniform prior)
            max_embeddings: default_max_embeddings(),
        }
    }
}
/// Transaction feature vector for ML
#[derive(Debug, Clone)]
pub struct TransactionFeatures {
    pub amount_normalized: f32,
    pub day_of_week: f32,
    pub day_of_month: f32,
    pub hour_of_day: f32,
    pub is_weekend: f32,
    pub category_hash: Vec<f32>, // LSH of category text
    pub merchant_hash: Vec<f32>, // LSH of merchant name
}
impl TransactionFeatures {
    /// Convert to embedding vector for HNSW indexing
    ///
    /// Layout: [amount, dow/7, dom/31, hour/24, weekend] followed by the
    /// category LSH values, then the merchant LSH values.
    pub fn to_embedding(&self) -> Vec<f32> {
        let scalars = [
            self.amount_normalized,
            self.day_of_week / 7.0,
            self.day_of_month / 31.0,
            self.hour_of_day / 24.0,
            self.is_weekend,
        ];
        scalars
            .iter()
            .chain(self.category_hash.iter())
            .chain(self.merchant_hash.iter())
            .copied()
            .collect()
    }
}
/// Extract features from a transaction
///
/// Amount normalization is clamped to [0, 1]: the raw `ln(|amount|)/10` is
/// negative for amounts below $1 and `-inf` for a zero amount, which would
/// poison downstream embeddings. Clamping maps those cases to 0 while
/// leaving typical amounts unchanged.
pub fn extract_features(tx: &Transaction) -> TransactionFeatures {
    // Parse date for temporal features
    let (dow, dom, _hour) = parse_date(&tx.date);
    // Normalize amount (log scale, clamped to [0, 1]; ln(0) = -inf clamps to 0)
    let amount_normalized = (tx.amount.abs().ln() / 10.0).clamp(0.0, 1.0) as f32;
    // LSH hash for category
    let category_text = tx.category.join(" ");
    let category_hash = simple_lsh(&category_text, 8);
    // LSH hash for merchant (fall back to the raw transaction name)
    let merchant = tx.merchant_name.as_deref().unwrap_or(&tx.name);
    let merchant_hash = simple_lsh(merchant, 8);
    TransactionFeatures {
        amount_normalized,
        day_of_week: dow as f32,
        day_of_month: dom as f32,
        hour_of_day: 12.0, // Plaid dates carry no time component; default to noon
        is_weekend: if dow >= 5 { 1.0 } else { 0.0 },
        category_hash,
        merchant_hash,
    }
}
/// Simple LSH (locality-sensitive hashing) for text
///
/// Buckets each lowercased character (salted by its position) into one of
/// `dims` slots, then L2-normalizes the histogram. Empty input yields the
/// all-zero vector thanks to the `max(1.0)` floor on the norm.
fn simple_lsh(text: &str, dims: usize) -> Vec<f32> {
    let mut buckets = vec![0.0f32; dims];
    for (position, ch) in text.to_lowercase().chars().enumerate() {
        let slot = (ch as usize + position * 31) % dims;
        buckets[slot] += 1.0;
    }
    // L2 norm, floored at 1.0 so the empty histogram doesn't divide by zero.
    let norm = buckets
        .iter()
        .map(|value| value * value)
        .sum::<f32>()
        .sqrt()
        .max(1.0);
    buckets.into_iter().map(|value| value / norm).collect()
}
/// Parse date string to (day_of_week, day_of_month, hour)
///
/// Expects `YYYY-MM-DD`; anything with fewer than three '-'-separated fields
/// falls back to (0, 1, 12). The hour is always 12 because Plaid dates carry
/// no time component. The weekday is a simplified Zeller-style bucket in
/// 0..7 — deterministic, but not a calendar-accurate day of week.
fn parse_date(date_str: &str) -> (u8, u8, u8) {
    let mut fields = date_str.split('-');
    let (year, month, day) = match (fields.next(), fields.next(), fields.next()) {
        (Some(y), Some(m), Some(d)) => (
            y.parse::<u16>().unwrap_or(2024),
            m.parse::<u8>().unwrap_or(1),
            d.parse::<u8>().unwrap_or(1),
        ),
        _ => return (0, 1, 12),
    };
    // Simplified Zeller's congruence: stable weekday-like hash of the date.
    let dow = ((day as u16 + 13 * (month as u16 + 1) / 5 + year + year / 4) % 7) as u8;
    (dow, day, 12)
}
/// Q-learning update for spending decisions
///
/// Stateless single-step update toward `reward`:
/// Q'(s,a) = Q(s,a) + α · (r − Q(s,a)), with an implicit 0 for unseen keys.
/// The caller is responsible for writing the returned value back into
/// `state.q_values`.
pub fn update_q_value(
    state: &FinancialLearningState,
    category: &str,
    action: &str, // "under_budget", "at_budget", "over_budget"
    reward: f64,
    learning_rate: f64,
) -> f64 {
    let key = format!("{}|{}", category, action);
    let current_q = *state.q_values.get(&key).unwrap_or(&0.0);
    current_q + learning_rate * (reward - current_q)
}
/// Generate spending recommendation based on learned Q-values
///
/// Scales the budget by the best learned Q-value for the category, clamped
/// to [0.5, 2.0] — the clamp also covers the no-data case, where the best Q
/// stays at -inf and clamps to the 0.5 floor. Trend is derived from the
/// spend/budget ratio, and confidence grows with `state.version`.
///
/// Fixes: the original tracked a `best_action` variable that was assigned
/// but never read (dead code / compiler warning); the `max().min()` chain is
/// replaced by `clamp` (clippy: manual_clamp).
pub fn get_recommendation(
    state: &FinancialLearningState,
    category: &str,
    current_spending: f64,
    budget: f64,
) -> BudgetRecommendation {
    // Guard against a zero budget to avoid dividing by zero.
    let ratio = current_spending / budget.max(1.0);
    // Best Q-value across the three known actions (-inf when none learned).
    let actions = ["under_budget", "at_budget", "over_budget"];
    let mut best_q = f64::NEG_INFINITY;
    for action in &actions {
        let key = format!("{}|{}", category, action);
        if let Some(&q) = state.q_values.get(&key) {
            best_q = best_q.max(q);
        }
    }
    let trend = if ratio < 0.8 {
        "decreasing"
    } else if ratio > 1.2 {
        "increasing"
    } else {
        "stable"
    };
    BudgetRecommendation {
        category: category.to_string(),
        recommended_limit: budget * best_q.clamp(0.5, 2.0),
        current_avg: current_spending,
        trend: trend.to_string(),
        confidence: (1.0 - 1.0 / (state.version as f64 + 1.0)).max(0.1),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_extract_features() {
        let tx = Transaction {
            transaction_id: "tx123".to_string(),
            account_id: "acc456".to_string(),
            amount: 50.0,
            date: "2024-03-15".to_string(),
            name: "Coffee Shop".to_string(),
            merchant_name: Some("Starbucks".to_string()),
            category: vec!["Food".to_string(), "Coffee".to_string()],
            pending: false,
            payment_channel: "in_store".to_string(),
        };
        let features = extract_features(&tx);
        // ln(50)/10 ≈ 0.39 — inside the normalized [0, 1] range.
        assert!(features.amount_normalized >= 0.0);
        assert!(features.amount_normalized <= 1.0);
        // extract_features calls simple_lsh with dims = 8.
        assert_eq!(features.category_hash.len(), 8);
    }
    #[test]
    fn test_q_learning() {
        // Empty state: Q starts at 0, so one update moves it to α·r = 0.1 > 0.
        let state = FinancialLearningState::default();
        let new_q = update_q_value(&state, "Food", "under_budget", 1.0, 0.1);
        assert!(new_q > 0.0);
    }
}

View File

@@ -0,0 +1,334 @@
//! WASM bindings for Plaid local learning
//!
//! Exposes browser-local financial learning to JavaScript.
#![cfg(feature = "wasm")]
use wasm_bindgen::prelude::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use super::{
Transaction, SpendingPattern, CategoryPrediction, AnomalyResult,
BudgetRecommendation, FinancialLearningState, TransactionFeatures,
extract_features, update_q_value, get_recommendation,
};
/// Browser-local financial learning engine
///
/// All data stays in the browser. Uses IndexedDB for persistence.
#[wasm_bindgen]
pub struct PlaidLocalLearner {
    // Learned patterns/Q-values/weights; serialized via save_state/load_state.
    state: Arc<RwLock<FinancialLearningState>>,
    // Vector index over transaction embeddings for similarity search.
    hnsw_index: crate::WasmHnswIndex,
    // Spiking network fed per-transaction spike trains.
    spiking_net: crate::WasmSpikingNetwork,
    // Q-learning step size α used by record_outcome.
    learning_rate: f64,
}
#[wasm_bindgen]
impl PlaidLocalLearner {
    /// Create a new local learner
    ///
    /// All learning happens in-browser with no data exfiltration.
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Self {
            state: Arc::new(RwLock::new(FinancialLearningState::default())),
            hnsw_index: crate::WasmHnswIndex::new(),
            // 21 inputs = 5 scalar features + 8 category LSH dims + 8 merchant
            // LSH dims (see TransactionFeatures::to_embedding).
            spiking_net: crate::WasmSpikingNetwork::new(21, 32, 8), // Features -> hidden -> categories
            learning_rate: 0.1,
        }
    }
    /// Load state from serialized JSON (from IndexedDB)
    ///
    /// Replaces the in-memory learning state wholesale, then re-inserts the
    /// stored category embeddings into the HNSW index.
    // NOTE(review): the index is not cleared first — entries from a previous
    // state may linger if load_state is called twice. Confirm WasmHnswIndex
    // insert semantics (overwrite vs duplicate) before relying on reload.
    #[wasm_bindgen(js_name = loadState)]
    pub fn load_state(&mut self, json: &str) -> Result<(), JsValue> {
        let loaded: FinancialLearningState = serde_json::from_str(json)
            .map_err(|e| JsValue::from_str(&format!("Parse error: {}", e)))?;
        *self.state.write() = loaded;
        // Rebuild HNSW index from loaded embeddings
        let state = self.state.read();
        for (id, embedding) in &state.category_embeddings {
            self.hnsw_index.insert(id, embedding.clone());
        }
        Ok(())
    }
/// Serialize state to JSON (for IndexedDB persistence)
#[wasm_bindgen(js_name = saveState)]
pub fn save_state(&self) -> Result<String, JsValue> {
let state = self.state.read();
serde_json::to_string(&*state)
.map_err(|e| JsValue::from_str(&format!("Serialize error: {}", e)))
}
    /// Process a batch of transactions and learn patterns
    ///
    /// Returns updated insights without sending data anywhere.
    ///
    /// Per transaction: extracts features, indexes the embedding in HNSW,
    /// upserts the category embedding (with capacity-bound eviction), learns
    /// the spending pattern, accumulates temporal weights, and feeds a spike
    /// train to the spiking network. The state version is bumped once per batch.
    #[wasm_bindgen(js_name = processTransactions)]
    pub fn process_transactions(&mut self, transactions_json: &str) -> Result<JsValue, JsValue> {
        let transactions: Vec<Transaction> = serde_json::from_str(transactions_json)
            .map_err(|e| JsValue::from_str(&format!("Parse error: {}", e)))?;
        let mut state = self.state.write();
        let mut insights = ProcessingInsights::default();
        for tx in &transactions {
            // Extract features
            let features = extract_features(tx);
            let embedding = features.to_embedding();
            // Add to HNSW index for similarity search
            self.hnsw_index.insert(&tx.transaction_id, embedding.clone());
            // Update category embedding (HashMap prevents memory leak - overwrites existing)
            let category_key = tx.category.join(":");
            // LRU-style eviction if at capacity
            if state.category_embeddings.len() >= state.max_embeddings {
                // Remove oldest entry (in production, use proper LRU cache)
                // NOTE(review): HashMap iteration order is arbitrary, so this
                // actually evicts a random entry, not the oldest — a real LRU
                // needs an ordered structure.
                if let Some(key) = state.category_embeddings.keys().next().cloned() {
                    state.category_embeddings.remove(&key);
                }
            }
            state.category_embeddings.insert(category_key.clone(), embedding.clone());
            // Learn spending pattern
            self.learn_pattern(&mut state, tx, &features);
            // Update temporal weights (indices bounded: dow % 7, dom folded into 0..31)
            let dow = features.day_of_week as usize % 7;
            let dom = (features.day_of_month as usize).saturating_sub(1) % 31;
            state.temporal_weights[dow] += 0.1 * (tx.amount.abs() as f32);
            state.monthly_weights[dom] += 0.1 * (tx.amount.abs() as f32);
            // Feed to spiking network for temporal learning
            let spike_input = self.features_to_spikes(&features);
            let _output = self.spiking_net.forward(spike_input);
            insights.transactions_processed += 1;
            insights.total_amount += tx.amount.abs();
        }
        state.version += 1;
        insights.patterns_learned = state.patterns.len();
        insights.state_version = state.version;
        serde_wasm_bindgen::to_value(&insights)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
    /// Predict category for a new transaction
    // NOTE(review): placeholder implementation — the HNSW results are fetched
    // but never used; the prediction simply echoes the transaction's own
    // first category with a hard-coded 0.85 confidence and an empty
    // neighbor list. Real vote aggregation is still TODO.
    #[wasm_bindgen(js_name = predictCategory)]
    pub fn predict_category(&self, transaction_json: &str) -> Result<JsValue, JsValue> {
        let tx: Transaction = serde_json::from_str(transaction_json)
            .map_err(|e| JsValue::from_str(&format!("Parse error: {}", e)))?;
        let features = extract_features(&tx);
        let embedding = features.to_embedding();
        // Find similar transactions via HNSW
        let results = self.hnsw_index.search(embedding.clone(), 5);
        // Aggregate category votes from similar transactions
        let prediction = CategoryPrediction {
            category: tx.category.first().cloned().unwrap_or_default(),
            confidence: 0.85,
            similar_transactions: vec![], // Would populate from results
        };
        serde_wasm_bindgen::to_value(&prediction)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
    /// Detect if a transaction is anomalous
    ///
    /// Compares the absolute amount against the learned per-category average:
    /// a deviation larger than 2× the average is flagged. The first
    /// transaction in an unseen category is never anomalous.
    #[wasm_bindgen(js_name = detectAnomaly)]
    pub fn detect_anomaly(&self, transaction_json: &str) -> Result<JsValue, JsValue> {
        let tx: Transaction = serde_json::from_str(transaction_json)
            .map_err(|e| JsValue::from_str(&format!("Parse error: {}", e)))?;
        let state = self.state.read();
        let category_key = tx.category.join(":");
        let result = if let Some(pattern) = state.patterns.get(&category_key) {
            let amount_diff = (tx.amount.abs() - pattern.avg_amount).abs();
            let threshold = pattern.avg_amount * 2.0;
            AnomalyResult {
                is_anomaly: amount_diff > threshold,
                // Score is the deviation as a multiple of the (floored) average.
                anomaly_score: amount_diff / pattern.avg_amount.max(1.0),
                reason: if amount_diff > threshold {
                    format!("Amount ${:.2} is {:.1}x typical", tx.amount, amount_diff / pattern.avg_amount.max(1.0))
                } else {
                    "Normal transaction".to_string()
                },
                expected_amount: pattern.avg_amount,
            }
        } else {
            AnomalyResult {
                is_anomaly: false,
                anomaly_score: 0.0,
                reason: "First transaction in this category".to_string(),
                expected_amount: tx.amount.abs(),
            }
        };
        serde_wasm_bindgen::to_value(&result)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
/// Get budget recommendation for a category
///
/// Delegates to `get_recommendation` against the current learned state and
/// serializes the result for JS consumption.
#[wasm_bindgen(js_name = getBudgetRecommendation)]
pub fn get_budget_recommendation(
    &self,
    category: &str,
    current_spending: f64,
    budget: f64,
) -> Result<JsValue, JsValue> {
    let guard = self.state.read();
    let recommendation = get_recommendation(&guard, category, current_spending, budget);
    serde_wasm_bindgen::to_value(&recommendation)
        .map_err(|e| JsValue::from_str(&e.to_string()))
}
/// Record spending outcome for Q-learning
///
/// Updates the Q-value for the `(category, action)` pair with the observed
/// `reward` and bumps the state version so syncs pick up the change.
#[wasm_bindgen(js_name = recordOutcome)]
pub fn record_outcome(&mut self, category: &str, action: &str, reward: f64) {
    let mut guard = self.state.write();
    let updated = update_q_value(&guard, category, action, reward, self.learning_rate);
    guard.q_values.insert(format!("{}|{}", category, action), updated);
    guard.version += 1;
}
/// Get spending patterns summary
///
/// Returns a serialized snapshot of every learned `SpendingPattern`.
#[wasm_bindgen(js_name = getPatternsSummary)]
pub fn get_patterns_summary(&self) -> Result<JsValue, JsValue> {
    let guard = self.state.read();
    let patterns: Vec<SpendingPattern> = guard.patterns.values().cloned().collect();
    serde_wasm_bindgen::to_value(&patterns).map_err(|e| JsValue::from_str(&e.to_string()))
}
/// Get temporal spending heatmap (day of week + day of month)
///
/// Copies the learned temporal/monthly weight vectors into a serialized
/// `TemporalHeatmap`.
#[wasm_bindgen(js_name = getTemporalHeatmap)]
pub fn get_temporal_heatmap(&self) -> Result<JsValue, JsValue> {
    let guard = self.state.read();
    let heatmap = TemporalHeatmap {
        day_of_week: guard.temporal_weights.clone(),
        day_of_month: guard.monthly_weights.clone(),
    };
    serde_wasm_bindgen::to_value(&heatmap).map_err(|e| JsValue::from_str(&e.to_string()))
}
/// Find similar transactions to a given one
///
/// Returns the raw HNSW search result for the top `k` neighbours, or
/// `JsValue::NULL` when the input JSON does not parse as a `Transaction`.
#[wasm_bindgen(js_name = findSimilarTransactions)]
pub fn find_similar_transactions(&self, transaction_json: &str, k: usize) -> JsValue {
    match serde_json::from_str::<Transaction>(transaction_json) {
        Ok(tx) => {
            let embedding = extract_features(&tx).to_embedding();
            self.hnsw_index.search(embedding, k)
        }
        // Invalid JSON yields NULL rather than throwing ("no results" sentinel).
        Err(_) => JsValue::NULL,
    }
}
/// Get current learning statistics
///
/// Returns a serialized `LearningStats` snapshot: state version plus the
/// sizes of the pattern, Q-value and embedding tables and the HNSW index.
#[wasm_bindgen(js_name = getStats)]
pub fn get_stats(&self) -> Result<JsValue, JsValue> {
    let guard = self.state.read();
    let snapshot = LearningStats {
        version: guard.version,
        patterns_count: guard.patterns.len(),
        q_values_count: guard.q_values.len(),
        embeddings_count: guard.category_embeddings.len(),
        index_size: self.hnsw_index.len(),
    };
    serde_wasm_bindgen::to_value(&snapshot).map_err(|e| JsValue::from_str(&e.to_string()))
}
/// Clear all learned data (privacy feature)
///
/// Resets the learning state to defaults, replaces the HNSW index with a
/// fresh empty one, and resets the spiking network.
#[wasm_bindgen]
pub fn clear(&mut self) {
    {
        // Scoped so the write lock is released before touching other fields.
        let mut guard = self.state.write();
        *guard = FinancialLearningState::default();
    }
    self.hnsw_index = crate::WasmHnswIndex::new();
    self.spiking_net.reset();
}
// Internal helper methods

/// Update (or create) the spending pattern for `tx`'s category.
///
/// The average amount is tracked with an exponential moving average
/// (alpha = 0.1) and confidence ramps up by 0.1 per observation, capped
/// at 1.0. `last_seen` records the state version as a stand-in timestamp.
fn learn_pattern(&self, state: &mut FinancialLearningState, tx: &Transaction, _features: &TransactionFeatures) {
    let key = tx.category.join(":");
    let entry = state.patterns.entry(key.clone()).or_insert_with(|| SpendingPattern {
        pattern_id: format!("pat_{}", key),
        category: key.clone(),
        avg_amount: 0.0,
        frequency_days: 30.0,
        confidence: 0.0,
        last_seen: 0,
    });
    // EMA: keep 90% of history, blend in 10% of the new observation.
    entry.avg_amount = entry.avg_amount * 0.9 + tx.amount.abs() * 0.1;
    entry.confidence = (entry.confidence + 0.1).min(1.0);
    // Version number stands in for a real timestamp here.
    entry.last_seen = state.version;
}
/// Threshold-encode the feature embedding into a binary spike train.
///
/// Each embedding component strictly greater than 0.5 produces a spike (1),
/// otherwise 0 (probability encoding collapsed to a hard threshold).
fn features_to_spikes(&self, features: &TransactionFeatures) -> Vec<u8> {
    features
        .to_embedding()
        .iter()
        .map(|&v| u8::from(v > 0.5))
        .collect()
}
}
impl Default for PlaidLocalLearner {
    /// Equivalent to [`PlaidLocalLearner::new`]: an empty learner with no
    /// learned state.
    fn default() -> Self {
        Self::new()
    }
}
/// Summary statistics returned after a batch of transactions is processed.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct ProcessingInsights {
    // Count of transactions handled in the batch.
    transactions_processed: usize,
    // Sum of transaction amounts seen — sign convention set by the (not
    // visible) batch processor; TODO confirm whether amounts are signed.
    total_amount: f64,
    // Number of spending patterns known after processing.
    patterns_learned: usize,
    // Learning-state version after processing.
    state_version: u64,
}
/// Temporal spending heatmap returned by `getTemporalHeatmap`.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TemporalHeatmap {
    // Weights per day of week (copied from `state.temporal_weights`).
    day_of_week: Vec<f32>,
    // Weights per day of month (copied from `state.monthly_weights`).
    day_of_month: Vec<f32>,
}
/// Snapshot of learner size/progress, returned by `getStats`.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct LearningStats {
    // Monotonically increasing learning-state version.
    version: u64,
    // Number of learned spending patterns.
    patterns_count: usize,
    // Number of stored Q-values.
    q_values_count: usize,
    // Number of category embeddings.
    embeddings_count: usize,
    // Number of entries in the HNSW index.
    index_size: usize,
}

View File

@@ -0,0 +1,322 @@
//! WASM bindings for Zero-Knowledge Financial Proofs
//!
//! Generate and verify ZK proofs entirely in the browser.
#![cfg(feature = "wasm")]
use wasm_bindgen::prelude::*;
use serde::{Deserialize, Serialize};
use super::zkproofs::{
FinancialProofBuilder, RangeProof, RentalApplicationProof,
ZkProof, VerificationResult, ProofType,
};
/// WASM-compatible ZK Financial Proof Generator
///
/// All proof generation happens in the browser.
/// Private financial data never leaves the client.
#[wasm_bindgen]
pub struct ZkFinancialProver {
    // Accumulates the private income/expense/balance data and produces proofs.
    builder: FinancialProofBuilder,
}
#[wasm_bindgen]
impl ZkFinancialProver {
/// Create a new prover instance
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self {
builder: FinancialProofBuilder::new(),
}
}
/// Load income data (array of monthly income in cents)
#[wasm_bindgen(js_name = loadIncome)]
pub fn load_income(&mut self, monthly_income: Vec<u64>) {
self.builder = std::mem::take(&mut self.builder)
.with_income(monthly_income);
}
/// Load expense data for a category
#[wasm_bindgen(js_name = loadExpenses)]
pub fn load_expenses(&mut self, category: &str, monthly_expenses: Vec<u64>) {
self.builder = std::mem::take(&mut self.builder)
.with_expenses(category, monthly_expenses);
}
/// Load balance history (array of daily balances in cents, can be negative)
#[wasm_bindgen(js_name = loadBalances)]
pub fn load_balances(&mut self, daily_balances: Vec<i64>) {
self.builder = std::mem::take(&mut self.builder)
.with_balances(daily_balances);
}
// ========================================================================
// Proof Generation
// ========================================================================
/// Prove: average income ≥ threshold
///
/// Returns serialized ZkProof or error string
#[wasm_bindgen(js_name = proveIncomeAbove)]
pub fn prove_income_above(&self, threshold_cents: u64) -> Result<JsValue, JsValue> {
self.builder.prove_income_above(threshold_cents)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
/// Prove: income ≥ multiplier × rent
///
/// Common use: prove income ≥ 3× rent for apartment application
#[wasm_bindgen(js_name = proveAffordability)]
pub fn prove_affordability(&self, rent_cents: u64, multiplier: u64) -> Result<JsValue, JsValue> {
self.builder.prove_affordability(rent_cents, multiplier)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
/// Prove: no overdrafts in the past N days
#[wasm_bindgen(js_name = proveNoOverdrafts)]
pub fn prove_no_overdrafts(&self, days: usize) -> Result<JsValue, JsValue> {
self.builder.prove_no_overdrafts(days)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
/// Prove: current savings ≥ threshold
#[wasm_bindgen(js_name = proveSavingsAbove)]
pub fn prove_savings_above(&self, threshold_cents: u64) -> Result<JsValue, JsValue> {
self.builder.prove_savings_above(threshold_cents)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
/// Prove: average spending in category ≤ budget
#[wasm_bindgen(js_name = proveBudgetCompliance)]
pub fn prove_budget_compliance(&self, category: &str, budget_cents: u64) -> Result<JsValue, JsValue> {
self.builder.prove_budget_compliance(category, budget_cents)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
/// Prove: debt-to-income ratio ≤ max_ratio%
#[wasm_bindgen(js_name = proveDebtRatio)]
pub fn prove_debt_ratio(&self, monthly_debt_cents: u64, max_ratio_percent: u64) -> Result<JsValue, JsValue> {
self.builder.prove_debt_ratio(monthly_debt_cents, max_ratio_percent)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
// ========================================================================
// Composite Proofs
// ========================================================================
/// Generate complete rental application proof bundle
///
/// Includes: income proof, stability proof, optional savings proof
#[wasm_bindgen(js_name = createRentalApplication)]
pub fn create_rental_application(
&self,
rent_cents: u64,
income_multiplier: u64,
stability_days: usize,
savings_months: Option<u64>,
) -> Result<JsValue, JsValue> {
RentalApplicationProof::create(
&self.builder,
rent_cents,
income_multiplier,
stability_days,
savings_months,
)
.map(|proof| serde_wasm_bindgen::to_value(&proof).unwrap())
.map_err(|e| JsValue::from_str(&e))
}
}
impl Default for ZkFinancialProver {
    /// Equivalent to [`ZkFinancialProver::new`]: a prover with no loaded data.
    fn default() -> Self {
        Self::new()
    }
}
/// WASM-compatible ZK Proof Verifier
///
/// Can verify proofs without knowing the private values
// Stateless: all verifier entry points are associated functions.
#[wasm_bindgen]
pub struct ZkProofVerifier;
#[wasm_bindgen]
impl ZkProofVerifier {
    /// Verify a single ZK proof
    ///
    /// Returns verification result with validity and statement
    #[wasm_bindgen]
    pub fn verify(proof_json: &str) -> Result<JsValue, JsValue> {
        let proof: ZkProof = serde_json::from_str(proof_json)
            .map_err(|e| JsValue::from_str(&format!("Invalid proof: {}", e)))?;
        let outcome = RangeProof::verify(&proof);
        serde_wasm_bindgen::to_value(&outcome).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Verify a rental application proof bundle
    #[wasm_bindgen(js_name = verifyRentalApplication)]
    pub fn verify_rental_application(application_json: &str) -> Result<JsValue, JsValue> {
        let application: RentalApplicationProof = serde_json::from_str(application_json)
            .map_err(|e| JsValue::from_str(&format!("Invalid application: {}", e)))?;
        // Per-proof results first, then the aggregate verdict.
        let results = application.verify();
        let summary = VerificationSummary {
            all_valid: application.is_valid(),
            results,
        };
        serde_wasm_bindgen::to_value(&summary).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Get human-readable statement from proof
    #[wasm_bindgen(js_name = getStatement)]
    pub fn get_statement(proof_json: &str) -> Result<String, JsValue> {
        let proof: ZkProof = serde_json::from_str(proof_json)
            .map_err(|e| JsValue::from_str(&format!("Invalid proof: {}", e)))?;
        Ok(proof.public_inputs.statement)
    }

    /// Check if proof is expired
    ///
    /// Proofs with no `expires_at` never expire.
    #[wasm_bindgen(js_name = isExpired)]
    pub fn is_expired(proof_json: &str) -> Result<bool, JsValue> {
        let proof: ZkProof = serde_json::from_str(proof_json)
            .map_err(|e| JsValue::from_str(&format!("Invalid proof: {}", e)))?;
        // NOTE(review): std::time::SystemTime::now() is unsupported on
        // wasm32-unknown-unknown and will panic there — confirm the build
        // target or shim in use.
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        match proof.expires_at {
            Some(expiry) => Ok(now > expiry),
            None => Ok(false),
        }
    }
}
/// Aggregate result for a multi-proof bundle verification.
#[derive(Serialize, Deserialize)]
struct VerificationSummary {
    // True only if every individual proof in the bundle verified.
    all_valid: bool,
    // Per-proof verification results, in bundle order.
    results: Vec<VerificationResult>,
}
/// Utility functions for ZK proofs
// Stateless namespace: currency conversion and proof-URL helpers.
#[wasm_bindgen]
pub struct ZkUtils;
#[wasm_bindgen]
impl ZkUtils {
    /// Convert dollars to cents (proof system uses cents for precision)
    // NOTE(review): negative inputs saturate to 0 under Rust's float→int
    // cast — confirm callers never pass negative dollar amounts.
    #[wasm_bindgen(js_name = dollarsToCents)]
    pub fn dollars_to_cents(dollars: f64) -> u64 {
        (dollars * 100.0).round() as u64
    }

    /// Convert cents to dollars
    #[wasm_bindgen(js_name = centsToDollars)]
    pub fn cents_to_dollars(cents: u64) -> f64 {
        cents as f64 / 100.0
    }

    /// Generate a shareable proof URL (base64 encoded)
    #[wasm_bindgen(js_name = proofToUrl)]
    pub fn proof_to_url(proof_json: &str, base_url: &str) -> String {
        let payload = base64_encode(proof_json.as_bytes());
        format!("{}?proof={}", base_url, payload)
    }

    /// Extract proof from URL parameter
    #[wasm_bindgen(js_name = proofFromUrl)]
    pub fn proof_from_url(encoded: &str) -> Result<String, JsValue> {
        base64_decode(encoded)
            .map_err(|e| JsValue::from_str(&format!("Invalid encoding: {}", e)))
            .and_then(|bytes| {
                String::from_utf8(bytes)
                    .map_err(|e| JsValue::from_str(&format!("Invalid UTF-8: {}", e)))
            })
    }
}
// Simple base64 encoding (no external deps)
/// Encode bytes as standard (RFC 4648) base64 with `=` padding.
fn base64_encode(data: &[u8]) -> String {
    const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    // Output is exactly 4 chars per 3-byte group (last group padded).
    let mut out = String::with_capacity(data.len().div_ceil(3) * 4);
    for group in data.chunks(3) {
        // Pack up to three bytes into a 24-bit word; missing bytes are zero.
        let word = ((group[0] as u32) << 16)
            | group.get(1).map_or(0, |&b| (b as u32) << 8)
            | group.get(2).map_or(0, |&b| b as u32);
        out.push(ALPHABET[(word >> 18) as usize & 0x3F] as char);
        out.push(ALPHABET[(word >> 12) as usize & 0x3F] as char);
        // Third and fourth symbols exist only when the group has 2 / 3 bytes;
        // otherwise emit padding.
        out.push(if group.len() > 1 {
            ALPHABET[(word >> 6) as usize & 0x3F] as char
        } else {
            '='
        });
        out.push(if group.len() > 2 {
            ALPHABET[word as usize & 0x3F] as char
        } else {
            '='
        });
    }
    out
}
/// Decode standard (RFC 4648) base64; `=` padding is optional.
///
/// # Errors
///
/// Returns an error for characters outside the base64 alphabet and for
/// inputs whose unpadded length is `4k + 1` — such a trailing byte cannot
/// encode any data and the previous implementation silently dropped it.
fn base64_decode(data: &str) -> Result<Vec<u8>, &'static str> {
    // Maps an ASCII byte to its 6-bit value; -1 marks invalid characters.
    const DECODE: [i8; 128] = [
        -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
        -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
        -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,62,-1,-1,-1,63,
        52,53,54,55,56,57,58,59,60,61,-1,-1,-1,-1,-1,-1,
        -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,
        15,16,17,18,19,20,21,22,23,24,25,-1,-1,-1,-1,-1,
        -1,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,
        41,42,43,44,45,46,47,48,49,50,51,-1,-1,-1,-1,-1,
    ];
    // Padding carries no data; strip it before grouping.
    let bytes: Vec<u8> = data.bytes().filter(|&b| b != b'=').collect();
    let mut result = Vec::with_capacity(bytes.len() / 4 * 3 + 2);
    for chunk in bytes.chunks(4) {
        // A lone trailing symbol encodes fewer than 8 bits — invalid input.
        if chunk.len() == 1 {
            return Err("Invalid base64 length");
        }
        // Reassemble the 24-bit word from up to four 6-bit symbols.
        let mut n = 0u32;
        for (i, &b) in chunk.iter().enumerate() {
            if b >= 128 || DECODE[b as usize] < 0 {
                return Err("Invalid base64 character");
            }
            n |= (DECODE[b as usize] as u32) << (18 - i * 6);
        }
        // 2 symbols -> 1 byte, 3 -> 2 bytes, 4 -> 3 bytes.
        result.push((n >> 16) as u8);
        if chunk.len() > 2 {
            result.push((n >> 8) as u8);
        }
        if chunk.len() > 3 {
            result.push(n as u8);
        }
    }
    Ok(result)
}

View File

@@ -0,0 +1,390 @@
//! Production WASM Bindings for Zero-Knowledge Financial Proofs
//!
//! Exposes production-grade Bulletproofs to JavaScript with a safe API.
//!
//! ## Security
//!
//! - All cryptographic operations use audited libraries
//! - Constant-time operations prevent timing attacks
//! - No sensitive data exposed to JavaScript
#![cfg(feature = "wasm")]
use wasm_bindgen::prelude::*;
use serde::{Deserialize, Serialize};
use super::zkproofs_prod::{
FinancialProver, FinancialVerifier, ZkRangeProof,
RentalApplicationBundle, VerificationResult,
};
/// Production ZK Financial Prover for browser use
///
/// Uses real Bulletproofs for cryptographically secure range proofs.
#[wasm_bindgen]
pub struct WasmFinancialProver {
    // Holds the private financial data and performs the actual proving.
    inner: FinancialProver,
}
#[wasm_bindgen]
impl WasmFinancialProver {
    /// Create a new prover
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Self {
            inner: FinancialProver::new(),
        }
    }

    /// Set monthly income data (in cents)
    ///
    /// Example: $6,500/month = 650000 cents
    #[wasm_bindgen(js_name = setIncome)]
    pub fn set_income(&mut self, income_json: &str) -> Result<(), JsValue> {
        match serde_json::from_str::<Vec<u64>>(income_json) {
            Ok(income) => {
                self.inner.set_income(income);
                Ok(())
            }
            Err(e) => Err(JsValue::from_str(&format!("Parse error: {}", e))),
        }
    }

    /// Set daily balance history (in cents)
    ///
    /// Negative values represent overdrafts.
    #[wasm_bindgen(js_name = setBalances)]
    pub fn set_balances(&mut self, balances_json: &str) -> Result<(), JsValue> {
        match serde_json::from_str::<Vec<i64>>(balances_json) {
            Ok(balances) => {
                self.inner.set_balances(balances);
                Ok(())
            }
            Err(e) => Err(JsValue::from_str(&format!("Parse error: {}", e))),
        }
    }

    /// Set expense data for a category (in cents)
    #[wasm_bindgen(js_name = setExpenses)]
    pub fn set_expenses(&mut self, category: &str, expenses_json: &str) -> Result<(), JsValue> {
        match serde_json::from_str::<Vec<u64>>(expenses_json) {
            Ok(expenses) => {
                self.inner.set_expenses(category, expenses);
                Ok(())
            }
            Err(e) => Err(JsValue::from_str(&format!("Parse error: {}", e))),
        }
    }

    /// Prove: average income >= threshold (in cents)
    ///
    /// Returns a ZK proof that can be verified without revealing actual income.
    #[wasm_bindgen(js_name = proveIncomeAbove)]
    pub fn prove_income_above(&mut self, threshold_cents: u64) -> Result<JsValue, JsValue> {
        let proof = self.inner.prove_income_above(threshold_cents)
            .map_err(|e| JsValue::from_str(&e))?;
        let result = ProofResult::from_proof(proof);
        serde_wasm_bindgen::to_value(&result).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Prove: income >= multiplier × rent
    ///
    /// Common requirement: income must be 3x rent.
    #[wasm_bindgen(js_name = proveAffordability)]
    pub fn prove_affordability(&mut self, rent_cents: u64, multiplier: u64) -> Result<JsValue, JsValue> {
        let proof = self.inner.prove_affordability(rent_cents, multiplier)
            .map_err(|e| JsValue::from_str(&e))?;
        let result = ProofResult::from_proof(proof);
        serde_wasm_bindgen::to_value(&result).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Prove: no overdrafts in the past N days
    #[wasm_bindgen(js_name = proveNoOverdrafts)]
    pub fn prove_no_overdrafts(&mut self, days: usize) -> Result<JsValue, JsValue> {
        let proof = self.inner.prove_no_overdrafts(days)
            .map_err(|e| JsValue::from_str(&e))?;
        let result = ProofResult::from_proof(proof);
        serde_wasm_bindgen::to_value(&result).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Prove: current savings >= threshold (in cents)
    #[wasm_bindgen(js_name = proveSavingsAbove)]
    pub fn prove_savings_above(&mut self, threshold_cents: u64) -> Result<JsValue, JsValue> {
        let proof = self.inner.prove_savings_above(threshold_cents)
            .map_err(|e| JsValue::from_str(&e))?;
        let result = ProofResult::from_proof(proof);
        serde_wasm_bindgen::to_value(&result).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Prove: average spending in category <= budget (in cents)
    #[wasm_bindgen(js_name = proveBudgetCompliance)]
    pub fn prove_budget_compliance(&mut self, category: &str, budget_cents: u64) -> Result<JsValue, JsValue> {
        let proof = self.inner.prove_budget_compliance(category, budget_cents)
            .map_err(|e| JsValue::from_str(&e))?;
        let result = ProofResult::from_proof(proof);
        serde_wasm_bindgen::to_value(&result).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Create a complete rental application bundle
    ///
    /// Combines income, stability, and optional savings proofs.
    #[wasm_bindgen(js_name = createRentalApplication)]
    pub fn create_rental_application(
        &mut self,
        rent_cents: u64,
        income_multiplier: u64,
        stability_days: usize,
        savings_months: Option<u64>,
    ) -> Result<JsValue, JsValue> {
        let bundle = RentalApplicationBundle::create(
            &mut self.inner,
            rent_cents,
            income_multiplier,
            stability_days,
            savings_months,
        )
        .map_err(|e| JsValue::from_str(&e))?;
        let result = BundleResult::from_bundle(bundle);
        serde_wasm_bindgen::to_value(&result).map_err(|e| JsValue::from_str(&e.to_string()))
    }
}
impl Default for WasmFinancialProver {
    /// Equivalent to [`WasmFinancialProver::new`]: a prover with no data set.
    fn default() -> Self {
        Self::new()
    }
}
/// Production ZK Verifier for browser use
// Stateless unit struct; `new()` exists only for the JS constructor.
#[wasm_bindgen]
pub struct WasmFinancialVerifier;
#[wasm_bindgen]
impl WasmFinancialVerifier {
    /// Create a new verifier
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Self
    }

    /// Verify a ZK range proof
    ///
    /// Returns verification result without learning the private value.
    #[wasm_bindgen]
    pub fn verify(&self, proof_json: &str) -> Result<JsValue, JsValue> {
        // JS hands us the JSON-serialized ProofResult; rebuild the native
        // proof, verify it, and return the JS-facing verdict.
        let parsed: ProofResult = serde_json::from_str(proof_json)
            .map_err(|e| JsValue::from_str(&format!("Parse error: {}", e)))?;
        let proof = parsed.to_proof().map_err(|e| JsValue::from_str(&e))?;
        let verdict = FinancialVerifier::verify(&proof).map_err(|e| JsValue::from_str(&e))?;
        let output = VerificationOutput::from_result(verdict);
        serde_wasm_bindgen::to_value(&output).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Verify a rental application bundle
    #[wasm_bindgen(js_name = verifyBundle)]
    pub fn verify_bundle(&self, bundle_json: &str) -> Result<JsValue, JsValue> {
        let parsed: BundleResult = serde_json::from_str(bundle_json)
            .map_err(|e| JsValue::from_str(&format!("Parse error: {}", e)))?;
        let bundle = parsed.to_bundle().map_err(|e| JsValue::from_str(&e))?;
        let valid = bundle.verify().map_err(|e| JsValue::from_str(&e))?;
        let verdict = BundleVerification {
            valid,
            application_id: bundle.application_id,
            created_at: bundle.created_at,
        };
        serde_wasm_bindgen::to_value(&verdict).map_err(|e| JsValue::from_str(&e.to_string()))
    }
}
impl Default for WasmFinancialVerifier {
    /// Equivalent to [`WasmFinancialVerifier::new`] (the verifier is stateless).
    fn default() -> Self {
        Self::new()
    }
}
// ============================================================================
// JSON-Serializable Types for JS Interop
// ============================================================================
/// Proof result for JS consumption
///
/// JSON-friendly mirror of `ZkRangeProof`: binary fields are carried as
/// base64/hex strings so the proof can round-trip through JS and URLs.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProofResult {
    /// Base64-encoded proof bytes
    pub proof_base64: String,
    /// Commitment point (hex, 32 bytes when decoded)
    pub commitment_hex: String,
    /// Lower bound of the proven range
    pub min: u64,
    /// Upper bound of the proven range
    pub max: u64,
    /// Statement (human-readable description of what was proven)
    pub statement: String,
    /// Generated timestamp
    pub generated_at: u64,
    /// Expiration timestamp (None = never expires)
    pub expires_at: Option<u64>,
    /// Proof hash (hex, 32 bytes when decoded)
    pub hash_hex: String,
}
impl ProofResult {
    /// Serialize a `ZkRangeProof` into the JSON/JS-friendly representation
    /// (base64 proof bytes, hex commitment and hash).
    fn from_proof(proof: ZkRangeProof) -> Self {
        use base64::{engine::general_purpose::STANDARD, Engine as _};
        let proof_base64 = STANDARD.encode(&proof.proof_bytes);
        let commitment_hex = hex::encode(proof.commitment.point);
        let hash_hex = hex::encode(proof.metadata.hash);
        Self {
            proof_base64,
            commitment_hex,
            min: proof.min,
            max: proof.max,
            statement: proof.statement,
            generated_at: proof.metadata.generated_at,
            expires_at: proof.metadata.expires_at,
            hash_hex,
        }
    }

    /// Rebuild the native `ZkRangeProof` from the JS representation.
    ///
    /// Fails when the base64/hex payloads are malformed or the decoded
    /// commitment/hash are not exactly 32 bytes.
    fn to_proof(&self) -> Result<ZkRangeProof, String> {
        use super::zkproofs_prod::{PedersenCommitment, ProofMetadata};
        use base64::{engine::general_purpose::STANDARD, Engine as _};
        let proof_bytes = STANDARD
            .decode(&self.proof_base64)
            .map_err(|e| format!("Invalid base64: {}", e))?;
        let point: [u8; 32] = hex::decode(&self.commitment_hex)
            .map_err(|e| format!("Invalid commitment hex: {}", e))?
            .try_into()
            .map_err(|_| "Invalid commitment length")?;
        let hash: [u8; 32] = hex::decode(&self.hash_hex)
            .map_err(|e| format!("Invalid hash hex: {}", e))?
            .try_into()
            .map_err(|_| "Invalid hash length")?;
        Ok(ZkRangeProof {
            proof_bytes,
            commitment: PedersenCommitment { point },
            min: self.min,
            max: self.max,
            statement: self.statement.clone(),
            metadata: ProofMetadata {
                generated_at: self.generated_at,
                expires_at: self.expires_at,
                version: 1,
                hash,
            },
        })
    }
}
/// Bundle result for JS consumption
///
/// JSON-friendly mirror of `RentalApplicationBundle` (nested proofs carried
/// as [`ProofResult`]s, bundle hash as hex).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BundleResult {
    /// Income proof
    pub income_proof: ProofResult,
    /// Stability proof
    pub stability_proof: ProofResult,
    /// Optional savings proof
    pub savings_proof: Option<ProofResult>,
    /// Application ID
    pub application_id: String,
    /// Created timestamp
    pub created_at: u64,
    /// Bundle hash (hex, 32 bytes when decoded)
    pub bundle_hash_hex: String,
}
impl BundleResult {
    /// Convert a native bundle into the JS-facing representation.
    fn from_bundle(bundle: RentalApplicationBundle) -> Self {
        let bundle_hash_hex = hex::encode(bundle.bundle_hash);
        Self {
            income_proof: ProofResult::from_proof(bundle.income_proof),
            stability_proof: ProofResult::from_proof(bundle.stability_proof),
            savings_proof: bundle.savings_proof.map(ProofResult::from_proof),
            application_id: bundle.application_id,
            created_at: bundle.created_at,
            bundle_hash_hex,
        }
    }

    /// Rebuild the native bundle; fails on a malformed hex hash or any
    /// malformed nested proof.
    fn to_bundle(&self) -> Result<RentalApplicationBundle, String> {
        let bundle_hash: [u8; 32] = hex::decode(&self.bundle_hash_hex)
            .map_err(|e| format!("Invalid bundle hash: {}", e))?
            .try_into()
            .map_err(|_| "Invalid bundle hash length")?;
        let savings_proof = match &self.savings_proof {
            Some(result) => Some(result.to_proof()?),
            None => None,
        };
        Ok(RentalApplicationBundle {
            income_proof: self.income_proof.to_proof()?,
            stability_proof: self.stability_proof.to_proof()?,
            savings_proof,
            application_id: self.application_id.clone(),
            created_at: self.created_at,
            bundle_hash,
        })
    }
}
/// Verification output for JS consumption
///
/// Field-for-field mirror of `zkproofs_prod::VerificationResult`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationOutput {
    /// Whether the proof is valid
    pub valid: bool,
    /// The statement that was verified
    pub statement: String,
    /// When verified
    pub verified_at: u64,
    /// Error message if invalid
    pub error: Option<String>,
}
impl VerificationOutput {
    /// Map the internal verifier result onto the JS-facing shape (1:1 fields).
    fn from_result(result: super::zkproofs_prod::VerificationResult) -> Self {
        Self {
            valid: result.valid,
            statement: result.statement,
            verified_at: result.verified_at,
            error: result.error,
        }
    }
}
/// Bundle verification result
// Returned by `WasmFinancialVerifier::verify_bundle`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BundleVerification {
    // True when the whole bundle verified.
    pub valid: bool,
    // Echoed from the bundle for correlation on the JS side.
    pub application_id: String,
    pub created_at: u64,
}
// ============================================================================
// Utility Functions
// ============================================================================
/// Check if production ZK is available
// Compile-time truth: this module only builds when the production ZK
// dependencies are present, so availability is unconditional here.
#[wasm_bindgen(js_name = isProductionZkAvailable)]
pub fn is_production_zk_available() -> bool {
    true
}
/// Get ZK library version info
///
/// Returns a static JSON object describing the underlying proof system, or
/// `JsValue::NULL` if serialization into a JS value fails.
#[wasm_bindgen(js_name = getZkVersionInfo)]
pub fn get_zk_version_info() -> JsValue {
    let info = serde_json::json!({
        "version": "1.0.0",
        "library": "bulletproofs",
        "curve": "ristretto255",
        "transcript": "merlin",
        "security_level": "128-bit",
        "features": [
            "range_proofs",
            "pedersen_commitments",
            "constant_time_operations",
            "fiat_shamir_transform"
        ]
    });
    match serde_wasm_bindgen::to_value(&info) {
        Ok(value) => value,
        Err(_) => JsValue::NULL,
    }
}

View File

@@ -0,0 +1,712 @@
//! Zero-Knowledge Financial Proofs
//!
//! Prove financial statements without revealing actual numbers.
//! All proofs are generated in the browser - private data never leaves.
//!
//! # ⚠️ SECURITY WARNING ⚠️
//!
//! **THIS IS A DEMONSTRATION IMPLEMENTATION - NOT PRODUCTION READY**
//!
//! The cryptographic primitives in this module are SIMPLIFIED for educational
//! purposes and API demonstration. They do NOT provide real security:
//!
//! - Custom hash function (not SHA-256)
//! - Simplified Pedersen commitments (not elliptic curve based)
//! - Mock bulletproof verification (does not verify mathematical properties)
//!
//! ## For Production Use
//!
//! Replace with battle-tested cryptographic libraries:
//! ```toml
//! bulletproofs = "4.0" # Real bulletproofs
//! curve25519-dalek = "4.0" # Elliptic curve operations
//! merlin = "3.0" # Fiat-Shamir transcripts
//! sha2 = "0.10" # Cryptographic hash
//! ```
//!
//! ## Supported Proofs (API Demo)
//!
//! - **Range Proofs**: Prove a value is within a range
//! - **Comparison Proofs**: Prove value A > value B
//! - **Aggregate Proofs**: Prove sum/average meets criteria
//! - **History Proofs**: Prove statements about transaction history
//!
//! ## Cryptographic Basis (Production)
//!
//! Real implementation would use Bulletproofs for range proofs (no trusted setup).
//! Pedersen commitments on Ristretto255 curve hide values while allowing verification.
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
// ============================================================================
// Core Types
// ============================================================================
/// A committed value - hides the actual number
///
/// # Security Note
/// In production, this would be a Ristretto255 point: `C = v·G + r·H`.
/// In this demo module it is a toy hash of `(value, blinding)` — see
/// `PedersenCommitment::commit` and the module security warning.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Commitment {
    /// The commitment point (in production: compressed Ristretto255)
    pub point: [u8; 32],
    // NOTE: Blinding factor removed from struct to prevent accidental leakage.
    // Prover must track blindings separately in a secure manner.
}
/// A zero-knowledge proof
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZkProof {
    /// Proof type identifier — verifiers must check this before interpreting
    /// `proof_data` (see `RangeProof::verify`).
    pub proof_type: ProofType,
    /// The actual proof bytes (layout depends on `proof_type`)
    pub proof_data: Vec<u8>,
    /// Public inputs (what the verifier needs)
    pub public_inputs: PublicInputs,
    /// Timestamp when proof was generated (via `current_timestamp()`;
    /// presumably Unix seconds — confirm against its definition)
    pub generated_at: u64,
    /// Expiration (proofs can be time-limited; `None` = never expires)
    pub expires_at: Option<u64>,
}
/// Types of proofs we can generate
///
/// Carried inside each [`ZkProof`]; a verifier rejects proofs whose type
/// does not match the verification routine being run.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ProofType {
    /// Prove: value ∈ [min, max]
    Range,
    /// Prove: value_a > value_b (or ≥, <, ≤)
    Comparison,
    /// Prove: income ≥ multiplier × expense
    Affordability,
    /// Prove: all values in set ≥ 0 (no overdrafts)
    NonNegative,
    /// Prove: sum of values ≤ threshold
    SumBound,
    /// Prove: average of values meets criteria
    AverageBound,
    /// Prove: membership in a set (e.g., verified accounts)
    SetMembership,
}
/// Public inputs that verifier sees
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PublicInputs {
    /// Commitments to hidden values. For `Range` proofs the single entry is
    /// the commitment to the proven value (see `RangeProof::prove`).
    pub commitments: Vec<Commitment>,
    /// Public threshold/bound values. For `Range` proofs this is `[min, max]`.
    pub bounds: Vec<u64>,
    /// Statement being proven (human readable)
    pub statement: String,
    /// Optional: institution that signed the source data
    pub attestation: Option<Attestation>,
}
/// Attestation from a trusted source (e.g., Plaid)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Attestation {
    /// Who attested (e.g., "plaid.com")
    pub issuer: String,
    /// Signature over the commitments (scheme not defined in this demo
    /// module — TODO confirm with the issuing side)
    pub signature: Vec<u8>,
    /// When the attestation was made
    pub timestamp: u64,
}
/// Result of proof verification
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationResult {
    // True when the proof checked out.
    pub valid: bool,
    // The human-readable statement the proof claims.
    pub statement: String,
    // Timestamp of this verification run.
    pub verified_at: u64,
    // Populated with a reason when `valid` is false.
    pub error: Option<String>,
}
// ============================================================================
// Pedersen Commitments (Simplified)
// ============================================================================
/// Pedersen commitment scheme
/// C = v*G + r*H where v=value, r=blinding, G,H=generator points
///
/// ⚠ Demo only: this module's "commitment" is Hash(value || blinding) with a
/// toy hash, not an elliptic-curve commitment. See the module security warning.
pub struct PedersenCommitment;

impl PedersenCommitment {
    /// Create a commitment to a value.
    pub fn commit(value: u64, blinding: &[u8; 32]) -> Commitment {
        // Simplified: In production, use curve25519-dalek.
        // Commitment = Hash(value_le_bytes || blinding).
        let mut hasher = Sha256::new();
        hasher.update(&value.to_le_bytes());
        hasher.update(blinding);
        let digest = hasher.finalize();
        let mut point = [0u8; 32];
        point.copy_from_slice(&digest[..32]);
        Commitment { point }
    }

    /// Generate a random 32-byte blinding factor.
    pub fn random_blinding() -> [u8; 32] {
        use rand::Rng;
        let mut blinding = [0u8; 32];
        rand::thread_rng().fill(&mut blinding);
        blinding
    }

    /// Check that `commitment` opens to `(value, blinding)`.
    ///
    /// Only the prover, who knows the blinding, can perform this check.
    pub fn verify_opening(commitment: &Commitment, value: u64, blinding: &[u8; 32]) -> bool {
        Self::commit(value, blinding).point == commitment.point
    }
}
// Simple SHA256 for commitments
//
// ⚠ NOT real SHA-256: a toy byte-mixing hash used only by this demo module
// (see the security warning in the module docs).
struct Sha256 {
    // Buffered message bytes; hashed all at once in `finalize`.
    data: Vec<u8>,
}

impl Sha256 {
    /// Start a new (empty) hash state.
    fn new() -> Self {
        Self { data: Vec::new() }
    }

    /// Append bytes to the message being hashed.
    fn update(&mut self, data: &[u8]) {
        self.data.extend_from_slice(data);
    }

    /// Consume the state and produce a 32-byte digest.
    fn finalize(self) -> [u8; 32] {
        // Simplified hash - in production use the sha2 crate.
        let mut digest = [0u8; 32];
        // Phase 1: XOR-fold every input byte into the digest, weighted by its
        // (chunk, offset) position.
        for (chunk_idx, chunk) in self.data.chunks(32).enumerate() {
            for (offset, &byte) in chunk.iter().enumerate() {
                digest[(chunk_idx + offset) % 32] ^=
                    byte.wrapping_mul((chunk_idx + offset + 1) as u8);
            }
        }
        // Phase 2: sequential diffusion pass; each slot mixes in two others
        // (the `| 1` keeps the multiplier odd, hence never zero).
        for i in 0..32 {
            digest[i] = digest[i]
                .wrapping_add(digest[(i + 7) % 32])
                .wrapping_mul(digest[(i + 13) % 32] | 1);
        }
        digest
    }
}
// ============================================================================
// Range Proofs (Bulletproofs-style)
// ============================================================================
/// Bulletproof-style range proof
/// Proves: value ∈ [0, 2^n) without revealing value
// Stateless namespace: all functionality lives in associated functions.
pub struct RangeProof;
impl RangeProof {
/// Generate a range proof
/// Proves: committed_value ∈ [min, max]
///
/// Fails when the statement does not actually hold for `value`. The proof
/// carries the commitment plus `[min, max]` as public inputs and expires
/// 30 days after generation.
pub fn prove(value: u64, min: u64, max: u64, blinding: &[u8; 32]) -> Result<ZkProof, String> {
    // The statement must actually hold before we can prove it.
    if !(min..=max).contains(&value) {
        return Err("Value not in range".to_string());
    }
    // Commit to the hidden value, then build the (simplified) proof body.
    // In production: use the bulletproofs crate.
    let commitment = PedersenCommitment::commit(value, blinding);
    let proof_data = Self::generate_bulletproof(value, min, max, blinding);
    Ok(ZkProof {
        proof_type: ProofType::Range,
        proof_data,
        public_inputs: PublicInputs {
            commitments: vec![commitment],
            bounds: vec![min, max],
            statement: format!("Value is between {} and {}", min, max),
            attestation: None,
        },
        generated_at: current_timestamp(),
        expires_at: Some(current_timestamp() + 86400 * 30), // 30 days
    })
}
/// Verify a range proof
pub fn verify(proof: &ZkProof) -> VerificationResult {
if proof.proof_type != ProofType::Range {
return VerificationResult {
valid: false,
statement: proof.public_inputs.statement.clone(),
verified_at: current_timestamp(),
error: Some("Wrong proof type".to_string()),
};
}
// Verify the bulletproof (simplified)
let valid = Self::verify_bulletproof(
&proof.proof_data,
&proof.public_inputs.commitments[0],
proof.public_inputs.bounds[0],
proof.public_inputs.bounds[1],
);
VerificationResult {
valid,
statement: proof.public_inputs.statement.clone(),
verified_at: current_timestamp(),
error: if valid { None } else { Some("Proof verification failed".to_string()) },
}
}
// Simplified bulletproof generation
fn generate_bulletproof(value: u64, min: u64, max: u64, blinding: &[u8; 32]) -> Vec<u8> {
let mut proof = Vec::new();
// Encode shifted value (value - min)
let shifted = value - min;
let range = max - min;
// Number of bits needed
let bits = (64 - range.leading_zeros()) as usize;
// Generate bit commitments (simplified)
for i in 0..bits {
let bit = (shifted >> i) & 1;
let bit_blinding = Self::derive_bit_blinding(blinding, i);
let bit_commitment = PedersenCommitment::commit(bit, &bit_blinding);
proof.extend_from_slice(&bit_commitment.point);
}
// Add challenge response (Fiat-Shamir)
let challenge = Self::fiat_shamir_challenge(&proof, blinding);
proof.extend_from_slice(&challenge);
proof
}
// Simplified bulletproof verification
fn verify_bulletproof(
proof_data: &[u8],
commitment: &Commitment,
min: u64,
max: u64,
) -> bool {
let range = max - min;
let bits = (64 - range.leading_zeros()) as usize;
// Check proof has correct structure
let expected_len = bits * 32 + 32; // bit commitments + challenge
if proof_data.len() != expected_len {
return false;
}
// Verify structure (simplified - real bulletproofs do much more)
// In production: verify inner product argument
// Check challenge is properly formed
let challenge_start = bits * 32;
let _challenge = &proof_data[challenge_start..];
// Simplified: just check it's not all zeros
proof_data.iter().any(|&b| b != 0)
}
fn derive_bit_blinding(base_blinding: &[u8; 32], bit_index: usize) -> [u8; 32] {
let mut result = *base_blinding;
result[0] ^= bit_index as u8;
result[31] ^= (bit_index >> 8) as u8;
result
}
fn fiat_shamir_challenge(transcript: &[u8], blinding: &[u8; 32]) -> [u8; 32] {
let mut hasher = Sha256::new();
hasher.update(transcript);
hasher.update(blinding);
hasher.finalize()
}
}
// ============================================================================
// Financial Proof Builder
// ============================================================================
/// Builder for common financial proofs
///
/// Holds the applicant's private financial data; proofs generated from it
/// reveal only the public statement, never the underlying figures.
/// Amounts appear to be whole dollars in this module's tests — TODO confirm
/// units against callers (the production module uses cents).
pub struct FinancialProofBuilder {
    /// Monthly income values
    income: Vec<u64>,
    /// Monthly expenses by category
    expenses: HashMap<String, Vec<u64>>,
    /// Account balances over time
    balances: Vec<i64>,
    /// Blinding factors (kept secret)
    /// NOTE(review): never populated — `get_or_create_blinding` takes `&self`
    /// and regenerates a fresh blinding each call; confirm intended reuse.
    blindings: HashMap<String, [u8; 32]>,
}
impl FinancialProofBuilder {
    /// Create an empty builder; attach data with the `with_*` methods.
    pub fn new() -> Self {
        Self {
            income: Vec::new(),
            expenses: HashMap::new(),
            balances: Vec::new(),
            blindings: HashMap::new(),
        }
    }
    /// Add monthly income data
    pub fn with_income(mut self, monthly_income: Vec<u64>) -> Self {
        self.income = monthly_income;
        self
    }
    /// Add expense category data
    pub fn with_expenses(mut self, category: &str, monthly: Vec<u64>) -> Self {
        self.expenses.insert(category.to_string(), monthly);
        self
    }
    /// Add balance history
    pub fn with_balances(mut self, daily_balances: Vec<i64>) -> Self {
        self.balances = daily_balances;
        self
    }
    // ========================================================================
    // Proof Generation
    // ========================================================================
    /// Prove: income ≥ threshold
    ///
    /// # Errors
    /// Fails when no income data is present or average income is below
    /// `threshold`.
    pub fn prove_income_above(&self, threshold: u64) -> Result<ZkProof, String> {
        let avg_income = self.average_income()?;
        let blinding = self.get_or_create_blinding("income");
        RangeProof::prove(avg_income, threshold, u64::MAX / 2, &blinding)
            .map(|mut p| {
                p.public_inputs.statement = format!(
                    "Average monthly income ≥ ${}",
                    threshold
                );
                p
            })
    }
    /// Prove: income ≥ multiplier × rent (affordability)
    ///
    /// # Errors
    /// Fails when income data is missing, `rent × multiplier` overflows,
    /// or income is insufficient.
    pub fn prove_affordability(&self, rent: u64, multiplier: u64) -> Result<ZkProof, String> {
        let avg_income = self.average_income()?;
        // checked_mul: a hostile multiplier must not wrap into a tiny bound
        let required = rent
            .checked_mul(multiplier)
            .ok_or_else(|| "rent × multiplier overflows".to_string())?;
        if avg_income < required {
            return Err("Income does not meet affordability requirement".to_string());
        }
        let blinding = self.get_or_create_blinding("affordability");
        // Prove income ≥ required
        RangeProof::prove(avg_income, required, u64::MAX / 2, &blinding)
            .map(|mut p| {
                p.proof_type = ProofType::Affordability;
                p.public_inputs.statement = format!(
                    "Income ≥ {}× monthly rent of ${}",
                    multiplier, rent
                );
                // BUG FIX: bounds[0]/bounds[1] are the [min, max] range that
                // RangeProof::verify re-derives the proof length from. The
                // old code replaced them with [rent, multiplier], which made
                // every affordability proof fail verification (and underflow
                // `max - min` in debug builds). Append the metadata instead.
                p.public_inputs.bounds.extend_from_slice(&[rent, multiplier]);
                p
            })
    }
    /// Prove: no overdrafts (all balances ≥ 0) for N days
    ///
    /// # Errors
    /// Fails when any balance inside the window is negative.
    pub fn prove_no_overdrafts(&self, days: usize) -> Result<ZkProof, String> {
        let relevant_balances = if days < self.balances.len() {
            &self.balances[self.balances.len() - days..]
        } else {
            &self.balances[..]
        };
        // Check all balances are non-negative
        let min_balance = *relevant_balances.iter().min().unwrap_or(&0);
        if min_balance < 0 {
            return Err("Overdraft detected in period".to_string());
        }
        let blinding = self.get_or_create_blinding("no_overdraft");
        // Prove minimum balance ≥ 0
        RangeProof::prove(min_balance as u64, 0, u64::MAX / 2, &blinding)
            .map(|mut p| {
                p.proof_type = ProofType::NonNegative;
                p.public_inputs.statement = format!(
                    "No overdrafts in the past {} days",
                    days
                );
                // BUG FIX: keep the verified [min, max] range in bounds[0..2]
                // and append the window length as extra public metadata. The
                // old `bounds = vec![days, 0]` replaced the range, breaking
                // verification of every stability proof.
                p.public_inputs.bounds.push(days as u64);
                p
            })
    }
    /// Prove: savings ≥ threshold
    ///
    /// # Errors
    /// Fails when the current balance is negative or below `threshold`.
    pub fn prove_savings_above(&self, threshold: u64) -> Result<ZkProof, String> {
        let current_balance = *self.balances.last().unwrap_or(&0);
        // Reject negatives first, then compare in u64: the previous
        // `threshold as i64` comparison wrapped for thresholds above i64::MAX.
        if current_balance < 0 || (current_balance as u64) < threshold {
            return Err("Savings below threshold".to_string());
        }
        let blinding = self.get_or_create_blinding("savings");
        RangeProof::prove(current_balance as u64, threshold, u64::MAX / 2, &blinding)
            .map(|mut p| {
                p.public_inputs.statement = format!(
                    "Current savings ≥ ${}",
                    threshold
                );
                p
            })
    }
    /// Prove: average spending in category ≤ budget
    ///
    /// # Errors
    /// Fails when the category is unknown or empty, or spending exceeds
    /// `budget`.
    pub fn prove_budget_compliance(
        &self,
        category: &str,
        budget: u64,
    ) -> Result<ZkProof, String> {
        let expenses = self.expenses.get(category)
            .ok_or_else(|| format!("No data for category: {}", category))?;
        // An empty category previously averaged to 0 via `.max(1)` and
        // produced a trivially-passing proof; treat it as missing data.
        if expenses.is_empty() {
            return Err(format!("No expense data for category: {}", category));
        }
        let avg_spending = expenses.iter().sum::<u64>() / expenses.len() as u64;
        if avg_spending > budget {
            return Err("Average spending exceeds budget".to_string());
        }
        let blinding = self.get_or_create_blinding(&format!("budget_{}", category));
        // Prove spending ≤ budget (equivalent to: spending ∈ [0, budget])
        RangeProof::prove(avg_spending, 0, budget, &blinding)
            .map(|mut p| {
                p.proof_type = ProofType::SumBound;
                p.public_inputs.statement = format!(
                    "Average {} spending ≤ ${}/month",
                    category, budget
                );
                p
            })
    }
    /// Prove: debt-to-income ratio ≤ threshold%
    ///
    /// # Errors
    /// Fails when income data is missing, `monthly_debt × 100` overflows,
    /// or the ratio exceeds `max_ratio`.
    pub fn prove_debt_ratio(&self, monthly_debt: u64, max_ratio: u64) -> Result<ZkProof, String> {
        let avg_income = self.average_income()?;
        // ratio = (debt * 100) / income
        let actual_ratio = monthly_debt
            .checked_mul(100)
            .ok_or_else(|| "monthly_debt × 100 overflows".to_string())?
            / avg_income.max(1);
        if actual_ratio > max_ratio {
            return Err("Debt ratio exceeds maximum".to_string());
        }
        let blinding = self.get_or_create_blinding("debt_ratio");
        RangeProof::prove(actual_ratio, 0, max_ratio, &blinding)
            .map(|mut p| {
                p.public_inputs.statement = format!(
                    "Debt-to-income ratio ≤ {}%",
                    max_ratio
                );
                p
            })
    }
    // ========================================================================
    // Helpers
    // ========================================================================
    // Average monthly income; errors when no data was supplied (the old
    // `len().max(1)` silently treated missing data as zero income).
    fn average_income(&self) -> Result<u64, String> {
        if self.income.is_empty() {
            return Err("No income data provided".to_string());
        }
        Ok(self.income.iter().sum::<u64>() / self.income.len() as u64)
    }
    fn get_or_create_blinding(&self, key: &str) -> [u8; 32] {
        // In real impl, would store and reuse blindings
        // For now, generate deterministically from key
        let mut blinding = [0u8; 32];
        for (i, c) in key.bytes().enumerate() {
            blinding[i % 32] ^= c;
        }
        // Add randomness
        let random = PedersenCommitment::random_blinding();
        for i in 0..32 {
            blinding[i] ^= random[i];
        }
        blinding
    }
}
impl Default for FinancialProofBuilder {
    /// Equivalent to [`FinancialProofBuilder::new`]: an empty builder.
    fn default() -> Self {
        Self::new()
    }
}
// ============================================================================
// Composite Proofs (Multiple Statements)
// ============================================================================
/// A bundle of proofs for rental application
///
/// Created via [`RentalApplicationProof::create`]; `verify` checks each
/// contained proof independently.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RentalApplicationProof {
    /// Prove income meets requirement
    pub income_proof: ZkProof,
    /// Prove no overdrafts
    pub stability_proof: ZkProof,
    /// Prove savings buffer
    pub savings_proof: Option<ZkProof>,
    /// Application metadata
    pub metadata: ApplicationMetadata,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ApplicationMetadata {
    /// Random anonymous identifier (32 hex chars) — not linked to identity.
    pub applicant_id: String,
    /// Optional property this application targets.
    pub property_id: Option<String>,
    /// Creation time (Unix seconds).
    pub generated_at: u64,
    /// Expiry time (Unix seconds).
    pub expires_at: u64,
}
impl RentalApplicationProof {
    /// Create a complete rental application proof bundle.
    ///
    /// When `savings_months` is present, additionally proves a savings
    /// buffer of `rent × months`.
    ///
    /// # Errors
    /// Propagates any individual proof failure; also fails when
    /// `rent × months` overflows.
    pub fn create(
        builder: &FinancialProofBuilder,
        rent: u64,
        income_multiplier: u64,
        stability_days: usize,
        savings_months: Option<u64>,
    ) -> Result<Self, String> {
        let income_proof = builder.prove_affordability(rent, income_multiplier)?;
        let stability_proof = builder.prove_no_overdrafts(stability_days)?;
        let savings_proof = match savings_months {
            Some(months) => {
                // checked_mul: the old `rent * months` could wrap silently
                // in release builds and prove a meaningless tiny buffer.
                let buffer = rent
                    .checked_mul(months)
                    .ok_or_else(|| "rent × savings_months overflows".to_string())?;
                Some(builder.prove_savings_above(buffer)?)
            }
            None => None,
        };
        Ok(Self {
            income_proof,
            stability_proof,
            savings_proof,
            metadata: ApplicationMetadata {
                applicant_id: generate_anonymous_id(),
                property_id: None,
                generated_at: current_timestamp(),
                expires_at: current_timestamp() + 86400 * 30, // 30 days
            },
        })
    }
    /// Verify all proofs in the bundle, returning one result per proof.
    pub fn verify(&self) -> Vec<VerificationResult> {
        let mut results = vec![
            RangeProof::verify(&self.income_proof),
            RangeProof::verify(&self.stability_proof),
        ];
        if let Some(ref savings_proof) = self.savings_proof {
            results.push(RangeProof::verify(savings_proof));
        }
        results
    }
    /// Check if application is valid (all proofs pass)
    pub fn is_valid(&self) -> bool {
        self.verify().iter().all(|r| r.valid)
    }
}
// ============================================================================
// Helpers
// ============================================================================
/// Seconds since the Unix epoch; returns 0 if the system clock reads
/// earlier than the epoch.
fn current_timestamp() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
/// Generate a random, unlinkable applicant identifier: 16 random bytes
/// rendered as 32 lowercase hex characters.
fn generate_anonymous_id() -> String {
    use rand::Rng;
    let mut raw = [0u8; 16];
    rand::thread_rng().fill(&mut raw);
    hex::encode(raw)
}
// ============================================================================
// Tests
// ============================================================================
#[cfg(test)]
mod tests {
    // Tests for the simplified (non-production) proof system: each one runs
    // the full prove → verify round trip on the demo RangeProof.
    use super::*;
    #[test]
    fn test_range_proof() {
        let value = 5000u64;
        let blinding = PedersenCommitment::random_blinding();
        let proof = RangeProof::prove(value, 3000, 10000, &blinding).unwrap();
        let result = RangeProof::verify(&proof);
        assert!(result.valid);
    }
    #[test]
    fn test_income_proof() {
        let builder = FinancialProofBuilder::new()
            .with_income(vec![6500, 6500, 6800, 6500]); // ~$6500/month
        // Prove income ≥ $5000
        let proof = builder.prove_income_above(5000).unwrap();
        let result = RangeProof::verify(&proof);
        assert!(result.valid);
        assert!(result.statement.contains("5000"));
    }
    #[test]
    fn test_affordability_proof() {
        let builder = FinancialProofBuilder::new()
            .with_income(vec![6500, 6500, 6500, 6500]);
        // Prove can afford $2000 rent (need 3x = $6000)
        let proof = builder.prove_affordability(2000, 3).unwrap();
        let result = RangeProof::verify(&proof);
        assert!(result.valid);
    }
    #[test]
    fn test_no_overdraft_proof() {
        let builder = FinancialProofBuilder::new()
            .with_balances(vec![1000, 800, 1200, 500, 900, 1100, 1500]);
        let proof = builder.prove_no_overdrafts(7).unwrap();
        let result = RangeProof::verify(&proof);
        assert!(result.valid);
    }
    #[test]
    fn test_rental_application() {
        // End-to-end: affordability + stability + savings buffer bundle.
        let builder = FinancialProofBuilder::new()
            .with_income(vec![6500, 6500, 6500, 6500])
            .with_balances(vec![5000, 5200, 4800, 5100, 5300, 5000, 5500]);
        let application = RentalApplicationProof::create(
            &builder,
            2000, // rent
            3, // income multiplier
            30, // stability days
            Some(2), // 2 months savings
        ).unwrap();
        assert!(application.is_valid());
    }
}

View File

@@ -0,0 +1,800 @@
//! Production-Ready Zero-Knowledge Financial Proofs
//!
//! This module provides cryptographically secure zero-knowledge proofs using:
//! - **Bulletproofs** for range proofs (no trusted setup)
//! - **Ristretto255** for Pedersen commitments (constant-time, safe API)
//! - **Merlin** for Fiat-Shamir transcripts
//! - **SHA-512** for secure hashing
//!
//! ## Security Properties
//!
//! - **Zero-Knowledge**: Verifier learns nothing beyond validity
//! - **Soundness**: Computationally infeasible to create false proofs
//! - **Completeness**: Valid statements always produce valid proofs
//! - **Side-channel resistant**: Constant-time operations throughout
//!
//! ## Usage
//!
//! ```rust,ignore
//! use ruvector_edge::plaid::zkproofs_prod::*;
//!
//! // Create prover with private data
//! let mut prover = FinancialProver::new();
//! prover.set_income(vec![650000, 650000, 680000]); // cents
//!
//! // Generate proof (income >= 3x rent)
//! let proof = prover.prove_affordability(200000, 3)?; // $2000 rent
//!
//! // Verify (learns nothing about actual income)
//! let valid = FinancialVerifier::verify(&proof)?;
//! assert!(valid);
//! ```
use bulletproofs::{BulletproofGens, PedersenGens, RangeProof as BulletproofRangeProof};
use curve25519_dalek::{ristretto::CompressedRistretto, scalar::Scalar};
use merlin::Transcript;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha512};
use std::collections::HashMap;
use subtle::ConstantTimeEq;
use zeroize::Zeroize;
// ============================================================================
// Constants
// ============================================================================
/// Domain separator for financial proof transcripts (versioned so future
/// proof formats cannot collide with v1 transcripts)
const TRANSCRIPT_LABEL: &[u8] = b"ruvector-financial-zk-v1";
/// Maximum bit size for range proofs (64-bit values)
const MAX_BITS: usize = 64;
// Pre-computed generators - optimized for single-party proofs (not aggregation)
lazy_static::lazy_static! {
    /// Bulletproof generators sized for 64-bit, single-party proofs.
    static ref BP_GENS: BulletproofGens = BulletproofGens::new(MAX_BITS, 1); // 1-party saves 8MB
    /// Pedersen commitment generators on Ristretto255.
    static ref PC_GENS: PedersenGens = PedersenGens::default();
}
// ============================================================================
// Core Types
// ============================================================================
/// A Pedersen commitment to a hidden value
///
/// Commitment = value·G + blinding·H where G, H are Ristretto255 points.
/// Serialized as the 32-byte compressed point only — the committed value
/// and blinding are never part of this struct.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PedersenCommitment {
    /// Compressed Ristretto255 point (32 bytes)
    pub point: [u8; 32],
}
impl PedersenCommitment {
    /// Commit to `value` under a freshly sampled random blinding factor.
    ///
    /// Returns the compressed commitment together with the blinding scalar,
    /// which the caller must keep secret in order to open or re-prove.
    pub fn commit(value: u64) -> (Self, Scalar) {
        let r = Scalar::random(&mut OsRng);
        let committed = Self::commit_with_blinding(value, &r);
        (committed, r)
    }
    /// Commit to `value` using a caller-provided blinding scalar.
    pub fn commit_with_blinding(value: u64, blinding: &Scalar) -> Self {
        let point = PC_GENS
            .commit(Scalar::from(value), *blinding)
            .compress()
            .to_bytes();
        Self { point }
    }
    /// Decompress the stored bytes back into a Ristretto point.
    ///
    /// Returns `None` when the bytes are not a canonical point encoding.
    pub fn decompress(&self) -> Option<curve25519_dalek::ristretto::RistrettoPoint> {
        let compressed = CompressedRistretto::from_slice(&self.point).ok()?;
        compressed.decompress()
    }
}
/// Zero-knowledge range proof
///
/// `min`, `max`, and `statement` are public and are bound into the
/// Fiat-Shamir transcript at proving time, so tampering with any of them
/// invalidates the proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZkRangeProof {
    /// The cryptographic proof bytes
    pub proof_bytes: Vec<u8>,
    /// Commitment to the value being proved
    pub commitment: PedersenCommitment,
    /// Lower bound (public)
    pub min: u64,
    /// Upper bound (public)
    pub max: u64,
    /// Human-readable statement
    pub statement: String,
    /// Proof metadata
    pub metadata: ProofMetadata,
}
/// Proof metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProofMetadata {
    /// When the proof was generated (Unix timestamp)
    pub generated_at: u64,
    /// When the proof expires (optional)
    pub expires_at: Option<u64>,
    /// Proof version for compatibility
    pub version: u8,
    /// Hash of the proof for integrity — first 32 bytes of SHA-512 over
    /// the raw proof bytes
    pub hash: [u8; 32],
}
impl ProofMetadata {
    /// Build metadata for freshly serialized proof bytes: records the
    /// creation time, an integrity hash (SHA-512 truncated to 256 bits),
    /// and an optional expiry `expires_in_days` from now.
    fn new(proof_bytes: &[u8], expires_in_days: Option<u64>) -> Self {
        let now = Self::unix_now();
        let digest = Sha512::digest(proof_bytes);
        let mut hash = [0u8; 32];
        hash.copy_from_slice(&digest[..32]);
        Self {
            generated_at: now,
            expires_at: expires_in_days.map(|days| now + days * 86400),
            version: 1,
            hash,
        }
    }
    /// Check if proof is expired (proofs without an expiry never expire).
    pub fn is_expired(&self) -> bool {
        match self.expires_at {
            Some(deadline) => Self::unix_now() > deadline,
            None => false,
        }
    }
    // Seconds since the Unix epoch, clamped to 0 on clock errors.
    fn unix_now() -> u64 {
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0)
    }
}
/// Verification result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationResult {
    /// Whether the proof is valid
    pub valid: bool,
    /// The statement that was verified
    pub statement: String,
    /// When verification occurred (Unix seconds)
    pub verified_at: u64,
    /// Any error message (present only when `valid` is false)
    pub error: Option<String>,
}
// ============================================================================
// Financial Prover
// ============================================================================
/// Prover for financial statements
///
/// Stores private financial data and generates ZK proofs.
/// Blinding factors are automatically zeroized on drop for security.
pub struct FinancialProver {
    /// Monthly income values (in cents)
    income: Vec<u64>,
    /// Daily balance history (in cents, can be negative represented as i64 then converted)
    balances: Vec<i64>,
    /// Monthly expenses by category
    expenses: HashMap<String, Vec<u64>>,
    /// Blinding factors for commitments (to allow proof combination).
    /// Keyed by proof kind — reusing a key reuses its blinding (see
    /// `create_range_proof`).
    /// SECURITY: These are sensitive - zeroized on drop
    blindings: HashMap<String, Scalar>,
}
impl Drop for FinancialProver {
    /// Best-effort scrubbing of private financial data when the prover is
    /// dropped, to reduce the window for memory-extraction attacks.
    fn drop(&mut self) {
        // NOTE(review): clearing the map drops the Scalars but does not
        // overwrite their bytes in place; full zeroization would require
        // zeroizing each Scalar — confirm whether the curve25519-dalek
        // `zeroize` feature is enabled for this crate.
        self.blindings.clear();
        self.income.zeroize();
        self.balances.zeroize();
        // Zeroize expense values before dropping the map.
        for expenses in self.expenses.values_mut() {
            expenses.zeroize();
        }
        self.expenses.clear();
    }
}
impl FinancialProver {
    /// Create a new prover with no financial data attached.
    pub fn new() -> Self {
        Self {
            income: Vec::new(),
            balances: Vec::new(),
            expenses: HashMap::new(),
            blindings: HashMap::new(),
        }
    }
    /// Set monthly income data (values in cents).
    pub fn set_income(&mut self, monthly_income: Vec<u64>) {
        self.income = monthly_income;
    }
    /// Set daily balance history (values in cents; negatives are overdrafts).
    pub fn set_balances(&mut self, daily_balances: Vec<i64>) {
        self.balances = daily_balances;
    }
    /// Set expense data for a category (values in cents).
    pub fn set_expenses(&mut self, category: &str, monthly_expenses: Vec<u64>) {
        self.expenses.insert(category.to_string(), monthly_expenses);
    }
    // ========================================================================
    // Proof Generation
    // ========================================================================
    /// Prove: average income >= threshold
    ///
    /// # Errors
    /// Fails when no income data is present or the average is below
    /// `threshold`.
    pub fn prove_income_above(&mut self, threshold: u64) -> Result<ZkRangeProof, String> {
        if self.income.is_empty() {
            return Err("No income data provided".to_string());
        }
        let avg_income = self.income.iter().sum::<u64>() / self.income.len() as u64;
        if avg_income < threshold {
            return Err("Income does not meet threshold".to_string());
        }
        // Prove: avg_income - threshold >= 0 (i.e., avg_income is in range [threshold, max])
        self.create_range_proof(
            avg_income,
            threshold,
            u64::MAX / 2,
            format!("Average monthly income >= ${:.2}", threshold as f64 / 100.0),
            "income",
        )
    }
    /// Prove: income >= multiplier × rent (affordability)
    ///
    /// # Errors
    /// Fails on invalid inputs (zero rent, multiplier outside 1..=100),
    /// missing income data, overflow, or insufficient income.
    pub fn prove_affordability(&mut self, rent: u64, multiplier: u64) -> Result<ZkRangeProof, String> {
        // Input validation to prevent trivial proof bypass
        if rent == 0 {
            return Err("Rent must be greater than zero".to_string());
        }
        if multiplier == 0 || multiplier > 100 {
            return Err("Multiplier must be between 1 and 100".to_string());
        }
        if self.income.is_empty() {
            return Err("No income data provided".to_string());
        }
        let avg_income = self.income.iter().sum::<u64>() / self.income.len() as u64;
        let required = rent.checked_mul(multiplier)
            .ok_or("Rent × multiplier overflow")?;
        if avg_income < required {
            return Err(format!(
                "Income ${:.2} does not meet {}x rent requirement ${:.2}",
                avg_income as f64 / 100.0,
                multiplier,
                required as f64 / 100.0
            ));
        }
        self.create_range_proof(
            avg_income,
            required,
            u64::MAX / 2,
            format!(
                "Income >= {}× monthly rent of ${:.2}",
                multiplier,
                rent as f64 / 100.0
            ),
            "affordability",
        )
    }
    /// Prove: minimum balance >= 0 for last N days (no overdrafts)
    ///
    /// # Errors
    /// Fails when balance data is missing or any balance in the window is
    /// negative.
    pub fn prove_no_overdrafts(&mut self, days: usize) -> Result<ZkRangeProof, String> {
        if self.balances.is_empty() {
            return Err("No balance data provided".to_string());
        }
        let relevant = if days < self.balances.len() {
            &self.balances[self.balances.len() - days..]
        } else {
            &self.balances[..]
        };
        let min_balance = *relevant.iter().min().unwrap_or(&0);
        if min_balance < 0 {
            return Err("Overdraft detected in the specified period".to_string());
        }
        // Prove minimum balance is non-negative
        self.create_range_proof(
            min_balance as u64,
            0,
            u64::MAX / 2,
            format!("No overdrafts in the past {} days", days),
            "no_overdraft",
        )
    }
    /// Prove: current savings >= threshold
    ///
    /// # Errors
    /// Fails when balance data is missing, the current balance is negative,
    /// or savings are below `threshold`.
    pub fn prove_savings_above(&mut self, threshold: u64) -> Result<ZkRangeProof, String> {
        if self.balances.is_empty() {
            return Err("No balance data provided".to_string());
        }
        let current = *self.balances.last().unwrap_or(&0);
        // Reject negatives first, then compare in u64: the previous
        // `threshold as i64` comparison wrapped for thresholds > i64::MAX.
        if current < 0 {
            return Err("Savings do not meet threshold".to_string());
        }
        let current = current as u64;
        if current < threshold {
            return Err("Savings do not meet threshold".to_string());
        }
        self.create_range_proof(
            current,
            threshold,
            u64::MAX / 2,
            format!("Current savings >= ${:.2}", threshold as f64 / 100.0),
            "savings",
        )
    }
    /// Prove: average spending in category <= budget
    ///
    /// # Errors
    /// Fails on invalid inputs (empty category, zero budget), missing or
    /// empty category data, or spending above `budget`.
    pub fn prove_budget_compliance(
        &mut self,
        category: &str,
        budget: u64,
    ) -> Result<ZkRangeProof, String> {
        // Input validation
        if category.is_empty() {
            return Err("Category must not be empty".to_string());
        }
        if budget == 0 {
            return Err("Budget must be greater than zero".to_string());
        }
        let expenses = self
            .expenses
            .get(category)
            .ok_or_else(|| format!("No data for category: {}", category))?;
        if expenses.is_empty() {
            return Err("No expense data for category".to_string());
        }
        let avg_spending = expenses.iter().sum::<u64>() / expenses.len() as u64;
        if avg_spending > budget {
            return Err(format!(
                "Average spending ${:.2} exceeds budget ${:.2}",
                avg_spending as f64 / 100.0,
                budget as f64 / 100.0
            ));
        }
        // Prove: avg_spending is in range [0, budget]
        self.create_range_proof(
            avg_spending,
            0,
            budget,
            format!(
                "Average {} spending <= ${:.2}/month",
                category,
                budget as f64 / 100.0
            ),
            &format!("budget_{}", category),
        )
    }
    // ========================================================================
    // Internal
    // ========================================================================
    /// Create a range proof using Bulletproofs.
    ///
    /// Proves the shifted value `value - min` lies in `[0, 2^bits)` where
    /// `bits` is the range width rounded up to a power of two (a
    /// Bulletproofs requirement). The verifier must derive `bits` the same
    /// way from the public bounds.
    fn create_range_proof(
        &mut self,
        value: u64,
        min: u64,
        max: u64,
        statement: String,
        key: &str,
    ) -> Result<ZkRangeProof, String> {
        // Shift value to prove it's in [0, max-min]
        let shifted_value = value.checked_sub(min).ok_or("Value below minimum")?;
        let range = max.checked_sub(min).ok_or("Invalid range")?;
        // Determine number of bits needed - Bulletproofs requires power of 2
        let raw_bits = (64 - range.leading_zeros()) as usize;
        // Round up to next power of 2: 8, 16, 32, or 64
        let bits = match raw_bits {
            0..=8 => 8,
            9..=16 => 16,
            17..=32 => 32,
            _ => 64,
        };
        // Generate or retrieve blinding factor.
        // Scalar is Copy, so dereference instead of the old needless clone.
        let blinding = *self
            .blindings
            .entry(key.to_string())
            .or_insert_with(|| Scalar::random(&mut OsRng));
        // Create commitment
        let commitment = PedersenCommitment::commit_with_blinding(shifted_value, &blinding);
        // Create Fiat-Shamir transcript — statement and bounds are bound into
        // the challenge, so the verifier must rebuild it in the same order.
        let mut transcript = Transcript::new(TRANSCRIPT_LABEL);
        transcript.append_message(b"statement", statement.as_bytes());
        transcript.append_u64(b"min", min);
        transcript.append_u64(b"max", max);
        // Generate Bulletproof
        let (proof, _) = BulletproofRangeProof::prove_single(
            &BP_GENS,
            &PC_GENS,
            &mut transcript,
            shifted_value,
            &blinding,
            bits,
        )
        .map_err(|e| format!("Proof generation failed: {:?}", e))?;
        let proof_bytes = proof.to_bytes();
        let metadata = ProofMetadata::new(&proof_bytes, Some(30)); // 30 day expiry
        Ok(ZkRangeProof {
            proof_bytes,
            commitment,
            min,
            max,
            statement,
            metadata,
        })
    }
}
impl Default for FinancialProver {
    /// Equivalent to [`FinancialProver::new`]: a prover with no data.
    fn default() -> Self {
        Self::new()
    }
}
// ============================================================================
// Financial Verifier
// ============================================================================
/// Verifier for financial proofs
///
/// Verifies ZK proofs without learning private values.
/// Stateless: all functionality is exposed as associated functions.
pub struct FinancialVerifier;
impl FinancialVerifier {
    /// Verify a range proof without learning the committed value.
    ///
    /// Checks, in order: expiry, proof-byte integrity hash, commitment
    /// decompression, then the Bulletproof itself against a transcript
    /// rebuilt from the public statement and bounds.
    ///
    /// # Errors
    /// Returns `Err` only for malformed inputs (bad commitment encoding,
    /// unparseable proof bytes); cryptographic failures are reported via
    /// `VerificationResult { valid: false, .. }`.
    pub fn verify(proof: &ZkRangeProof) -> Result<VerificationResult, String> {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        // Check expiration
        if proof.metadata.is_expired() {
            return Ok(VerificationResult {
                valid: false,
                statement: proof.statement.clone(),
                verified_at: now,
                error: Some("Proof has expired".to_string()),
            });
        }
        // Verify proof hash integrity (constant-time comparison)
        let mut hasher = Sha512::new();
        hasher.update(&proof.proof_bytes);
        let hash_result = hasher.finalize();
        let computed_hash: [u8; 32] = hash_result[..32].try_into().unwrap();
        if computed_hash.ct_ne(&proof.metadata.hash).into() {
            return Ok(VerificationResult {
                valid: false,
                statement: proof.statement.clone(),
                verified_at: now,
                error: Some("Proof integrity check failed".to_string()),
            });
        }
        // Decompress commitment
        let commitment_point = proof
            .commitment
            .decompress()
            .ok_or("Invalid commitment point")?;
        // Recreate transcript with same parameters (same order as the prover)
        let mut transcript = Transcript::new(TRANSCRIPT_LABEL);
        transcript.append_message(b"statement", proof.statement.as_bytes());
        transcript.append_u64(b"min", proof.min);
        transcript.append_u64(b"max", proof.max);
        // Parse bulletproof
        let bulletproof = BulletproofRangeProof::from_bytes(&proof.proof_bytes)
            .map_err(|e| format!("Invalid proof format: {:?}", e))?;
        // Determine bits from range - must match prover's power-of-2 calculation
        let range = proof.max.saturating_sub(proof.min);
        let raw_bits = (64 - range.leading_zeros()) as usize;
        let bits = match raw_bits {
            0..=8 => 8,
            9..=16 => 16,
            17..=32 => 32,
            _ => 64,
        };
        // Verify the bulletproof
        let result = bulletproof.verify_single(
            &BP_GENS,
            &PC_GENS,
            &mut transcript,
            &commitment_point.compress(),
            bits,
        );
        match result {
            Ok(_) => Ok(VerificationResult {
                valid: true,
                statement: proof.statement.clone(),
                verified_at: now,
                error: None,
            }),
            Err(e) => Ok(VerificationResult {
                valid: false,
                statement: proof.statement.clone(),
                verified_at: now,
                error: Some(format!("Verification failed: {:?}", e)),
            }),
        }
    }
    /// Batch verify multiple proofs (more efficient)
    pub fn verify_batch(proofs: &[ZkRangeProof]) -> Vec<VerificationResult> {
        // For now, verify individually
        // TODO: Implement batch verification for efficiency
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        proofs
            .iter()
            .map(|p| {
                Self::verify(p).unwrap_or_else(|e| VerificationResult {
                    valid: false,
                    statement: p.statement.clone(),
                    // Was hard-coded to 0, which mislabeled the verification
                    // time on malformed-input failures.
                    verified_at: now,
                    error: Some(e),
                })
            })
            .collect()
    }
}
// ============================================================================
// Composite Proofs
// ============================================================================
/// Complete rental application proof bundle
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RentalApplicationBundle {
    /// Proof of income meeting affordability requirement
    pub income_proof: ZkRangeProof,
    /// Proof of no overdrafts
    pub stability_proof: ZkRangeProof,
    /// Proof of savings buffer (optional)
    pub savings_proof: Option<ZkRangeProof>,
    /// Application metadata — anonymous identifier derived from the
    /// commitments plus the creation time
    pub application_id: String,
    /// When the bundle was created
    pub created_at: u64,
    /// Bundle hash for integrity: first 32 bytes of SHA-512 over the
    /// concatenated proof bytes
    pub bundle_hash: [u8; 32],
}
impl RentalApplicationBundle {
    /// Create a complete rental application bundle.
    ///
    /// When `savings_months` is present, additionally proves a savings
    /// buffer of `rent × months`.
    ///
    /// # Errors
    /// Propagates any individual proof failure; also fails when
    /// `rent × months` overflows.
    pub fn create(
        prover: &mut FinancialProver,
        rent: u64,
        income_multiplier: u64,
        stability_days: usize,
        savings_months: Option<u64>,
    ) -> Result<Self, String> {
        let income_proof = prover.prove_affordability(rent, income_multiplier)?;
        let stability_proof = prover.prove_no_overdrafts(stability_days)?;
        let savings_proof = match savings_months {
            Some(months) => {
                // checked_mul: the old `rent * months` could wrap silently
                // in release builds and prove a meaningless tiny buffer.
                let buffer = rent
                    .checked_mul(months)
                    .ok_or_else(|| "rent × savings_months overflows".to_string())?;
                Some(prover.prove_savings_above(buffer)?)
            }
            None => None,
        };
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_secs())
            .unwrap_or(0);
        // Generate application ID from the (public) commitments + timestamp
        let mut id_hasher = Sha512::new();
        id_hasher.update(&income_proof.commitment.point);
        id_hasher.update(&stability_proof.commitment.point);
        id_hasher.update(&now.to_le_bytes());
        let id_hash = id_hasher.finalize();
        let application_id = hex::encode(&id_hash[..16]);
        // Generate bundle hash over all proof bytes, in bundle order
        let mut bundle_hasher = Sha512::new();
        bundle_hasher.update(&income_proof.proof_bytes);
        bundle_hasher.update(&stability_proof.proof_bytes);
        if let Some(ref sp) = savings_proof {
            bundle_hasher.update(&sp.proof_bytes);
        }
        let bundle_hash_result = bundle_hasher.finalize();
        let mut bundle_hash = [0u8; 32];
        bundle_hash.copy_from_slice(&bundle_hash_result[..32]);
        Ok(Self {
            income_proof,
            stability_proof,
            savings_proof,
            application_id,
            created_at: now,
            bundle_hash,
        })
    }
    /// Verify the entire bundle.
    ///
    /// # Errors
    /// Returns `Err` when the bundle hash does not match (tampering) or any
    /// proof is malformed; returns `Ok(false)` for cryptographically
    /// invalid proofs.
    pub fn verify(&self) -> Result<bool, String> {
        // Verify bundle integrity (constant-time hash comparison)
        let mut bundle_hasher = Sha512::new();
        bundle_hasher.update(&self.income_proof.proof_bytes);
        bundle_hasher.update(&self.stability_proof.proof_bytes);
        if let Some(ref sp) = self.savings_proof {
            bundle_hasher.update(&sp.proof_bytes);
        }
        let computed_hash = bundle_hasher.finalize();
        if computed_hash[..32].ct_ne(&self.bundle_hash).into() {
            return Err("Bundle integrity check failed".to_string());
        }
        // Verify individual proofs, short-circuiting on first failure
        let income_result = FinancialVerifier::verify(&self.income_proof)?;
        if !income_result.valid {
            return Ok(false);
        }
        let stability_result = FinancialVerifier::verify(&self.stability_proof)?;
        if !stability_result.valid {
            return Ok(false);
        }
        if let Some(ref savings_proof) = self.savings_proof {
            let savings_result = FinancialVerifier::verify(savings_proof)?;
            if !savings_result.valid {
                return Ok(false);
            }
        }
        Ok(true)
    }
}
// ============================================================================
// Tests
// ============================================================================
#[cfg(test)]
mod tests {
    // End-to-end checks for the production module: each test runs the real
    // Bulletproof prove/verify cycle. Amounts are in cents.
    use super::*;
    #[test]
    fn test_income_proof() {
        let mut prover = FinancialProver::new();
        prover.set_income(vec![650000, 650000, 680000, 650000]); // ~$6500/month
        // Should succeed: income > $5000
        let proof = prover.prove_income_above(500000).unwrap();
        let result = FinancialVerifier::verify(&proof).unwrap();
        assert!(result.valid, "Proof should be valid");
        // Should fail: income < $10000
        let result = prover.prove_income_above(1000000);
        assert!(result.is_err(), "Should fail for threshold above income");
    }
    #[test]
    fn test_affordability_proof() {
        let mut prover = FinancialProver::new();
        prover.set_income(vec![650000, 650000, 650000, 650000]); // $6500/month
        // Should succeed: $6500 >= 3 × $2000
        let proof = prover.prove_affordability(200000, 3).unwrap();
        let result = FinancialVerifier::verify(&proof).unwrap();
        assert!(result.valid);
        // Should fail: $6500 < 3 × $3000
        let result = prover.prove_affordability(300000, 3);
        assert!(result.is_err());
    }
    #[test]
    fn test_no_overdraft_proof() {
        let mut prover = FinancialProver::new();
        prover.set_balances(vec![100000, 80000, 120000, 50000, 90000]); // All positive
        let proof = prover.prove_no_overdrafts(5).unwrap();
        let result = FinancialVerifier::verify(&proof).unwrap();
        assert!(result.valid);
    }
    #[test]
    fn test_overdraft_fails() {
        let mut prover = FinancialProver::new();
        prover.set_balances(vec![100000, -5000, 120000]); // Has overdraft
        let result = prover.prove_no_overdrafts(3);
        assert!(result.is_err());
    }
    #[test]
    fn test_rental_application_bundle() {
        let mut prover = FinancialProver::new();
        prover.set_income(vec![650000, 650000, 680000, 650000]);
        prover.set_balances(vec![500000, 520000, 480000, 510000, 530000]);
        let bundle = RentalApplicationBundle::create(
            &mut prover,
            200000, // $2000 rent
            3, // 3x income
            30, // 30 days stability
            Some(2), // 2 months savings
        )
        .unwrap();
        assert!(bundle.verify().unwrap());
    }
    #[test]
    fn test_proof_expiration() {
        let mut prover = FinancialProver::new();
        prover.set_income(vec![650000]);
        let mut proof = prover.prove_income_above(500000).unwrap();
        // Manually expire the proof
        proof.metadata.expires_at = Some(0);
        let result = FinancialVerifier::verify(&proof).unwrap();
        assert!(!result.valid);
        assert!(result.error.as_ref().unwrap().contains("expired"));
    }
    #[test]
    fn test_proof_integrity() {
        // Tampering with proof bytes must trip the SHA-512 integrity check.
        let mut prover = FinancialProver::new();
        prover.set_income(vec![650000]);
        let mut proof = prover.prove_income_above(500000).unwrap();
        // Tamper with the proof
        if !proof.proof_bytes.is_empty() {
            proof.proof_bytes[0] ^= 0xFF;
        }
        let result = FinancialVerifier::verify(&proof).unwrap();
        assert!(!result.valid);
    }
}

View File

@@ -0,0 +1,278 @@
//! Swarm communication protocol
//!
//! Defines message types and serialization for agent communication.
use crate::intelligence::LearningState;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Message types for swarm communication
///
/// With the serde derives below, the variant name itself is the wire tag.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum MessageType {
    /// Agent joining the swarm
    Join,
    /// Agent leaving the swarm
    Leave,
    /// Heartbeat/ping
    Ping,
    /// Heartbeat response
    Pong,
    /// Sync learning patterns
    SyncPatterns,
    /// Request patterns from peer
    RequestPatterns,
    /// Sync vector memories
    SyncMemories,
    /// Request memories from peer
    RequestMemories,
    /// Broadcast task to swarm
    BroadcastTask,
    /// Task result
    TaskResult,
    /// Coordinator election
    Election,
    /// Coordinator announcement
    Coordinator,
    /// Error message
    Error,
}
/// Swarm message envelope
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SwarmMessage {
    /// Unique message id (UUID v4, assigned at construction)
    pub id: String,
    /// Discriminates how `payload` should be interpreted
    pub message_type: MessageType,
    /// Id of the originating agent
    pub sender_id: String,
    pub recipient_id: Option<String>, // None = broadcast
    /// Typed message body; see `MessagePayload`
    pub payload: MessagePayload,
    /// Creation time, Unix milliseconds
    pub timestamp: u64,
    pub ttl: u32, // Time-to-live in hops
}
impl SwarmMessage {
/// Create new message
pub fn new(message_type: MessageType, sender_id: &str, payload: MessagePayload) -> Self {
Self {
id: Uuid::new_v4().to_string(),
message_type,
sender_id: sender_id.to_string(),
recipient_id: None,
payload,
timestamp: chrono::Utc::now().timestamp_millis() as u64,
ttl: 10,
}
}
/// Create directed message
pub fn directed(
message_type: MessageType,
sender_id: &str,
recipient_id: &str,
payload: MessagePayload,
) -> Self {
Self {
id: Uuid::new_v4().to_string(),
message_type,
sender_id: sender_id.to_string(),
recipient_id: Some(recipient_id.to_string()),
payload,
timestamp: chrono::Utc::now().timestamp_millis() as u64,
ttl: 10,
}
}
/// Create join message
pub fn join(agent_id: &str, role: &str, capabilities: Vec<String>) -> Self {
Self::new(
MessageType::Join,
agent_id,
MessagePayload::Join(JoinPayload {
agent_role: role.to_string(),
capabilities,
version: env!("CARGO_PKG_VERSION").to_string(),
}),
)
}
/// Create leave message
pub fn leave(agent_id: &str) -> Self {
Self::new(MessageType::Leave, agent_id, MessagePayload::Empty)
}
/// Create ping message
pub fn ping(agent_id: &str) -> Self {
Self::new(MessageType::Ping, agent_id, MessagePayload::Empty)
}
/// Create pong response
pub fn pong(agent_id: &str) -> Self {
Self::new(MessageType::Pong, agent_id, MessagePayload::Empty)
}
/// Create pattern sync message
pub fn sync_patterns(agent_id: &str, state: LearningState) -> Self {
Self::new(
MessageType::SyncPatterns,
agent_id,
MessagePayload::Patterns(PatternsPayload {
state,
compressed: false,
}),
)
}
/// Create pattern request message
pub fn request_patterns(agent_id: &str, since_version: u64) -> Self {
Self::new(
MessageType::RequestPatterns,
agent_id,
MessagePayload::Request(RequestPayload {
since_version,
max_entries: 1000,
}),
)
}
/// Create task broadcast message
pub fn broadcast_task(agent_id: &str, task: TaskPayload) -> Self {
Self::new(MessageType::BroadcastTask, agent_id, MessagePayload::Task(task))
}
/// Create error message
pub fn error(agent_id: &str, error: &str) -> Self {
Self::new(
MessageType::Error,
agent_id,
MessagePayload::Error(ErrorPayload {
code: "ERROR".to_string(),
message: error.to_string(),
}),
)
}
/// Serialize to bytes
pub fn to_bytes(&self) -> Result<Vec<u8>, serde_json::Error> {
serde_json::to_vec(self)
}
/// Deserialize from bytes
pub fn from_bytes(data: &[u8]) -> Result<Self, serde_json::Error> {
serde_json::from_slice(data)
}
/// Check if message is expired (based on timestamp)
pub fn is_expired(&self, max_age_ms: u64) -> bool {
let now = chrono::Utc::now().timestamp_millis() as u64;
now - self.timestamp > max_age_ms
}
/// Decrement TTL for forwarding
pub fn decrement_ttl(&mut self) -> bool {
if self.ttl > 0 {
self.ttl -= 1;
true
} else {
false
}
}
}
/// Message payload variants
///
/// Serialized with serde's internally-tagged representation: the payload
/// object carries a `"type"` field naming the variant.
///
/// NOTE(review): internally-tagged enums cannot serialize newtype variants
/// whose content is not a map/struct; `Raw(Vec<u8>)` serializes as a
/// sequence and is expected to fail at serialization time — confirm, and
/// consider a struct variant (`Raw { data: Vec<u8> }`) or adjacent tagging
/// if `Raw` is actually sent over the wire.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum MessagePayload {
    Empty,
    Join(JoinPayload),
    Patterns(PatternsPayload),
    Memories(MemoriesPayload),
    Request(RequestPayload),
    Task(TaskPayload),
    TaskResult(TaskResultPayload),
    Election(ElectionPayload),
    Error(ErrorPayload),
    Raw(Vec<u8>),
}
/// Payload for `MessageType::Join`: advertises the joining agent.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct JoinPayload {
    pub agent_role: String,
    pub capabilities: Vec<String>,
    /// Crate version of the joining agent (from CARGO_PKG_VERSION)
    pub version: String,
}
/// Learning-state snapshot exchanged during pattern sync.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PatternsPayload {
    pub state: LearningState,
    /// Whether `state` was compressed before sending
    pub compressed: bool,
}
/// Batch of vector-memory entries for memory sync.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoriesPayload {
    pub entries: Vec<u8>, // Compressed vector entries
    /// Number of entries encoded in `entries`
    pub count: usize,
}
/// Incremental-sync request: ask a peer for data newer than a version.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RequestPayload {
    pub since_version: u64,
    /// Cap on the number of entries the responder should return
    pub max_entries: usize,
}
/// Task broadcast to the swarm for execution.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskPayload {
    pub task_id: String,
    pub task_type: String,
    pub description: String,
    /// Free-form task arguments
    pub parameters: serde_json::Value,
    pub priority: u8,
    pub timeout_ms: u64,
}
/// Outcome of a previously broadcast task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskResultPayload {
    pub task_id: String,
    pub success: bool,
    pub result: serde_json::Value,
    pub execution_time_ms: u64,
}
/// Coordinator-election ballot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ElectionPayload {
    pub candidate_id: String,
    pub priority: u64,
    pub term: u64,
}
/// Error report carried by `MessageType::Error` messages.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrorPayload {
    pub code: String,
    pub message: String,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A join message survives a serialize/deserialize round trip.
    #[test]
    fn test_message_serialization() {
        let original = SwarmMessage::join("agent-001", "worker", vec!["compute".to_string()]);
        let roundtripped = SwarmMessage::from_bytes(&original.to_bytes().unwrap()).unwrap();
        assert_eq!(roundtripped.sender_id, "agent-001");
        assert!(matches!(roundtripped.message_type, MessageType::Join));
    }

    /// TTL counts down and refuses forwarding once exhausted.
    #[test]
    fn test_ttl_decrement() {
        let mut msg = SwarmMessage::ping("agent-001");
        assert_eq!(msg.ttl, 10);
        assert!(msg.decrement_ttl());
        assert_eq!(msg.ttl, 9);
        msg.ttl = 0;
        assert!(!msg.decrement_ttl());
    }
}

View File

@@ -0,0 +1,230 @@
//! Transport layer abstraction over ruv-swarm-transport
//!
//! Provides unified interface for WebSocket, SharedMemory, and WASM transports.
use crate::{Result, SwarmError};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::sync::{mpsc, RwLock};
/// Transport types supported
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Transport {
    /// WebSocket for remote communication
    WebSocket,
    /// SharedMemory for local high-performance IPC
    SharedMemory,
    /// WASM-compatible transport for browser
    /// (only compiled when the `wasm` feature is enabled)
    #[cfg(feature = "wasm")]
    Wasm,
}
impl Default for Transport {
fn default() -> Self {
Transport::WebSocket
}
}
/// Transport configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TransportConfig {
    /// Which transport implementation to use
    pub transport_type: Transport,
    /// Channel capacity; also interpreted as KB for the shared-memory
    /// segment size by `TransportFactory::create`
    pub buffer_size: usize,
    /// Delay between reconnect attempts — not consulted by the code in
    /// this module; presumably used by a higher-level reconnect loop
    /// (TODO confirm)
    pub reconnect_interval_ms: u64,
    /// Upper bound on a single message, in bytes
    pub max_message_size: usize,
    /// Whether payloads should be compressed on the wire
    pub enable_compression: bool,
}
impl Default for TransportConfig {
fn default() -> Self {
Self {
transport_type: Transport::WebSocket,
buffer_size: 1024,
reconnect_interval_ms: 5000,
max_message_size: 16 * 1024 * 1024, // 16MB
enable_compression: true,
}
}
}
/// Unified transport handle
///
/// NOTE(review): `sender` and `receiver` are the two ends of a single
/// in-process channel (see `TransportHandle::new`), so `send` currently
/// loops data back into this handle's own `recv` — confirm this mock
/// wiring is intended.
pub struct TransportHandle {
    pub(crate) transport_type: Transport,
    pub(crate) sender: mpsc::Sender<Vec<u8>>,
    pub(crate) receiver: Arc<RwLock<mpsc::Receiver<Vec<u8>>>>,
    /// Connection state flag, toggled by the concrete transports
    pub(crate) connected: Arc<RwLock<bool>>,
}
impl TransportHandle {
    /// Build a handle backed by a fresh in-process channel (capacity 1024).
    ///
    /// NOTE(review): `sender` and `receiver` are the two ends of the same
    /// channel, so `send` delivers into this handle's own `recv` — confirm
    /// this loopback is the intended mock wiring.
    pub fn new(transport_type: Transport) -> Self {
        let (sender, receiver) = mpsc::channel(1024);
        Self {
            transport_type,
            sender,
            receiver: Arc::new(RwLock::new(receiver)),
            connected: Arc::new(RwLock::new(false)),
        }
    }

    /// Whether the transport currently reports itself connected.
    pub async fn is_connected(&self) -> bool {
        *self.connected.read().await
    }

    /// Queue raw bytes for transmission.
    pub async fn send(&self, data: Vec<u8>) -> Result<()> {
        match self.sender.send(data).await {
            Ok(()) => Ok(()),
            Err(err) => Err(SwarmError::Transport(err.to_string())),
        }
    }

    /// Await the next raw payload; errors once the channel is closed.
    pub async fn recv(&self) -> Result<Vec<u8>> {
        let mut guard = self.receiver.write().await;
        match guard.recv().await {
            Some(bytes) => Ok(bytes),
            None => Err(SwarmError::Transport("Channel closed".into())),
        }
    }
}
/// WebSocket transport implementation
pub mod websocket {
    use super::*;
    /// WebSocket connection state
    pub struct WebSocketTransport {
        /// Remote endpoint URL
        pub url: String,
        /// Underlying channel-backed handle
        pub handle: TransportHandle,
    }
    impl WebSocketTransport {
        /// Connect to WebSocket server
        ///
        /// NOTE(review): mock — no socket is opened; the handle is simply
        /// flagged as connected after logging.
        pub async fn connect(url: &str) -> Result<Self> {
            let handle = TransportHandle::new(Transport::WebSocket);
            // In real implementation, use ruv-swarm-transport's WebSocket
            // For now, create a mock connection
            tracing::info!("Connecting to WebSocket: {}", url);
            *handle.connected.write().await = true;
            Ok(Self {
                url: url.to_string(),
                handle,
            })
        }
        /// Send message (delegates to the channel-backed handle)
        pub async fn send(&self, data: Vec<u8>) -> Result<()> {
            self.handle.send(data).await
        }
        /// Receive message (delegates to the channel-backed handle)
        pub async fn recv(&self) -> Result<Vec<u8>> {
            self.handle.recv().await
        }
    }
}
/// SharedMemory transport for local IPC
pub mod shared_memory {
    use super::*;

    /// Shared memory segment (currently mocked by an in-process channel).
    pub struct SharedMemoryTransport {
        /// Segment name
        pub name: String,
        /// Segment capacity in bytes
        pub size: usize,
        /// Underlying channel-backed handle
        pub handle: TransportHandle,
    }

    impl SharedMemoryTransport {
        /// Create or attach to shared memory
        pub fn new(name: &str, size: usize) -> Result<Self> {
            let handle = TransportHandle::new(Transport::SharedMemory);
            tracing::info!("Creating shared memory: {} ({}KB)", name, size / 1024);
            Ok(Self {
                name: name.to_string(),
                size,
                handle,
            })
        }

        /// Write to shared memory
        ///
        /// Returns `SwarmError::Transport` when `offset + data.len()`
        /// exceeds the segment size. NOTE(review): `offset` is not used
        /// beyond this bounds check in the channel-backed mock.
        pub async fn write(&self, offset: usize, data: &[u8]) -> Result<()> {
            // checked_add prevents usize overflow in `offset + len`, which
            // would otherwise panic in debug builds or wrap around and
            // silently bypass the bounds check in release builds.
            let end = offset
                .checked_add(data.len())
                .ok_or_else(|| SwarmError::Transport("Buffer overflow".into()))?;
            if end > self.size {
                return Err(SwarmError::Transport("Buffer overflow".into()));
            }
            self.handle.send(data.to_vec()).await
        }

        /// Read from shared memory
        ///
        /// NOTE(review): offset/len are ignored by the mock; the next
        /// queued payload is returned whole.
        pub async fn read(&self, _offset: usize, _len: usize) -> Result<Vec<u8>> {
            self.handle.recv().await
        }
    }
}
/// WASM-compatible transport
#[cfg(feature = "wasm")]
pub mod wasm_transport {
    use super::*;
    use wasm_bindgen::prelude::*;
    /// WASM transport using BroadcastChannel or postMessage
    ///
    /// NOTE(review): currently backed only by the in-process channel in
    /// `TransportHandle`; no browser messaging API is wired up yet.
    #[wasm_bindgen]
    pub struct WasmTransport {
        // Logical channel name for the browser-side message bus
        channel_name: String,
        handle: TransportHandle,
    }
    impl WasmTransport {
        /// Create a transport bound to the given channel name.
        pub fn new(channel_name: &str) -> Result<Self> {
            let handle = TransportHandle::new(Transport::Wasm);
            Ok(Self {
                channel_name: channel_name.to_string(),
                handle,
            })
        }
        /// Send bytes (delegates to the channel-backed handle).
        pub async fn broadcast(&self, data: Vec<u8>) -> Result<()> {
            self.handle.send(data).await
        }
        /// Await the next payload (delegates to the channel-backed handle).
        pub async fn receive(&self) -> Result<Vec<u8>> {
            self.handle.recv().await
        }
    }
}
/// Transport factory
pub struct TransportFactory;

impl TransportFactory {
    /// Build a connected transport handle for the configured transport type.
    ///
    /// `url` is only consulted for WebSocket, where it is mandatory;
    /// `buffer_size` is interpreted as KB for the shared-memory segment.
    pub async fn create(config: &TransportConfig, url: Option<&str>) -> Result<TransportHandle> {
        match config.transport_type {
            Transport::WebSocket => {
                let endpoint =
                    url.ok_or_else(|| SwarmError::Config("URL required for WebSocket".into()))?;
                let ws = websocket::WebSocketTransport::connect(endpoint).await?;
                Ok(ws.handle)
            }
            Transport::SharedMemory => {
                let segment = shared_memory::SharedMemoryTransport::new(
                    "ruvector-swarm",
                    config.buffer_size * 1024,
                )?;
                Ok(segment.handle)
            }
            #[cfg(feature = "wasm")]
            Transport::Wasm => {
                let transport = wasm_transport::WasmTransport::new("ruvector-channel")?;
                Ok(transport.handle)
            }
        }
    }
}

View File

@@ -0,0 +1,644 @@
//! WASM Bindings for RuVector Edge
//!
//! Exposes P2P swarm functionality to JavaScript/TypeScript via wasm-bindgen.
//!
//! ## Usage in JavaScript/TypeScript
//!
//! ```typescript
//! import init, {
//! WasmIdentity,
//! WasmCrypto,
//! WasmHnswIndex,
//! WasmSemanticMatcher,
//! WasmRaftNode
//! } from 'ruvector-edge';
//!
//! await init();
//!
//! // Create identity
//! const identity = new WasmIdentity();
//! console.log('Public key:', identity.publicKeyHex());
//!
//! // Sign and verify
//! const signature = identity.sign('Hello, World!');
//! console.log('Valid:', WasmIdentity.verify(identity.publicKeyHex(), 'Hello, World!', signature));
//! ```
#![cfg(feature = "wasm")]
use wasm_bindgen::prelude::*;
use serde::{Deserialize, Serialize};
use crate::p2p::{
IdentityManager, CryptoV2, HnswIndex, SemanticTaskMatcher,
RaftNode, RaftState, HybridKeyPair, SpikingNetwork,
BinaryQuantized, ScalarQuantized, AdaptiveCompressor, NetworkCondition,
};
// ============================================================================
// WASM Identity Manager
// ============================================================================
/// WASM-compatible Identity Manager for Ed25519/X25519 cryptography
#[wasm_bindgen]
pub struct WasmIdentity {
    // Underlying key material and signing logic
    inner: IdentityManager,
}
#[wasm_bindgen]
impl WasmIdentity {
    /// Create a new identity with generated keys
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        let inner = IdentityManager::new();
        Self { inner }
    }

    /// Get Ed25519 public key as hex string
    #[wasm_bindgen(js_name = publicKeyHex)]
    pub fn public_key_hex(&self) -> String {
        hex::encode(self.inner.public_key())
    }

    /// Get X25519 public key as hex string (for key exchange)
    #[wasm_bindgen(js_name = x25519PublicKeyHex)]
    pub fn x25519_public_key_hex(&self) -> String {
        hex::encode(self.inner.x25519_public_key())
    }

    /// Sign a UTF-8 message; returns the signature hex-encoded
    #[wasm_bindgen]
    pub fn sign(&self, message: &str) -> String {
        let signature = self.inner.sign(message.as_bytes());
        hex::encode(signature)
    }

    /// Sign raw bytes; returns the signature hex-encoded
    #[wasm_bindgen(js_name = signBytes)]
    pub fn sign_bytes(&self, data: &[u8]) -> String {
        let signature = self.inner.sign(data);
        hex::encode(signature)
    }

    /// Verify a signature (static method); any malformed input yields `false`
    #[wasm_bindgen]
    pub fn verify(public_key_hex: &str, message: &str, signature_hex: &str) -> bool {
        let Ok(pubkey_bytes) = hex::decode(public_key_hex) else {
            return false;
        };
        let Ok(sig_bytes) = hex::decode(signature_hex) else {
            return false;
        };
        // try_from enforces the exact 32-byte key / 64-byte signature lengths.
        let Ok(pubkey) = <[u8; 32]>::try_from(pubkey_bytes.as_slice()) else {
            return false;
        };
        let Ok(signature) = <[u8; 64]>::try_from(sig_bytes.as_slice()) else {
            return false;
        };
        crate::p2p::KeyPair::verify(&pubkey, message.as_bytes(), &signature)
    }

    /// Generate a random nonce
    #[wasm_bindgen(js_name = generateNonce)]
    pub fn generate_nonce() -> String {
        IdentityManager::generate_nonce()
    }

    /// Create a signed registration for this identity
    #[wasm_bindgen(js_name = createRegistration)]
    pub fn create_registration(&self, agent_id: &str, capabilities: JsValue) -> Result<JsValue, JsValue> {
        let caps: Vec<String> = match serde_wasm_bindgen::from_value(capabilities) {
            Ok(list) => list,
            Err(e) => return Err(JsValue::from_str(&e.to_string())),
        };
        let registration = self.inner.create_registration(agent_id, caps);
        serde_wasm_bindgen::to_value(&registration)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
}
impl Default for WasmIdentity {
fn default() -> Self {
Self::new()
}
}
// ============================================================================
// WASM Crypto Utilities
// ============================================================================
/// WASM-compatible cryptographic utilities
#[wasm_bindgen]
pub struct WasmCrypto;

#[wasm_bindgen]
impl WasmCrypto {
    /// SHA-256 digest of raw bytes, hex-encoded
    #[wasm_bindgen]
    pub fn sha256(data: &[u8]) -> String {
        CryptoV2::hash_hex(data)
    }

    /// SHA-256 digest of a UTF-8 string, hex-encoded
    #[wasm_bindgen(js_name = sha256String)]
    pub fn sha256_string(text: &str) -> String {
        CryptoV2::hash_hex(text.as_bytes())
    }

    /// Generate a local CID for data
    #[wasm_bindgen(js_name = generateCid)]
    pub fn generate_cid(data: &[u8]) -> String {
        CryptoV2::generate_local_cid(data)
    }

    /// Encrypt data with AES-256-GCM (key as 64-hex-char / 32-byte string)
    #[wasm_bindgen]
    pub fn encrypt(data: &[u8], key_hex: &str) -> Result<JsValue, JsValue> {
        let key_bytes = hex::decode(key_hex)
            .map_err(|e| JsValue::from_str(&format!("Invalid key hex: {}", e)))?;
        // try_into fails exactly when the decoded key is not 32 bytes.
        let key: [u8; 32] = key_bytes
            .as_slice()
            .try_into()
            .map_err(|_| JsValue::from_str("Key must be 32 bytes (64 hex chars)"))?;
        let encrypted = CryptoV2::encrypt(data, &key).map_err(|e| JsValue::from_str(&e))?;
        serde_wasm_bindgen::to_value(&encrypted).map_err(|e| JsValue::from_str(&e.to_string()))
    }

    /// Decrypt data with AES-256-GCM
    #[wasm_bindgen]
    pub fn decrypt(encrypted: JsValue, key_hex: &str) -> Result<Vec<u8>, JsValue> {
        let key_bytes = hex::decode(key_hex)
            .map_err(|e| JsValue::from_str(&format!("Invalid key hex: {}", e)))?;
        let key: [u8; 32] = key_bytes
            .as_slice()
            .try_into()
            .map_err(|_| JsValue::from_str("Key must be 32 bytes"))?;
        let encrypted_payload: crate::p2p::EncryptedPayload =
            serde_wasm_bindgen::from_value(encrypted)
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
        CryptoV2::decrypt(&encrypted_payload, &key).map_err(|e| JsValue::from_str(&e))
    }
}
// ============================================================================
// WASM HNSW Vector Index
// ============================================================================
/// WASM-compatible HNSW index for fast vector similarity search
#[wasm_bindgen]
pub struct WasmHnswIndex {
    inner: HnswIndex,
}

#[wasm_bindgen]
impl WasmHnswIndex {
    /// Create new HNSW index with default parameters
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        let inner = HnswIndex::new();
        Self { inner }
    }

    /// Create with custom parameters (m = connections per node, ef = search width)
    #[wasm_bindgen(js_name = withParams)]
    pub fn with_params(m: usize, ef_construction: usize) -> Self {
        let inner = HnswIndex::with_params(m, ef_construction);
        Self { inner }
    }

    /// Insert a vector with an ID
    #[wasm_bindgen]
    pub fn insert(&mut self, id: &str, vector: Vec<f32>) {
        self.inner.insert(id, vector);
    }

    /// Search for k nearest neighbors, returns JSON array of {id, distance}
    #[wasm_bindgen]
    pub fn search(&self, query: Vec<f32>, k: usize) -> JsValue {
        let mut hits = Vec::new();
        for (id, distance) in self.inner.search(&query, k) {
            hits.push(SearchResult { id, distance });
        }
        serde_wasm_bindgen::to_value(&hits).unwrap_or(JsValue::NULL)
    }

    /// Get number of vectors in index
    #[wasm_bindgen]
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Check if index is empty
    #[wasm_bindgen(js_name = isEmpty)]
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}

impl Default for WasmHnswIndex {
    fn default() -> Self {
        WasmHnswIndex::new()
    }
}

/// JSON shape returned by [`WasmHnswIndex::search`].
#[derive(Serialize, Deserialize)]
struct SearchResult {
    id: String,
    distance: f32,
}
// ============================================================================
// WASM Semantic Task Matcher
// ============================================================================
/// WASM-compatible semantic task matcher for intelligent agent routing
#[wasm_bindgen]
pub struct WasmSemanticMatcher {
    inner: SemanticTaskMatcher,
}

#[wasm_bindgen]
impl WasmSemanticMatcher {
    /// Create new semantic matcher
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        let inner = SemanticTaskMatcher::new();
        Self { inner }
    }

    /// Register an agent with capability description
    #[wasm_bindgen(js_name = registerAgent)]
    pub fn register_agent(&mut self, agent_id: &str, capabilities: &str) {
        self.inner.register_agent(agent_id, capabilities);
    }

    /// Find best matching agent for a task, returns {agentId, score} or null
    #[wasm_bindgen(js_name = matchAgent)]
    pub fn match_agent(&self, task_description: &str) -> JsValue {
        let Some((agent_id, score)) = self.inner.match_agent(task_description) else {
            return JsValue::NULL;
        };
        serde_wasm_bindgen::to_value(&MatchResult { agent_id, score }).unwrap_or(JsValue::NULL)
    }

    /// Get number of registered agents
    ///
    /// NOTE(review): always returns 0 — the inner matcher does not expose
    /// a count; add an accessor to `SemanticTaskMatcher` to implement this.
    #[wasm_bindgen(js_name = agentCount)]
    pub fn agent_count(&self) -> usize {
        0
    }
}

impl Default for WasmSemanticMatcher {
    fn default() -> Self {
        WasmSemanticMatcher::new()
    }
}

/// JSON shape returned by [`WasmSemanticMatcher::match_agent`].
#[derive(Serialize, Deserialize)]
struct MatchResult {
    #[serde(rename = "agentId")]
    agent_id: String,
    score: f32,
}
// ============================================================================
// WASM Raft Consensus
// ============================================================================
/// WASM-compatible Raft consensus node
#[wasm_bindgen]
pub struct WasmRaftNode {
inner: RaftNode,
}
#[wasm_bindgen]
impl WasmRaftNode {
/// Create new Raft node with cluster members
#[wasm_bindgen(constructor)]
pub fn new(node_id: &str, members: JsValue) -> Result<WasmRaftNode, JsValue> {
let member_list: Vec<String> = serde_wasm_bindgen::from_value(members)
.map_err(|e| JsValue::from_str(&e.to_string()))?;
Ok(Self {
inner: RaftNode::new(node_id, member_list),
})
}
/// Get current state (Follower, Candidate, Leader)
#[wasm_bindgen]
pub fn state(&self) -> String {
format!("{:?}", self.inner.state)
}
/// Get current term
#[wasm_bindgen]
pub fn term(&self) -> u64 {
self.inner.current_term
}
/// Check if this node is the leader
#[wasm_bindgen(js_name = isLeader)]
pub fn is_leader(&self) -> bool {
matches!(self.inner.state, RaftState::Leader)
}
/// Start an election (returns vote request as JSON)
#[wasm_bindgen(js_name = startElection)]
pub fn start_election(&mut self) -> JsValue {
let request = self.inner.start_election();
serde_wasm_bindgen::to_value(&request).unwrap_or(JsValue::NULL)
}
/// Handle a vote request (returns vote response as JSON)
#[wasm_bindgen(js_name = handleVoteRequest)]
pub fn handle_vote_request(&mut self, request: JsValue) -> Result<JsValue, JsValue> {
let req: crate::p2p::RaftVoteRequest = serde_wasm_bindgen::from_value(request)
.map_err(|e| JsValue::from_str(&e.to_string()))?;
let response = self.inner.handle_vote_request(&req);
serde_wasm_bindgen::to_value(&response)
.map_err(|e| JsValue::from_str(&e.to_string()))
}
/// Handle a vote response (returns true if we became leader)
#[wasm_bindgen(js_name = handleVoteResponse)]
pub fn handle_vote_response(&mut self, response: JsValue) -> Result<bool, JsValue> {
let resp: crate::p2p::RaftVoteResponse = serde_wasm_bindgen::from_value(response)
.map_err(|e| JsValue::from_str(&e.to_string()))?;
Ok(self.inner.handle_vote_response(&resp))
}
/// Append entry to log (leader only), returns log index or null
#[wasm_bindgen(js_name = appendEntry)]
pub fn append_entry(&mut self, data: &[u8]) -> JsValue {
match self.inner.append_entry(data.to_vec()) {
Some(index) => JsValue::from_f64(index as f64),
None => JsValue::NULL,
}
}
/// Get commit index
#[wasm_bindgen(js_name = getCommitIndex)]
pub fn get_commit_index(&self) -> u64 {
self.inner.commit_index
}
/// Get log length
#[wasm_bindgen(js_name = getLogLength)]
pub fn get_log_length(&self) -> usize {
self.inner.log.len()
}
}
// ============================================================================
// WASM Post-Quantum Crypto
// ============================================================================
/// WASM-compatible hybrid post-quantum signatures (Ed25519 + Dilithium-style)
#[wasm_bindgen]
pub struct WasmHybridKeyPair {
    inner: HybridKeyPair,
}

#[wasm_bindgen]
impl WasmHybridKeyPair {
    /// Generate new hybrid keypair
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        let inner = HybridKeyPair::generate();
        Self { inner }
    }

    /// Get the Ed25519 half of the public key as hex
    #[wasm_bindgen(js_name = publicKeyHex)]
    pub fn public_key_hex(&self) -> String {
        let pubkey = self.inner.public_key_bytes();
        hex::encode(&pubkey.ed25519)
    }

    /// Sign message with hybrid signature (JSON-serialized signature struct)
    ///
    /// NOTE(review): returns an empty string if serialization fails.
    #[wasm_bindgen]
    pub fn sign(&self, message: &[u8]) -> String {
        let signature = self.inner.sign(message);
        serde_json::to_string(&signature).unwrap_or_default()
    }

    /// Verify hybrid signature (pubkey and signature both as JSON)
    #[wasm_bindgen]
    pub fn verify(public_key_json: &str, message: &[u8], signature_json: &str) -> bool {
        let pubkey: crate::p2p::HybridPublicKey = match serde_json::from_str(public_key_json) {
            Ok(key) => key,
            Err(_) => return false,
        };
        let signature: crate::p2p::HybridSignature = match serde_json::from_str(signature_json) {
            Ok(sig) => sig,
            Err(_) => return false,
        };
        HybridKeyPair::verify(&pubkey, message, &signature)
    }
}

impl Default for WasmHybridKeyPair {
    fn default() -> Self {
        WasmHybridKeyPair::new()
    }
}
// ============================================================================
// WASM Spiking Neural Network
// ============================================================================
/// WASM-compatible spiking neural network for temporal pattern recognition
#[wasm_bindgen]
pub struct WasmSpikingNetwork {
    inner: SpikingNetwork,
}

#[wasm_bindgen]
impl WasmSpikingNetwork {
    /// Create new spiking network
    #[wasm_bindgen(constructor)]
    pub fn new(input_size: usize, hidden_size: usize, output_size: usize) -> Self {
        let inner = SpikingNetwork::new(input_size, hidden_size, output_size);
        Self { inner }
    }

    /// Process input spikes (nonzero byte = spike) and return output spikes (0/1)
    #[wasm_bindgen]
    pub fn forward(&mut self, inputs: Vec<u8>) -> Vec<u8> {
        let spikes: Vec<bool> = inputs.iter().map(|&v| v != 0).collect();
        self.inner
            .forward(&spikes)
            .iter()
            .map(|&fired| u8::from(fired))
            .collect()
    }

    /// Apply STDP learning rule
    #[wasm_bindgen(js_name = stdpUpdate)]
    pub fn stdp_update(&mut self, pre: Vec<u8>, post: Vec<u8>, learning_rate: f32) {
        let pre_spikes: Vec<bool> = pre.iter().map(|&v| v != 0).collect();
        let post_spikes: Vec<bool> = post.iter().map(|&v| v != 0).collect();
        self.inner.stdp_update(&pre_spikes, &post_spikes, learning_rate);
    }

    /// Reset network state
    #[wasm_bindgen]
    pub fn reset(&mut self) {
        self.inner.reset();
    }
}
// ============================================================================
// WASM Quantization
// ============================================================================
/// WASM-compatible vector quantization utilities
#[wasm_bindgen]
pub struct WasmQuantizer;

#[wasm_bindgen]
impl WasmQuantizer {
    /// Binary quantize a vector (32x compression); returns the packed bits
    #[wasm_bindgen(js_name = binaryQuantize)]
    pub fn binary_quantize(vector: Vec<f32>) -> Vec<u8> {
        let quantized = BinaryQuantized::quantize(&vector);
        quantized.bits.to_vec()
    }

    /// Scalar quantize a vector (4x compression); returns {data, min, scale}
    #[wasm_bindgen(js_name = scalarQuantize)]
    pub fn scalar_quantize(vector: Vec<f32>) -> JsValue {
        let quantized = ScalarQuantized::quantize(&vector);
        // Move `data` out of `quantized` instead of cloning — the quantized
        // value is consumed here, so the copy was pure overhead.
        let js = ScalarQuantizedJs {
            min: quantized.min,
            scale: quantized.scale,
            data: quantized.data,
        };
        serde_wasm_bindgen::to_value(&js).unwrap_or(JsValue::NULL)
    }

    /// Reconstruct approximate f32 values from a scalar-quantized payload
    #[wasm_bindgen(js_name = scalarDequantize)]
    pub fn scalar_dequantize(quantized: JsValue) -> Result<Vec<f32>, JsValue> {
        let q: ScalarQuantizedJs = serde_wasm_bindgen::from_value(quantized)
            .map_err(|e| JsValue::from_str(&e.to_string()))?;
        let sq = ScalarQuantized {
            data: q.data,
            min: q.min,
            scale: q.scale,
        };
        Ok(sq.reconstruct())
    }

    /// Compute hamming distance between binary quantized vectors
    ///
    /// Note: zips to the shorter of the two slices; trailing bytes of the
    /// longer input are ignored.
    #[wasm_bindgen(js_name = hammingDistance)]
    pub fn hamming_distance(a: &[u8], b: &[u8]) -> u32 {
        a.iter()
            .zip(b.iter())
            .map(|(x, y)| (x ^ y).count_ones())
            .sum()
    }
}

/// JS-facing mirror of `ScalarQuantized` with serde support.
#[derive(Serialize, Deserialize)]
struct ScalarQuantizedJs {
    data: Vec<u8>,
    min: f32,
    scale: f32,
}
// ============================================================================
// WASM Adaptive Compressor
// ============================================================================
/// WASM-compatible network-aware adaptive compression
#[wasm_bindgen]
pub struct WasmAdaptiveCompressor {
    inner: AdaptiveCompressor,
}

#[wasm_bindgen]
impl WasmAdaptiveCompressor {
    /// Create new adaptive compressor
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        let inner = AdaptiveCompressor::new();
        Self { inner }
    }

    /// Update network metrics (bandwidth in Mbps, latency in ms)
    #[wasm_bindgen(js_name = updateMetrics)]
    pub fn update_metrics(&mut self, bandwidth_mbps: f32, latency_ms: f32) {
        self.inner.update_metrics(bandwidth_mbps, latency_ms);
    }

    /// Get current network condition as a lowercase label
    #[wasm_bindgen]
    pub fn condition(&self) -> String {
        let label = match self.inner.condition() {
            NetworkCondition::Excellent => "excellent",
            NetworkCondition::Good => "good",
            NetworkCondition::Poor => "poor",
            NetworkCondition::Critical => "critical",
        };
        label.to_string()
    }

    /// Compress vector based on network conditions
    #[wasm_bindgen]
    pub fn compress(&self, data: Vec<f32>) -> JsValue {
        let compressed = self.inner.compress(&data);
        serde_wasm_bindgen::to_value(&compressed).unwrap_or(JsValue::NULL)
    }
}

impl Default for WasmAdaptiveCompressor {
    fn default() -> Self {
        WasmAdaptiveCompressor::new()
    }
}
// ============================================================================
// Initialization
// ============================================================================
/// Initialize the WASM module (call once on load)
///
/// Runs automatically on module instantiation via `#[wasm_bindgen(start)]`.
#[wasm_bindgen(start)]
pub fn init() {
    // Set up panic hook for better error messages in console
    // (compiled in only when the `console_error_panic_hook` feature is on)
    #[cfg(feature = "console_error_panic_hook")]
    console_error_panic_hook::set_once();
}
/// Get library version
#[wasm_bindgen]
pub fn version() -> String {
    String::from(env!("CARGO_PKG_VERSION"))
}