Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,356 @@
//! Artifact Store - Local CID-based Storage
//!
//! Security principles:
//! - LOCAL-ONLY mode by default (no gateway fetch)
//! - Real IPFS CID support requires multiformats (not yet implemented)
//! - LRU cache with configurable size limits
//! - Chunk-based storage for large artifacts
use crate::p2p::crypto::CryptoV2;
use crate::p2p::identity::IdentityManager;
use crate::p2p::envelope::{ArtifactPointer, ArtifactType};
use parking_lot::RwLock;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use lz4_flex;
/// Chunk size for large artifacts (256KB); in this file it is only used to
/// report the chunk count in StoredArtifactInfo
pub const CHUNK_SIZE: usize = 256 * 1024;
/// Maximum artifact size (10MB); ArtifactStore::store rejects anything larger
pub const MAX_ARTIFACT_SIZE: usize = 10 * 1024 * 1024;
/// Default cache size (100MB); LRU eviction keeps total stored bytes under this
pub const DEFAULT_CACHE_SIZE: usize = 100 * 1024 * 1024;
/// Stored artifact with metadata
#[derive(Debug, Clone)]
struct StoredArtifact {
    // Bytes as kept in the cache (LZ4 size-prepended format when `compressed`)
    data: Vec<u8>,
    // Whether `data` was compressed with lz4_flex::compress_prepend_size
    compressed: bool,
    // Unix timestamp in milliseconds when the artifact was stored
    created_at: u64,
}
/// Artifact Store with LRU eviction
///
/// Shared state lives behind parking_lot RwLocks inside Arcs, so the store
/// can be shared across threads.
pub struct ArtifactStore {
    // CID -> stored bytes + metadata
    cache: Arc<RwLock<HashMap<String, StoredArtifact>>>,
    // CIDs in least-recently-used order (front = oldest, next to evict)
    lru_order: Arc<RwLock<VecDeque<String>>>,
    // Total stored (possibly compressed) bytes currently held in `cache`
    current_size: Arc<RwLock<usize>>,
    // Eviction threshold for `current_size`, in bytes
    max_cache_size: usize,
    // Per-artifact size cap enforced by store()
    max_artifact_size: usize,
    /// If true, allows gateway fetch (requires real IPFS CIDs)
    /// Default: false (local-only mode)
    enable_gateway_fetch: bool,
}
impl ArtifactStore {
    /// Create new artifact store with default settings (local-only)
    pub fn new() -> Self {
        Self::with_config(DEFAULT_CACHE_SIZE, MAX_ARTIFACT_SIZE)
    }

    /// Create with custom settings
    ///
    /// `max_cache_size` bounds the total stored bytes (LRU eviction keeps the
    /// store under it); `max_artifact_size` bounds a single artifact.
    pub fn with_config(max_cache_size: usize, max_artifact_size: usize) -> Self {
        Self {
            cache: Arc::new(RwLock::new(HashMap::new())),
            lru_order: Arc::new(RwLock::new(VecDeque::new())),
            current_size: Arc::new(RwLock::new(0)),
            max_cache_size,
            max_artifact_size,
            enable_gateway_fetch: false, // LOCAL-ONLY by default
        }
    }

    /// Store artifact and return local CID.
    ///
    /// Payloads larger than 1KB are LZ4-compressed when `compress` is set, and
    /// the compressed form is kept only if it is actually smaller. The CID is
    /// derived from the ORIGINAL bytes, so it is stable regardless of
    /// compression.
    ///
    /// Locking: every method in this impl acquires locks in the fixed order
    /// cache -> lru -> size (and never holds a later lock while waiting for an
    /// earlier one), which prevents the deadlocks possible with ad-hoc
    /// ordering. Eviction, accounting, and insertion happen in ONE critical
    /// section so concurrent stores cannot interleave half-updated state.
    pub fn store(&self, data: &[u8], compress: bool) -> Result<StoredArtifactInfo, String> {
        if data.len() > self.max_artifact_size {
            return Err(format!(
                "Artifact too large: {} > {}",
                data.len(),
                self.max_artifact_size
            ));
        }
        let (stored_data, compressed) = if compress && data.len() > 1024 {
            // Compress if larger than 1KB; keep only a real size win
            match lz4_flex::compress_prepend_size(data) {
                c if c.len() < data.len() => (c, true),
                _ => (data.to_vec(), false),
            }
        } else {
            (data.to_vec(), false)
        };
        // Generate local CID (NOT a real IPFS CID) from the original bytes
        let cid = CryptoV2::generate_local_cid(data);
        let new_len = stored_data.len();
        let artifact = StoredArtifact {
            data: stored_data,
            compressed,
            created_at: chrono::Utc::now().timestamp_millis() as u64,
        };
        {
            // Fixed lock order: cache -> lru -> size
            let mut cache = self.cache.write();
            let mut lru = self.lru_order.write();
            let mut size = self.current_size.write();
            // Re-storing identical content: retire the old entry's accounting
            // first so the size counter and LRU queue never accumulate
            // duplicates for the same CID.
            if let Some(old) = cache.remove(&cid) {
                *size = size.saturating_sub(old.data.len());
                if let Some(pos) = lru.iter().position(|c| c == &cid) {
                    lru.remove(pos);
                }
            }
            // Evict oldest entries until the new artifact fits
            while *size + new_len > self.max_cache_size && !lru.is_empty() {
                if let Some(oldest_cid) = lru.pop_front() {
                    if let Some(evicted) = cache.remove(&oldest_cid) {
                        *size = size.saturating_sub(evicted.data.len());
                        tracing::debug!("Evicted artifact: {}", oldest_cid);
                    }
                }
            }
            cache.insert(cid.clone(), artifact);
            lru.push_back(cid.clone());
            *size += new_len;
        }
        // Ceiling division: number of CHUNK_SIZE chunks covering the original
        let chunks = (data.len() + CHUNK_SIZE - 1) / CHUNK_SIZE;
        Ok(StoredArtifactInfo {
            cid,
            original_size: data.len(),
            stored_size: new_len,
            compressed,
            chunks,
        })
    }

    /// Retrieve artifact by CID (local-only; no gateway fetch).
    ///
    /// Returns the DECOMPRESSED bytes, or None if the CID is unknown or
    /// decompression fails.
    pub fn retrieve(&self, cid: &str) -> Option<Vec<u8>> {
        // Clone the entry and release the cache lock before touching the LRU
        // queue, so we never hold cache + lru simultaneously here.
        let artifact = self.cache.read().get(cid)?.clone();
        {
            // Mark as most-recently used (move to back)
            let mut lru = self.lru_order.write();
            if let Some(pos) = lru.iter().position(|c| c == cid) {
                lru.remove(pos);
                lru.push_back(cid.to_string());
            }
        }
        if artifact.compressed {
            lz4_flex::decompress_size_prepended(&artifact.data).ok()
        } else {
            Some(artifact.data)
        }
    }

    /// Check if artifact exists locally
    pub fn exists(&self, cid: &str) -> bool {
        self.cache.read().contains_key(cid)
    }

    /// Delete artifact; returns true if it was present.
    pub fn delete(&self, cid: &str) -> bool {
        // Fixed lock order: cache -> lru -> size
        let mut cache = self.cache.write();
        if let Some(artifact) = cache.remove(cid) {
            let mut lru = self.lru_order.write();
            if let Some(pos) = lru.iter().position(|c| c == cid) {
                lru.remove(pos);
            }
            let mut size = self.current_size.write();
            *size = size.saturating_sub(artifact.data.len());
            true
        } else {
            false
        }
    }

    /// Get cache statistics.
    ///
    /// `current_size` is read once so the reported size and utilization agree.
    pub fn stats(&self) -> ArtifactStoreStats {
        let artifact_count = self.cache.read().len();
        let current_size = *self.current_size.read();
        ArtifactStoreStats {
            artifact_count,
            current_size,
            max_size: self.max_cache_size,
            utilization: current_size as f64 / self.max_cache_size as f64,
        }
    }

    /// Create artifact pointer for publishing.
    ///
    /// The checksum covers the DECOMPRESSED content (independent of storage
    /// compression); the pointer is signed with `identity`'s key.
    pub fn create_pointer(
        &self,
        artifact_type: ArtifactType,
        agent_id: &str,
        cid: &str,
        dimensions: &str,
        identity: &IdentityManager,
    ) -> Option<ArtifactPointer> {
        // Clone out of the cache so the read lock is not held while hashing
        // and signing below.
        let artifact = self.cache.read().get(cid)?.clone();
        let data = if artifact.compressed {
            lz4_flex::decompress_size_prepended(&artifact.data).ok()?
        } else {
            artifact.data
        };
        // Compute checksum (first 16 bytes of content hash)
        let hash = CryptoV2::hash(&data);
        let mut checksum = [0u8; 16];
        checksum.copy_from_slice(&hash[..16]);
        // Schema hash (first 8 bytes of type name hash)
        let type_name = format!("{:?}", artifact_type);
        let type_hash = CryptoV2::hash(type_name.as_bytes());
        let mut schema_hash = [0u8; 8];
        schema_hash.copy_from_slice(&type_hash[..8]);
        let mut pointer = ArtifactPointer {
            artifact_type,
            agent_id: agent_id.to_string(),
            cid: cid.to_string(),
            version: 1,
            schema_hash,
            dimensions: dimensions.to_string(),
            checksum,
            timestamp: chrono::Utc::now().timestamp_millis() as u64,
            signature: [0u8; 64],
        };
        // Sign the canonical representation of the pointer fields
        let canonical = pointer.canonical_for_signing();
        pointer.signature = identity.sign(canonical.as_bytes());
        Some(pointer)
    }

    /// Clear all artifacts
    pub fn clear(&self) {
        // Fixed lock order: cache -> lru -> size
        let mut cache = self.cache.write();
        let mut lru = self.lru_order.write();
        let mut size = self.current_size.write();
        cache.clear();
        lru.clear();
        *size = 0;
    }
}
impl Default for ArtifactStore {
    // Defaults: 100MB cache, 10MB max artifact, local-only mode
    fn default() -> Self {
        Self::new()
    }
}
/// Information about stored artifact
#[derive(Debug, Clone)]
pub struct StoredArtifactInfo {
    // Local CID ("local:" + hash prefix of the ORIGINAL bytes)
    pub cid: String,
    // Size of the uncompressed input, in bytes
    pub original_size: usize,
    // Size actually kept in the cache (compressed when `compressed` is true)
    pub stored_size: usize,
    // Whether LZ4 compression was applied and kept
    pub compressed: bool,
    // Number of CHUNK_SIZE chunks covering the original data (ceiling)
    pub chunks: usize,
}
/// Artifact store statistics
#[derive(Debug, Clone)]
pub struct ArtifactStoreStats {
    // Number of artifacts currently cached
    pub artifact_count: usize,
    // Total stored bytes currently held
    pub current_size: usize,
    // Configured cache capacity in bytes
    pub max_size: usize,
    // current_size / max_size (0.0..=1.0 under normal operation)
    pub utilization: f64,
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trip: stored bytes come back identical and the CID carries the
    // "local:" prefix (explicitly NOT a real IPFS CID).
    #[test]
    fn test_store_and_retrieve() {
        let store = ArtifactStore::new();
        let data = b"Hello, World!";
        let info = store.store(data, false).unwrap();
        assert!(info.cid.starts_with("local:"));
        let retrieved = store.retrieve(&info.cid).unwrap();
        assert_eq!(data.as_slice(), retrieved.as_slice());
    }
    // Highly repetitive data must compress, and retrieval must transparently
    // decompress back to the original.
    #[test]
    fn test_compression() {
        let store = ArtifactStore::new();
        // Create data that compresses well
        let data = vec![0u8; 10000];
        let info = store.store(&data, true).unwrap();
        assert!(info.compressed);
        assert!(info.stored_size < info.original_size);
        let retrieved = store.retrieve(&info.cid).unwrap();
        assert_eq!(data, retrieved);
    }
    // Per-artifact cap (second config argument) rejects oversized payloads.
    #[test]
    fn test_size_limit() {
        // Set max_artifact_size to 100 bytes
        let store = ArtifactStore::with_config(1024, 100);
        // Store artifact too large should fail
        let data = vec![0u8; 200];
        let result = store.store(&data, false);
        assert!(result.is_err()); // Should fail due to max_artifact_size
        // Small artifact should succeed
        let small_data = vec![0u8; 50];
        let result = store.store(&small_data, false);
        assert!(result.is_ok());
    }
    // Sanity check that small artifacts below the cache cap all coexist
    // (does not actually force an eviction).
    #[test]
    fn test_eviction() {
        // Small cache for testing eviction
        let store = ArtifactStore::with_config(200, 100);
        // Store multiple small artifacts
        let info1 = store.store(b"artifact 1", false).unwrap();
        let info2 = store.store(b"artifact 2", false).unwrap();
        let info3 = store.store(b"artifact 3", false).unwrap();
        // All should be retrievable (cache is large enough)
        assert!(store.exists(&info1.cid));
        assert!(store.exists(&info2.cid));
        assert!(store.exists(&info3.cid));
    }
    // Unknown CIDs return None — proves there is no silent gateway fallback.
    #[test]
    fn test_local_only_mode() {
        let store = ArtifactStore::new();
        // Non-existent CID should return None (no gateway fetch)
        let result = store.retrieve("local:nonexistent");
        assert!(result.is_none());
    }
}

View File

@@ -0,0 +1,246 @@
//! Cryptographic Primitives - AES-256-GCM + Canonical Serialization
//!
//! Security principles:
//! - AES-256-GCM authenticated encryption
//! - Canonical JSON with sorted keys (not relying on insertion order)
//! - Proper IV/nonce handling
use aes_gcm::{
aead::{Aead, KeyInit},
Aes256Gcm, Nonce,
};
use sha2::{Sha256, Digest};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use rand::RngCore;
/// Encrypted payload with IV and auth tag
///
/// Produced by CryptoV2::encrypt: `ciphertext` excludes the 16-byte GCM auth
/// tag, which is carried separately in `tag`; `iv` is the random 96-bit nonce.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptedPayload {
    pub ciphertext: Vec<u8>,
    pub iv: [u8; 12],
    pub tag: [u8; 16],
}
/// Crypto primitives for P2P communication (stateless namespace struct;
/// all functionality lives in associated functions)
pub struct CryptoV2;
impl CryptoV2 {
    /// Encrypt data with AES-256-GCM.
    ///
    /// A fresh random 96-bit IV is drawn from the OS RNG per call; the GCM
    /// auth tag is split off the ciphertext and returned separately.
    pub fn encrypt(data: &[u8], key: &[u8; 32]) -> Result<EncryptedPayload, String> {
        let cipher =
            Aes256Gcm::new_from_slice(key).map_err(|e| format!("Invalid key: {}", e))?;
        // Generate random IV
        let mut iv = [0u8; 12];
        rand::rngs::OsRng.fill_bytes(&mut iv);
        // Encrypt; the GCM implementation appends the 16-byte tag
        let sealed = cipher
            .encrypt(Nonce::from_slice(&iv), data)
            .map_err(|e| format!("Encryption failed: {}", e))?;
        // Split the trailing 16-byte auth tag back out
        let (body, tail) = sealed.split_at(sealed.len() - 16);
        let mut tag = [0u8; 16];
        tag.copy_from_slice(tail);
        Ok(EncryptedPayload {
            ciphertext: body.to_vec(),
            iv,
            tag,
        })
    }

    /// Decrypt data with AES-256-GCM.
    ///
    /// Fails (with a generic message) if the key is wrong or the data was
    /// tampered with — GCM authentication covers both cases.
    pub fn decrypt(encrypted: &EncryptedPayload, key: &[u8; 32]) -> Result<Vec<u8>, String> {
        let cipher =
            Aes256Gcm::new_from_slice(key).map_err(|e| format!("Invalid key: {}", e))?;
        // Re-join ciphertext and tag into the form the cipher expects
        let mut sealed = Vec::with_capacity(encrypted.ciphertext.len() + 16);
        sealed.extend_from_slice(&encrypted.ciphertext);
        sealed.extend_from_slice(&encrypted.tag);
        cipher
            .decrypt(Nonce::from_slice(&encrypted.iv), sealed.as_ref())
            .map_err(|_| "Decryption failed: authentication failed".to_string())
    }

    /// SHA-256 hash
    pub fn hash(data: &[u8]) -> [u8; 32] {
        Sha256::digest(data).into()
    }

    /// SHA-256 hash as hex string
    pub fn hash_hex(data: &[u8]) -> String {
        hex::encode(Self::hash(data))
    }

    /// Generate a simplified CID (local-only, not real IPFS)
    /// For real IPFS, use multiformats library
    pub fn generate_local_cid(data: &[u8]) -> String {
        // First 32 hex chars (128 bits) of the SHA-256, "local:"-prefixed
        format!("local:{}", &Self::hash_hex(data)[..32])
    }
}
/// Canonical JSON serialization with sorted keys
/// This is critical for signature verification
///
/// (stateless namespace struct; formatting rules live in the impl)
pub struct CanonicalJson;
impl CanonicalJson {
    /// Serialize value to canonical JSON (sorted keys, no spaces).
    pub fn stringify(value: &Value) -> String {
        Self::stringify_value(value)
    }

    /// Parse a JSON string and re-serialize it in canonical form.
    pub fn canonicalize(json_str: &str) -> Result<String, serde_json::Error> {
        serde_json::from_str::<Value>(json_str).map(|v| Self::stringify(&v))
    }

    /// Serialize any Serialize-able struct to canonical JSON.
    pub fn serialize<T: Serialize>(value: &T) -> Result<String, serde_json::Error> {
        serde_json::to_value(value).map(|v| Self::stringify(&v))
    }

    // Dispatch on the JSON value kind; scalars render directly, containers
    // recurse.
    fn stringify_value(value: &Value) -> String {
        match value {
            Value::Null => String::from("null"),
            Value::Bool(true) => String::from("true"),
            Value::Bool(false) => String::from("false"),
            Value::Number(n) => n.to_string(),
            Value::String(s) => Self::stringify_string(s),
            Value::Array(items) => Self::stringify_array(items),
            Value::Object(map) => Self::stringify_object(map),
        }
    }

    // JSON string escaping: quotes, backslash, common whitespace escapes,
    // and \uXXXX for any remaining control characters.
    fn stringify_string(s: &str) -> String {
        let mut out = String::with_capacity(s.len() + 2);
        out.push('"');
        for ch in s.chars() {
            match ch {
                '"' => out.push_str("\\\""),
                '\\' => out.push_str("\\\\"),
                '\n' => out.push_str("\\n"),
                '\r' => out.push_str("\\r"),
                '\t' => out.push_str("\\t"),
                _ if ch.is_control() => out.push_str(&format!("\\u{:04x}", ch as u32)),
                _ => out.push(ch),
            }
        }
        out.push('"');
        out
    }

    // Arrays keep their element order; only objects are re-sorted.
    fn stringify_array(arr: &[Value]) -> String {
        let mut parts = Vec::with_capacity(arr.len());
        for item in arr {
            parts.push(Self::stringify_value(item));
        }
        format!("[{}]", parts.join(","))
    }

    // Objects are emitted with keys in ascending lexicographic order so the
    // output bytes are deterministic regardless of insertion order.
    fn stringify_object(obj: &serde_json::Map<String, Value>) -> String {
        let mut entries: Vec<(&String, &Value)> = obj.iter().collect();
        entries.sort_by(|a, b| a.0.cmp(b.0));
        let body = entries
            .iter()
            .map(|(k, v)| format!("{}:{}", Self::stringify_string(k), Self::stringify_value(v)))
            .collect::<Vec<_>>()
            .join(",");
        format!("{{{}}}", body)
    }
}
/// Compute the SHA-256 hash of a value's canonical JSON representation.
/// Deterministic for equal values regardless of field declaration order.
pub fn canonical_hash<T: Serialize>(value: &T) -> Result<[u8; 32], serde_json::Error> {
    CanonicalJson::serialize(value).map(|canonical| CryptoV2::hash(canonical.as_bytes()))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Encrypt/decrypt round-trip with a fixed key.
    #[test]
    fn test_encrypt_decrypt() {
        let key = [1u8; 32];
        let data = b"Hello, World!";
        let encrypted = CryptoV2::encrypt(data, &key).unwrap();
        let decrypted = CryptoV2::decrypt(&encrypted, &key).unwrap();
        assert_eq!(data.as_slice(), decrypted.as_slice());
    }
    // GCM authentication must reject a wrong key rather than return garbage.
    #[test]
    fn test_decrypt_wrong_key_fails() {
        let key1 = [1u8; 32];
        let key2 = [2u8; 32];
        let data = b"Secret data";
        let encrypted = CryptoV2::encrypt(data, &key1).unwrap();
        let result = CryptoV2::decrypt(&encrypted, &key2);
        assert!(result.is_err());
    }
    // Keys are emitted in sorted order regardless of input order.
    #[test]
    fn test_canonical_json_sorted_keys() {
        let json = serde_json::json!({
            "z": 1,
            "a": 2,
            "m": 3
        });
        let canonical = CanonicalJson::stringify(&json);
        assert_eq!(canonical, r#"{"a":2,"m":3,"z":1}"#);
    }
    // Sorting applies recursively to nested objects; arrays keep their order.
    #[test]
    fn test_canonical_json_nested() {
        let json = serde_json::json!({
            "b": {
                "z": 1,
                "a": 2
            },
            "a": [3, 2, 1]
        });
        let canonical = CanonicalJson::stringify(&json);
        assert_eq!(canonical, r#"{"a":[3,2,1],"b":{"a":2,"z":1}}"#);
    }
    // Control characters inside strings are escaped, not emitted raw.
    #[test]
    fn test_canonical_json_escaping() {
        let json = serde_json::json!({
            "text": "hello\nworld"
        });
        let canonical = CanonicalJson::stringify(&json);
        assert_eq!(canonical, r#"{"text":"hello\nworld"}"#);
    }
    // Equal values hash identically (the property signatures rely on).
    #[test]
    fn test_canonical_hash_deterministic() {
        #[derive(Serialize)]
        struct Data {
            z: u32,
            a: u32,
        }
        let data1 = Data { z: 1, a: 2 };
        let data2 = Data { z: 1, a: 2 };
        let hash1 = canonical_hash(&data1).unwrap();
        let hash2 = canonical_hash(&data2).unwrap();
        assert_eq!(hash1, hash2);
    }
}

View File

@@ -0,0 +1,479 @@
//! Message Envelopes - Signed and Encrypted Messages
//!
//! Security principles:
//! - All messages are signed with Ed25519
//! - Signatures cover canonical representation of all fields
//! - TaskReceipt includes full execution binding
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use crate::p2p::crypto::{EncryptedPayload, CanonicalJson, CryptoV2};
/// Signed message envelope for P2P communication
///
/// Design:
/// - Header fields are signed but not encrypted
/// - Payload is encrypted with swarm key (for broadcast) or session key (for direct)
/// - Sender identity verified via registry, NOT from this envelope
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignedEnvelope {
    // Header (signed but not encrypted)
    pub message_id: String,
    pub topic: String,
    // Creation time, Unix milliseconds
    pub timestamp: u64,
    // Claimed sender id; the verification key is looked up in the registry
    pub sender_id: String,
    // SHA-256 of the plaintext payload, hex on the wire
    #[serde(with = "hex::serde")]
    pub payload_hash: [u8; 32],
    // Per-message nonce plus monotonic sender counter (replay protection —
    // tracked per sender by IdentityManager)
    pub nonce: String,
    pub counter: u64,
    // Signature covers canonical representation of all header fields
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
    // Encrypted payload (swarm key or session key)
    pub encrypted: EncryptedPayload,
}
impl SignedEnvelope {
    /// Create canonical representation of header for signing
    /// Keys are sorted alphabetically for deterministic output
    ///
    /// The literal below already lists keys in sorted order, but the
    /// determinism guarantee comes from CanonicalJson::stringify, which
    /// sorts object keys itself. Changing the key set or value encoding
    /// here invalidates all existing signatures.
    pub fn canonical_header(&self) -> String {
        // Create a struct with sorted fields for canonical serialization
        let header = serde_json::json!({
            "counter": self.counter,
            "message_id": self.message_id,
            "nonce": self.nonce,
            "payload_hash": hex::encode(self.payload_hash),
            "sender_id": self.sender_id,
            "timestamp": self.timestamp,
            "topic": self.topic,
        });
        CanonicalJson::stringify(&header)
    }
    /// Create unsigned envelope (for signing externally)
    ///
    /// Sets `timestamp` to now (ms), hashes the PLAINTEXT payload into
    /// `payload_hash`, and zeroes `signature` — the caller signs
    /// canonical_header() afterwards.
    pub fn new_unsigned(
        message_id: String,
        topic: String,
        sender_id: String,
        payload: &[u8],
        nonce: String,
        counter: u64,
        encrypted: EncryptedPayload,
    ) -> Self {
        Self {
            message_id,
            topic,
            timestamp: chrono::Utc::now().timestamp_millis() as u64,
            sender_id,
            payload_hash: CryptoV2::hash(payload),
            nonce,
            counter,
            signature: [0u8; 64],
            encrypted,
        }
    }
}
/// Task execution envelope with resource budgets
///
/// Describes a WASM task to execute: which module, which function, which
/// input, plus sandbox budgets. The envelope_hash binds receipts to this
/// exact definition.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskEnvelope {
    pub task_id: String,
    pub module_cid: String, // WASM module location
    pub entrypoint: String, // Function to call
    pub input_cid: String, // Input data location
    // Expected hash of the output schema (verified by consumers)
    #[serde(with = "hex::serde")]
    pub output_schema_hash: [u8; 32],
    /// Resource budgets for sandbox
    pub budgets: TaskBudgets,
    /// Requester info
    pub requester: String,
    pub deadline: u64,
    pub priority: u8,
    /// Canonical hash of this envelope (for receipts)
    #[serde(with = "hex::serde")]
    pub envelope_hash: [u8; 32],
}
/// Hard resource limits enforced by the execution sandbox.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskBudgets {
    pub fuel_limit: u64, // Wasmtime fuel
    pub memory_mb: u32, // Max memory in MB
    pub timeout_ms: u64, // Max execution time
}
impl TaskEnvelope {
    /// Create task envelope with computed hash
    ///
    /// `envelope_hash` is derived from the canonical form of every other
    /// field, so receipts can bind to this exact task definition.
    pub fn new(
        task_id: String,
        module_cid: String,
        entrypoint: String,
        input_cid: String,
        output_schema_hash: [u8; 32],
        budgets: TaskBudgets,
        requester: String,
        deadline: u64,
        priority: u8,
    ) -> Self {
        let mut envelope = Self {
            task_id,
            module_cid,
            entrypoint,
            input_cid,
            output_schema_hash,
            budgets,
            requester,
            deadline,
            priority,
            envelope_hash: [0u8; 32],
        };
        // Compute envelope hash (excluding envelope_hash field)
        envelope.envelope_hash = envelope.compute_hash();
        envelope
    }
    /// Compute canonical hash of envelope (excluding envelope_hash itself)
    ///
    /// Uses CanonicalJson (sorted keys) for determinism. Any change to the
    /// key set or value encoding here invalidates existing hashes/receipts.
    fn compute_hash(&self) -> [u8; 32] {
        let for_hash = serde_json::json!({
            "budgets": {
                "fuel_limit": self.budgets.fuel_limit,
                "memory_mb": self.budgets.memory_mb,
                "timeout_ms": self.budgets.timeout_ms,
            },
            "deadline": self.deadline,
            "entrypoint": self.entrypoint,
            "input_cid": self.input_cid,
            "module_cid": self.module_cid,
            "output_schema_hash": hex::encode(self.output_schema_hash),
            "priority": self.priority,
            "requester": self.requester,
            "task_id": self.task_id,
        });
        let canonical = CanonicalJson::stringify(&for_hash);
        CryptoV2::hash(canonical.as_bytes())
    }
}
/// Task result receipt with full execution binding
///
/// Security: Signature covers ALL fields to prevent tampering
/// Including binding to original TaskEnvelope for traceability
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskReceipt {
    // Core result
    pub task_id: String,
    pub executor: String,
    pub result_cid: String,
    pub status: TaskStatus,
    // Resource usage
    pub fuel_used: u64,
    pub memory_peak_mb: u32,
    pub execution_ms: u64,
    // Execution binding (proves this receipt is for this specific execution)
    #[serde(with = "hex::serde")]
    pub input_hash: [u8; 32],
    #[serde(with = "hex::serde")]
    pub output_hash: [u8; 32],
    #[serde(with = "hex::serde")]
    pub module_hash: [u8; 32],
    pub start_timestamp: u64,
    pub end_timestamp: u64,
    // TaskEnvelope binding (proves this receipt matches original task);
    // copied verbatim from the TaskEnvelope by new_unsigned
    pub module_cid: String,
    pub input_cid: String,
    pub entrypoint: String,
    #[serde(with = "hex::serde")]
    pub output_schema_hash: [u8; 32],
    #[serde(with = "hex::serde")]
    pub task_envelope_hash: [u8; 32],
    // Signature covers ALL fields above
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// Terminal state of a task execution. The Debug name is what gets hashed
/// into the receipt's canonical form.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum TaskStatus {
    Success,
    Error,
    Timeout,
    OutOfMemory,
}
impl TaskReceipt {
    /// Create canonical representation for signing
    /// Includes ALL fields for full execution binding
    ///
    /// CanonicalJson sorts the keys, so the output bytes are deterministic.
    /// Changing the key set or encoding invalidates all existing receipt
    /// signatures.
    pub fn canonical_for_signing(&self) -> String {
        let for_signing = serde_json::json!({
            "end_timestamp": self.end_timestamp,
            "entrypoint": self.entrypoint,
            "execution_ms": self.execution_ms,
            "executor": self.executor,
            "fuel_used": self.fuel_used,
            "input_cid": self.input_cid,
            "input_hash": hex::encode(self.input_hash),
            "memory_peak_mb": self.memory_peak_mb,
            "module_cid": self.module_cid,
            "module_hash": hex::encode(self.module_hash),
            "output_hash": hex::encode(self.output_hash),
            "output_schema_hash": hex::encode(self.output_schema_hash),
            "result_cid": self.result_cid,
            "start_timestamp": self.start_timestamp,
            "status": format!("{:?}", self.status),
            "task_envelope_hash": hex::encode(self.task_envelope_hash),
            "task_id": self.task_id,
        });
        CanonicalJson::stringify(&for_signing)
    }
    /// Create unsigned receipt (for signing externally)
    ///
    /// Copies the envelope-binding fields (module/input CIDs, entrypoint,
    /// schema hash, envelope hash) straight from the original TaskEnvelope;
    /// `signature` is zeroed until the executor signs.
    pub fn new_unsigned(
        task: &TaskEnvelope,
        executor: String,
        result_cid: String,
        status: TaskStatus,
        fuel_used: u64,
        memory_peak_mb: u32,
        execution_ms: u64,
        input_hash: [u8; 32],
        output_hash: [u8; 32],
        module_hash: [u8; 32],
        start_timestamp: u64,
        end_timestamp: u64,
    ) -> Self {
        Self {
            task_id: task.task_id.clone(),
            executor,
            result_cid,
            status,
            fuel_used,
            memory_peak_mb,
            execution_ms,
            input_hash,
            output_hash,
            module_hash,
            start_timestamp,
            end_timestamp,
            module_cid: task.module_cid.clone(),
            input_cid: task.input_cid.clone(),
            entrypoint: task.entrypoint.clone(),
            output_schema_hash: task.output_schema_hash,
            task_envelope_hash: task.envelope_hash,
            signature: [0u8; 64],
        }
    }
}
/// Signaling message for WebRTC (via GUN)
///
/// Carries SDP offers/answers and ICE candidates between peers; expires
/// after `expires_at` (Unix ms) so stale signals cannot be replayed forever.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalingMessage {
    pub signal_type: SignalType,
    pub from: String,
    pub to: String,
    // Opaque signaling payload (e.g. SDP text); hashed into payload_hash
    pub payload: String,
    #[serde(with = "hex::serde")]
    pub payload_hash: [u8; 32],
    pub timestamp: u64,
    pub expires_at: u64,
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// WebRTC signaling message kinds (offer/answer/ICE candidate).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum SignalType {
    Offer,
    Answer,
    Ice,
}
impl SignalingMessage {
    /// Create canonical representation for signing
    /// Does NOT include sender_pubkey - verified via registry
    ///
    /// Note: signs the payload HASH, not the payload itself; CanonicalJson
    /// sorts keys so the bytes are deterministic.
    pub fn canonical_for_signing(&self) -> String {
        let for_signing = serde_json::json!({
            "expires_at": self.expires_at,
            "from": self.from,
            "payload_hash": hex::encode(self.payload_hash),
            "signal_type": format!("{:?}", self.signal_type),
            "timestamp": self.timestamp,
            "to": self.to,
        });
        CanonicalJson::stringify(&for_signing)
    }
    /// Create unsigned signaling message
    ///
    /// `expires_at = now + ttl_ms`, saturating instead of overflowing so an
    /// absurd caller-supplied `ttl_ms` cannot panic in debug builds (u64
    /// addition overflow); `signature` is zeroed until signed externally.
    pub fn new_unsigned(
        signal_type: SignalType,
        from: String,
        to: String,
        payload: String,
        ttl_ms: u64,
    ) -> Self {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        Self {
            signal_type,
            from,
            to,
            // Hash computed before `payload` is moved into the struct
            payload_hash: CryptoV2::hash(payload.as_bytes()),
            payload,
            timestamp: now,
            expires_at: now.saturating_add(ttl_ms),
            signature: [0u8; 64],
        }
    }
    /// Check if signal is expired (current time strictly past expires_at)
    pub fn is_expired(&self) -> bool {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        now > self.expires_at
    }
}
/// Artifact pointer (small metadata that goes to GUN)
///
/// Points at content stored in the ArtifactStore by CID; the checksum and
/// schema_hash are truncated SHA-256 prefixes (16 and 8 bytes respectively).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactPointer {
    pub artifact_type: ArtifactType,
    pub agent_id: String,
    pub cid: String,
    pub version: u32,
    #[serde(with = "hex::serde")]
    pub schema_hash: [u8; 8],
    pub dimensions: String,
    #[serde(with = "hex::serde")]
    pub checksum: [u8; 16],
    pub timestamp: u64,
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// Kinds of published artifacts. The Debug name feeds the pointer's
/// schema_hash (see ArtifactStore::create_pointer), so renaming a variant
/// changes its schema hash.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ArtifactType {
    QTable,
    MemoryVectors,
    ModelWeights,
    Trajectory,
}
impl ArtifactPointer {
    /// Create canonical representation for signing
    ///
    /// Covers every field except `signature` itself; CanonicalJson sorts
    /// keys, so output bytes are deterministic. Changing the key set or
    /// encoding invalidates existing pointer signatures.
    pub fn canonical_for_signing(&self) -> String {
        let for_signing = serde_json::json!({
            "agent_id": self.agent_id,
            "artifact_type": format!("{:?}", self.artifact_type),
            "checksum": hex::encode(self.checksum),
            "cid": self.cid,
            "dimensions": self.dimensions,
            "schema_hash": hex::encode(self.schema_hash),
            "timestamp": self.timestamp,
            "version": self.version,
        });
        CanonicalJson::stringify(&for_signing)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Canonical header must be deterministic and contain the header fields.
    #[test]
    fn test_envelope_canonical_header() {
        let encrypted = EncryptedPayload {
            ciphertext: vec![1, 2, 3],
            iv: [0u8; 12],
            tag: [0u8; 16],
        };
        let envelope = SignedEnvelope::new_unsigned(
            "msg-001".to_string(),
            "test-topic".to_string(),
            "sender-001".to_string(),
            b"test payload",
            "abc123".to_string(),
            1,
            encrypted,
        );
        let canonical1 = envelope.canonical_header();
        let canonical2 = envelope.canonical_header();
        // Must be deterministic
        assert_eq!(canonical1, canonical2);
        // Must contain all fields
        assert!(canonical1.contains("msg-001"));
        assert!(canonical1.contains("test-topic"));
        assert!(canonical1.contains("sender-001"));
    }
    // The receipt's signed canonical form must carry both the execution
    // binding (hashes) and the TaskEnvelope binding (CIDs, entrypoint, hash).
    #[test]
    fn test_task_receipt_includes_all_binding_fields() {
        let task = TaskEnvelope::new(
            "task-001".to_string(),
            "local:abc123".to_string(),
            "process".to_string(),
            "local:input123".to_string(),
            [0u8; 32],
            TaskBudgets {
                fuel_limit: 1000000,
                memory_mb: 128,
                timeout_ms: 30000,
            },
            "requester-001".to_string(),
            chrono::Utc::now().timestamp_millis() as u64 + 60000,
            1,
        );
        let receipt = TaskReceipt::new_unsigned(
            &task,
            "executor-001".to_string(),
            "local:result123".to_string(),
            TaskStatus::Success,
            500000,
            64,
            1500,
            [1u8; 32],
            [2u8; 32],
            [3u8; 32],
            1000,
            2500,
        );
        let canonical = receipt.canonical_for_signing();
        // Must include execution binding fields
        assert!(canonical.contains("input_hash"));
        assert!(canonical.contains("output_hash"));
        assert!(canonical.contains("module_hash"));
        // Must include task envelope binding
        assert!(canonical.contains("module_cid"));
        assert!(canonical.contains("input_cid"));
        assert!(canonical.contains("entrypoint"));
        assert!(canonical.contains("task_envelope_hash"));
    }
    // The signed bytes must never embed a sender pubkey — keys come from the
    // registry, not from the message.
    #[test]
    fn test_signaling_message_no_pubkey_in_signature() {
        let signal = SignalingMessage::new_unsigned(
            SignalType::Offer,
            "alice".to_string(),
            "bob".to_string(),
            "sdp data here".to_string(),
            60000,
        );
        let canonical = signal.canonical_for_signing();
        // Should NOT contain any pubkey field
        assert!(!canonical.contains("pubkey"));
        assert!(!canonical.contains("public_key"));
    }
}

View File

@@ -0,0 +1,431 @@
//! Identity Manager - Ed25519 + X25519 Key Management
//!
//! Security principles:
//! - Ed25519 for signing (identity keys)
//! - X25519 for key exchange (session keys)
//! - Never trust pubkeys from envelopes - only from signed registry
//! - Per-sender nonce tracking with timestamps for expiry
//! - Separate send/receive counters
use ed25519_dalek::{SigningKey, VerifyingKey, Signature, Signer, Verifier};
use x25519_dalek::{StaticSecret, PublicKey as X25519PublicKey};
use hkdf::Hkdf;
use sha2::Sha256;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
/// Ed25519 key pair for identity/signing
///
/// The private signing half is not exposed by any accessor in this file —
/// only the verifying key (and signatures) leave the struct.
#[derive(Clone)]
pub struct KeyPair {
    signing_key: SigningKey,
    verifying_key: VerifyingKey,
}
impl KeyPair {
pub fn generate() -> Self {
let signing_key = SigningKey::generate(&mut OsRng);
let verifying_key = signing_key.verifying_key();
Self { signing_key, verifying_key }
}
pub fn public_key_bytes(&self) -> [u8; 32] {
self.verifying_key.to_bytes()
}
pub fn public_key_base64(&self) -> String {
base64::Engine::encode(&base64::engine::general_purpose::STANDARD, self.public_key_bytes())
}
pub fn sign(&self, message: &[u8]) -> Signature {
self.signing_key.sign(message)
}
pub fn verify(public_key: &[u8; 32], message: &[u8], signature: &[u8; 64]) -> bool {
let Ok(verifying_key) = VerifyingKey::from_bytes(public_key) else {
return false;
};
let sig = Signature::from_bytes(signature);
verifying_key.verify(message, &sig).is_ok()
}
}
/// Registered member in the swarm (from signed registry)
///
/// This record is THE source of truth for a peer's keys: envelopes are
/// verified against `ed25519_public_key` from here, never against keys
/// carried in messages.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegisteredMember {
    pub agent_id: String,
    // Signing key (identity); self-signs this registration
    #[serde(with = "hex::serde")]
    pub ed25519_public_key: [u8; 32],
    // Key-exchange key used for X25519 ECDH session derivation
    #[serde(with = "hex::serde")]
    pub x25519_public_key: [u8; 32],
    pub capabilities: Vec<String>,
    pub joined_at: u64,
    // Updated out-of-band; NOT covered by the registration signature
    pub last_heartbeat: u64,
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
impl RegisteredMember {
    /// Verify the registration signature
    /// Signature covers: agent_id || ed25519_key || x25519_key || capabilities || joined_at
    ///
    /// Self-signed: verified against the member's OWN ed25519 key, proving
    /// possession of the private key for the claimed identity.
    pub fn verify(&self) -> bool {
        let canonical = self.canonical_data();
        KeyPair::verify(&self.ed25519_public_key, &canonical, &self.signature)
    }
    /// Create canonical data for signing
    ///
    /// NOTE(review): fields are joined with 0x00 separators; an agent_id or
    /// capability string containing a NUL byte could produce a colliding
    /// encoding — confirm upstream validation forbids NULs in these strings.
    /// last_heartbeat and signature are deliberately excluded.
    pub fn canonical_data(&self) -> Vec<u8> {
        let mut data = Vec::new();
        data.extend_from_slice(self.agent_id.as_bytes());
        data.push(0); // separator
        data.extend_from_slice(&self.ed25519_public_key);
        data.extend_from_slice(&self.x25519_public_key);
        for cap in &self.capabilities {
            data.extend_from_slice(cap.as_bytes());
            data.push(0);
        }
        data.extend_from_slice(&self.joined_at.to_le_bytes());
        data
    }
}
/// Per-sender nonce tracking entry
struct NonceEntry {
    // When the nonce was first seen (Unix ms); presumably compared against
    // max_nonce_age_ms to expire entries — pruning code not visible in this
    // chunk, confirm in the rest of the impl
    timestamp: u64,
}
/// Identity Manager with registry-based trust
///
/// Owns this node's Ed25519 identity and X25519 exchange keys, plus the
/// replay-protection state (nonces and counters) for its peers.
pub struct IdentityManager {
    /// Our Ed25519 identity keypair
    identity_key: KeyPair,
    /// Our X25519 key for ECDH
    x25519_secret: StaticSecret,
    x25519_public: X25519PublicKey,
    /// Derived session keys per peer (cache)
    session_keys: Arc<RwLock<HashMap<String, [u8; 32]>>>,
    /// Registry of verified members - THE SOURCE OF TRUTH
    /// Never trust keys from envelopes, only from here
    member_registry: Arc<RwLock<HashMap<String, RegisteredMember>>>,
    /// Per-sender nonce tracking: senderId -> (nonce -> entry)
    seen_nonces: Arc<RwLock<HashMap<String, HashMap<String, NonceEntry>>>>,
    /// Local monotonic send counter
    send_counter: Arc<RwLock<u64>>,
    /// Per-peer receive counters
    recv_counters: Arc<RwLock<HashMap<String, u64>>>,
    /// Max nonce age (5 minutes)
    max_nonce_age_ms: u64,
}
impl IdentityManager {
    /// Create a fresh identity: a new Ed25519 signing keypair plus a new
    /// X25519 static secret used for ECDH session-key derivation.
    pub fn new() -> Self {
        let identity_key = KeyPair::generate();
        let x25519_secret = StaticSecret::random_from_rng(OsRng);
        let x25519_public = X25519PublicKey::from(&x25519_secret);
        Self {
            identity_key,
            x25519_secret,
            x25519_public,
            session_keys: Arc::new(RwLock::new(HashMap::new())),
            member_registry: Arc::new(RwLock::new(HashMap::new())),
            seen_nonces: Arc::new(RwLock::new(HashMap::new())),
            send_counter: Arc::new(RwLock::new(0)),
            recv_counters: Arc::new(RwLock::new(HashMap::new())),
            max_nonce_age_ms: 300_000, // 5 minutes
        }
    }

    /// Get our Ed25519 public key (32 bytes).
    pub fn public_key(&self) -> [u8; 32] {
        self.identity_key.public_key_bytes()
    }

    /// Get our X25519 public key (32 bytes).
    pub fn x25519_public_key(&self) -> [u8; 32] {
        self.x25519_public.to_bytes()
    }

    /// Sign `data` with our Ed25519 identity key.
    pub fn sign(&self, data: &[u8]) -> [u8; 64] {
        self.identity_key.sign(data).to_bytes()
    }

    /// Verify a signature using ONLY the registry key.
    ///
    /// Never use a key carried in the message itself: an attacker can attach
    /// any key to a message, so only the key pinned at registration counts.
    /// Returns `false` for unknown senders.
    pub fn verify_from_registry(&self, sender_id: &str, data: &[u8], signature: &[u8; 64]) -> bool {
        let registry = self.member_registry.read();
        let Some(member) = registry.get(sender_id) else {
            tracing::warn!("verify_from_registry: sender not in registry: {}", sender_id);
            return false;
        };
        KeyPair::verify(&member.ed25519_public_key, data, signature)
    }

    /// Register a member from a signed registration.
    ///
    /// Verifies the self-signature before adding. Re-registration of an
    /// existing agent ID is only accepted when it presents the same Ed25519
    /// key (key pinning), preventing identity takeover by ID reuse.
    pub fn register_member(&self, member: RegisteredMember) -> bool {
        if !member.verify() {
            tracing::warn!("register_member: invalid signature for {}", member.agent_id);
            return false;
        }
        let mut registry = self.member_registry.write();
        // If already registered, only accept if from the same key.
        if let Some(existing) = registry.get(&member.agent_id) {
            if existing.ed25519_public_key != member.ed25519_public_key {
                tracing::warn!("register_member: key mismatch for {}", member.agent_id);
                return false;
            }
        }
        registry.insert(member.agent_id.clone(), member);
        true
    }

    /// Get a registered member by ID (cloned out of the registry).
    pub fn get_member(&self, agent_id: &str) -> Option<RegisteredMember> {
        self.member_registry.read().get(agent_id).cloned()
    }

    /// Update a member's heartbeat timestamp to "now". No-op for unknown IDs.
    pub fn update_heartbeat(&self, agent_id: &str) {
        let mut registry = self.member_registry.write();
        if let Some(member) = registry.get_mut(agent_id) {
            member.last_heartbeat = chrono::Utc::now().timestamp_millis() as u64;
        }
    }

    /// Get members whose last heartbeat is within `heartbeat_threshold_ms`.
    pub fn get_active_members(&self, heartbeat_threshold_ms: u64) -> Vec<RegisteredMember> {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        let registry = self.member_registry.read();
        registry.values()
            // saturating_sub: a heartbeat slightly in the future (clock skew)
            // must not underflow u64 and wrap to a huge "age".
            .filter(|m| now.saturating_sub(m.last_heartbeat) < heartbeat_threshold_ms)
            .cloned()
            .collect()
    }

    /// Create our own self-signed registration (for publishing to the registry).
    pub fn create_registration(&self, agent_id: &str, capabilities: Vec<String>) -> RegisteredMember {
        let joined_at = chrono::Utc::now().timestamp_millis() as u64;
        let mut member = RegisteredMember {
            agent_id: agent_id.to_string(),
            ed25519_public_key: self.public_key(),
            x25519_public_key: self.x25519_public_key(),
            capabilities,
            joined_at,
            last_heartbeat: joined_at,
            signature: [0u8; 64],
        };
        // Sign the canonical data (signature field excluded by construction).
        let canonical = member.canonical_data();
        member.signature = self.sign(&canonical);
        member
    }

    /// Derive a session key with a peer using X25519 ECDH + HKDF-SHA256.
    ///
    /// Uses ONLY the X25519 key from the registry; returns `None` for unknown
    /// peers. Results are cached per (peer, swarm). The HKDF salt is
    /// `sha256(min(pubA, pubB) || max(pubA, pubB))` so both sides derive the
    /// identical key regardless of who initiates.
    pub fn derive_session_key(&self, peer_id: &str, swarm_id: &str) -> Option<[u8; 32]> {
        let cache_key = format!("{}:{}", peer_id, swarm_id);
        // Check cache first (read lock only).
        {
            let cache = self.session_keys.read();
            if let Some(key) = cache.get(&cache_key) {
                return Some(*key);
            }
        }
        // Get peer's X25519 key from the registry ONLY.
        let registry = self.member_registry.read();
        let peer = registry.get(peer_id)?;
        let peer_x25519 = X25519PublicKey::from(peer.x25519_public_key);
        // X25519 ECDH
        let shared_secret = self.x25519_secret.diffie_hellman(&peer_x25519);
        // HKDF with a stable salt built from both public keys.
        let my_key = self.x25519_public.as_bytes();
        let peer_key = peer.x25519_public_key;
        // Salt = sha256(min(pubA, pubB) || max(pubA, pubB)) — order-independent.
        use sha2::Digest;
        let salt = if my_key < &peer_key {
            let mut hasher = Sha256::new();
            hasher.update(my_key);
            hasher.update(&peer_key);
            hasher.finalize()
        } else {
            let mut hasher = Sha256::new();
            hasher.update(&peer_key);
            hasher.update(my_key);
            hasher.finalize()
        };
        // Info only includes swarm_id (the salt already binds both parties'
        // public keys for pair separation), so both sides derive the same key.
        let info = format!("p2p-swarm-v2:{}", swarm_id);
        let hkdf = Hkdf::<Sha256>::new(Some(&salt), shared_secret.as_bytes());
        let mut session_key = [0u8; 32];
        hkdf.expand(info.as_bytes(), &mut session_key).ok()?;
        // Cache for subsequent calls.
        self.session_keys.write().insert(cache_key, session_key);
        Some(session_key)
    }

    /// Generate a 128-bit cryptographically random nonce, hex-encoded.
    pub fn generate_nonce() -> String {
        let mut bytes = [0u8; 16];
        rand::RngCore::fill_bytes(&mut OsRng, &mut bytes);
        hex::encode(bytes)
    }

    /// Check nonce validity with per-sender tracking (replay protection).
    ///
    /// Rejects messages older than `max_nonce_age_ms`, timestamps more than
    /// one minute in the future (clock-skew tolerance), and any nonce this
    /// sender has already used. Accepted nonces are recorded.
    pub fn check_nonce(&self, nonce: &str, timestamp: u64, sender_id: &str) -> bool {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        // Reject old messages.
        if now.saturating_sub(timestamp) > self.max_nonce_age_ms {
            return false;
        }
        // Reject future timestamps (1 minute tolerance for clock skew).
        if timestamp > now + 60_000 {
            return false;
        }
        let mut nonces = self.seen_nonces.write();
        let sender_nonces = nonces.entry(sender_id.to_string()).or_insert_with(HashMap::new);
        // Reject replayed nonces.
        if sender_nonces.contains_key(nonce) {
            return false;
        }
        // Record nonce.
        sender_nonces.insert(nonce.to_string(), NonceEntry { timestamp });
        true
    }

    /// Drop nonces older than `max_nonce_age_ms` and empty sender buckets.
    pub fn cleanup_nonces(&self) {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        let mut nonces = self.seen_nonces.write();
        for sender_nonces in nonces.values_mut() {
            // saturating_sub: check_nonce accepts timestamps up to 60s in the
            // future, so `now - timestamp` could underflow and (in release)
            // wrap to a huge value, purging a still-live nonce and re-opening
            // a replay window. Saturate to 0 instead.
            sender_nonces.retain(|_, entry| now.saturating_sub(entry.timestamp) < self.max_nonce_age_ms);
        }
        nonces.retain(|_, v| !v.is_empty());
    }

    /// Get the next monotonic send counter (starts at 1).
    pub fn next_send_counter(&self) -> u64 {
        let mut counter = self.send_counter.write();
        *counter += 1;
        *counter
    }

    /// Validate a receive counter: it must be strictly greater than the last
    /// counter seen from this peer (ordering / replay protection).
    pub fn validate_recv_counter(&self, peer_id: &str, counter: u64) -> bool {
        let mut counters = self.recv_counters.write();
        let last_seen = counters.get(peer_id).copied().unwrap_or(0);
        if counter <= last_seen {
            return false;
        }
        counters.insert(peer_id.to_string(), counter);
        true
    }

    /// Rotate (evict) all cached session keys for `peer_id`, forcing
    /// re-derivation on next use.
    pub fn rotate_session_key(&self, peer_id: &str) {
        // Match the full "peer_id:" prefix of the cache key. A bare
        // starts_with(peer_id) would also evict keys of peers whose ID merely
        // extends this one (rotating "bob" must not clear "bobby"'s keys).
        let prefix = format!("{}:", peer_id);
        self.session_keys.write().retain(|k, _| !k.starts_with(&prefix));
    }
}
impl Default for IdentityManager {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip: a signature made with a keypair verifies under its own
    /// public key.
    #[test]
    fn test_keypair_sign_verify() {
        let keypair = KeyPair::generate();
        let message = b"test message";
        let signature = keypair.sign(message);
        assert!(KeyPair::verify(
            &keypair.public_key_bytes(),
            message,
            &signature.to_bytes()
        ));
    }

    /// A self-signed registration verifies and is accepted by the registry.
    #[test]
    fn test_member_registration() {
        let identity = IdentityManager::new();
        let registration = identity.create_registration("test-agent", vec!["executor".to_string()]);
        assert!(registration.verify());
        assert!(identity.register_member(registration));
    }

    /// ECDH symmetry: after mutual registration, both sides derive the same
    /// session key for the same swarm.
    #[test]
    fn test_session_key_derivation() {
        let alice = IdentityManager::new();
        let bob = IdentityManager::new();
        // Register each other with valid capabilities.
        let alice_reg = alice.create_registration("alice", vec!["worker".to_string()]);
        let bob_reg = bob.create_registration("bob", vec!["worker".to_string()]);
        // Register in each other's registry.
        alice.register_member(bob_reg.clone());
        bob.register_member(alice_reg.clone());
        // Derive session keys - both should derive the same key.
        let alice_key = alice.derive_session_key("bob", "test-swarm").unwrap();
        let bob_key = bob.derive_session_key("alice", "test-swarm").unwrap();
        // Keys should match (symmetric ECDH).
        assert_eq!(alice_key, bob_key, "Session keys should be symmetric");
    }

    /// Nonces are single-use per sender; a different sender may reuse the
    /// same nonce value because tracking is per-sender.
    #[test]
    fn test_nonce_replay_protection() {
        let identity = IdentityManager::new();
        let nonce = IdentityManager::generate_nonce();
        let timestamp = chrono::Utc::now().timestamp_millis() as u64;
        // First use should succeed.
        assert!(identity.check_nonce(&nonce, timestamp, "sender-1"));
        // Replay should fail.
        assert!(!identity.check_nonce(&nonce, timestamp, "sender-1"));
        // Different sender can use same nonce (per-sender tracking).
        assert!(identity.check_nonce(&nonce, timestamp, "sender-2"));
    }
}

View File

@@ -0,0 +1,53 @@
//! P2P Swarm v2 - Production Grade Rust Implementation
//!
//! Features:
//! - Ed25519 identity keys + X25519 ephemeral keys for ECDH
//! - AES-256-GCM authenticated encryption
//! - Message replay protection (nonces, counters, timestamps)
//! - GUN-based signaling (no external PeerServer)
//! - IPFS CID pointers for large payloads
//! - Ed25519 signatures on all messages
//! - Relay health monitoring
//! - Task execution envelope with resource budgets
//! - WASM compatible
//! - Quantization (4-32x compression)
//! - Hyperdimensional Computing for pattern matching

// Submodules are private; the public surface is the re-export list below.
mod identity;
mod crypto;
mod relay;
mod artifact;
mod envelope;
// The swarm coordinator is only compiled when the "native" feature is enabled.
#[cfg(feature = "native")]
mod swarm;
mod advanced;

// Flat public namespace: consumers import everything from this module.
pub use identity::{IdentityManager, KeyPair, RegisteredMember};
pub use crypto::{CryptoV2, EncryptedPayload, CanonicalJson};
pub use relay::RelayManager;
pub use artifact::ArtifactStore;
pub use envelope::{SignedEnvelope, TaskEnvelope, TaskReceipt, ArtifactPointer};
#[cfg(feature = "native")]
pub use swarm::{P2PSwarmV2, SwarmStatus};
pub use advanced::{
    // Quantization
    ScalarQuantized, BinaryQuantized, CompressedData,
    // Hyperdimensional Computing
    Hypervector, HdcMemory, HDC_DIMENSION,
    // Adaptive compression
    AdaptiveCompressor, NetworkCondition,
    // Pattern routing
    PatternRouter,
    // HNSW vector index
    HnswIndex,
    // Post-quantum crypto
    HybridKeyPair, HybridPublicKey, HybridSignature,
    // Spiking neural networks
    LIFNeuron, SpikingNetwork,
    // Semantic embeddings
    SemanticEmbedder, SemanticTaskMatcher,
    // Raft consensus
    RaftNode, RaftState, LogEntry,
    RaftVoteRequest, RaftVoteResponse,
    RaftAppendEntries, RaftAppendEntriesResponse,
};

View File

@@ -0,0 +1,219 @@
//! Relay Manager - GUN Relay Health Monitoring
//!
//! Tracks relay health, latency, and provides automatic failover
use std::collections::HashMap;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
/// Relay health status tracked per relay URL.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelayStatus {
    /// Relay endpoint URL (also the key in the manager's map).
    pub url: String,
    /// Current health verdict; flips to `false` after repeated failures.
    pub healthy: bool,
    /// Unix millis of the most recent success or failure report (0 = never).
    pub last_check: u64,
    /// Latency of the last successful probe, in milliseconds.
    pub latency_ms: u32,
    /// Consecutive failure count; reset to 0 on success.
    pub failures: u8,
}
impl RelayStatus {
    /// Build a tracking record for `url`. A relay starts out optimistic:
    /// healthy, never checked, zero failures — until probes say otherwise.
    fn new(url: String) -> Self {
        Self {
            healthy: true, // assume healthy until a check fails
            last_check: 0,
            latency_ms: 0,
            failures: 0,
            url,
        }
    }
}
/// Bootstrap GUN relays (public infrastructure).
///
/// NOTE(review): these are free-tier herokuapp endpoints — verify they are
/// still reachable; the health tracking below will mark dead ones unhealthy
/// at runtime either way.
pub const BOOTSTRAP_RELAYS: &[&str] = &[
    "https://gun-manhattan.herokuapp.com/gun",
    "https://gun-us.herokuapp.com/gun",
    "https://gun-eu.herokuapp.com/gun",
];
/// Relay Manager for health tracking and failover.
///
/// Thread-safe: the relay map is behind an `Arc<RwLock<..>>` so clones of
/// the handle can be shared across tasks.
pub struct RelayManager {
    /// Health record per relay URL.
    relays: Arc<RwLock<HashMap<String, RelayStatus>>>,
    /// Consecutive failures before a relay is marked unhealthy.
    max_failures: u8,
}
impl RelayManager {
    /// Create a manager seeded with the public bootstrap relays.
    pub fn new() -> Self {
        Self::with_relays(BOOTSTRAP_RELAYS.iter().map(|s| s.to_string()).collect())
    }

    /// Create a manager with a custom relay set; all start out healthy.
    pub fn with_relays(relay_urls: Vec<String>) -> Self {
        let mut relays = HashMap::new();
        for url in relay_urls {
            relays.insert(url.clone(), RelayStatus::new(url));
        }
        Self {
            relays: Arc::new(RwLock::new(relays)),
            max_failures: 3,
        }
    }

    /// URLs of relays currently considered healthy.
    pub fn get_healthy_relays(&self) -> Vec<String> {
        self.relays.read()
            .values()
            .filter(|r| r.healthy)
            .map(|r| r.url.clone())
            .collect()
    }

    /// All known relay URLs, healthy or not (for initial connection attempts).
    pub fn get_all_relays(&self) -> Vec<String> {
        self.relays.read()
            .keys()
            .cloned()
            .collect()
    }

    /// Record a successful probe: restores health, stores latency and resets
    /// the consecutive-failure counter. No-op for unknown URLs.
    pub fn mark_success(&self, url: &str, latency_ms: u32) {
        let mut relays = self.relays.write();
        if let Some(relay) = relays.get_mut(url) {
            relay.healthy = true;
            relay.latency_ms = latency_ms;
            relay.failures = 0;
            relay.last_check = chrono::Utc::now().timestamp_millis() as u64;
        }
    }

    /// Record a failed probe; after `max_failures` consecutive failures the
    /// relay is marked unhealthy. No-op for unknown URLs.
    pub fn mark_failed(&self, url: &str) {
        let mut relays = self.relays.write();
        if let Some(relay) = relays.get_mut(url) {
            // saturating_add: a long-dead relay keeps getting mark_failed
            // calls; a plain `+= 1` on u8 panics in debug (and wraps to 0 in
            // release, briefly "resetting" the count) after 255 failures.
            relay.failures = relay.failures.saturating_add(1);
            relay.last_check = chrono::Utc::now().timestamp_millis() as u64;
            if relay.failures >= self.max_failures {
                relay.healthy = false;
                tracing::warn!("Relay marked unhealthy: {} (failures: {})", url, relay.failures);
            }
        }
    }

    /// Add a new relay (no-op if already tracked; existing state is kept).
    pub fn add_relay(&self, url: String) {
        let mut relays = self.relays.write();
        // Entry API: single lookup instead of contains_key + insert.
        relays.entry(url.clone()).or_insert_with(|| RelayStatus::new(url));
    }

    /// Remove a relay from tracking entirely.
    pub fn remove_relay(&self, url: &str) {
        self.relays.write().remove(url);
    }

    /// Aggregate metrics: total/healthy counts and mean latency over the
    /// healthy subset (0 when no relay is healthy).
    pub fn get_metrics(&self) -> RelayMetrics {
        let relays = self.relays.read();
        let healthy_relays: Vec<_> = relays.values().filter(|r| r.healthy).collect();
        let avg_latency = if healthy_relays.is_empty() {
            0
        } else {
            healthy_relays.iter().map(|r| r.latency_ms as u64).sum::<u64>() / healthy_relays.len() as u64
        };
        RelayMetrics {
            total: relays.len(),
            healthy: healthy_relays.len(),
            avg_latency_ms: avg_latency as u32,
        }
    }

    /// Export the currently-working relay set (for persistence across runs).
    pub fn export_working_set(&self) -> Vec<String> {
        self.get_healthy_relays()
    }

    /// Import a previously exported relay set.
    pub fn import_relays(&self, urls: Vec<String>) {
        for url in urls {
            self.add_relay(url);
        }
    }

    /// Reset a failed relay (give it another chance): healthy again, failure
    /// counter cleared. No-op for unknown URLs.
    pub fn reset_relay(&self, url: &str) {
        let mut relays = self.relays.write();
        if let Some(relay) = relays.get_mut(url) {
            relay.healthy = true;
            relay.failures = 0;
        }
    }

    /// Look up the status record for a relay by URL.
    pub fn get_relay(&self, url: &str) -> Option<RelayStatus> {
        self.relays.read().get(url).cloned()
    }
}
impl Default for RelayManager {
fn default() -> Self {
Self::new()
}
}
/// Relay health metrics snapshot produced by [`RelayManager::get_metrics`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RelayMetrics {
    /// Total number of tracked relays.
    pub total: usize,
    /// Number of relays currently considered healthy.
    pub healthy: usize,
    /// Mean latency across healthy relays, ms (0 when none are healthy).
    pub avg_latency_ms: u32,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A fresh manager starts with the bootstrap relay set, all healthy.
    #[test]
    fn test_relay_manager() {
        let manager = RelayManager::new();
        // Should start with bootstrap relays.
        let relays = manager.get_all_relays();
        assert!(!relays.is_empty());
        // All should be healthy initially.
        let healthy = manager.get_healthy_relays();
        assert_eq!(relays.len(), healthy.len());
    }

    /// A relay is only marked unhealthy after max_failures (3) consecutive
    /// failures.
    #[test]
    fn test_relay_failure_tracking() {
        let manager = RelayManager::with_relays(vec!["http://test:8080".to_string()]);
        // Two failures: still under the threshold.
        manager.mark_failed("http://test:8080");
        manager.mark_failed("http://test:8080");
        // assert_eq! (not assert!(.. == ..)) so a failure prints both sides.
        assert_eq!(manager.get_healthy_relays().len(), 1); // Still healthy
        // Third failure trips the threshold.
        manager.mark_failed("http://test:8080");
        assert!(manager.get_healthy_relays().is_empty()); // Now unhealthy
    }

    /// A success before the threshold resets the failure counter, so the
    /// relay stays healthy.
    #[test]
    fn test_relay_success_resets_failures() {
        let manager = RelayManager::with_relays(vec!["http://test:8080".to_string()]);
        manager.mark_failed("http://test:8080");
        manager.mark_failed("http://test:8080");
        manager.mark_success("http://test:8080", 50);
        // Should still be healthy after success.
        assert!(!manager.get_healthy_relays().is_empty());
    }
}

View File

@@ -0,0 +1,611 @@
//! P2P Swarm v2 - Production Grade Coordinator
//!
//! Features:
//! - Registry-based identity (never trust envelope keys)
//! - Membership watcher with heartbeats
//! - Task executor loop with claiming
//! - Artifact publishing and retrieval
use crate::p2p::{
identity::{IdentityManager, RegisteredMember},
crypto::{CryptoV2, CanonicalJson},
relay::RelayManager,
artifact::ArtifactStore,
envelope::{
SignedEnvelope, TaskEnvelope, TaskReceipt, TaskStatus, TaskBudgets,
SignalingMessage, SignalType, ArtifactPointer, ArtifactType,
},
};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc;
/// Heartbeat publish interval (30 seconds) used by the background loop.
pub const HEARTBEAT_INTERVAL_MS: u64 = 30_000;
/// Peers are considered inactive after this much silence (2 minutes,
/// i.e. four missed heartbeats).
pub const HEARTBEAT_TIMEOUT_MS: u64 = 120_000;
/// Window during which an existing task claim blocks competing claims
/// (5 seconds).
pub const TASK_CLAIM_WINDOW_MS: u64 = 5_000;
/// Point-in-time snapshot of the coordinator, produced by [`P2PSwarmV2::status`].
#[derive(Debug, Clone)]
pub struct SwarmStatus {
    /// Whether `connect` has been called (and `disconnect` has not).
    pub connected: bool,
    /// Swarm ID derived from the swarm key.
    pub swarm_id: String,
    /// Our agent ID within the swarm.
    pub agent_id: String,
    /// Our Ed25519 public identity key.
    pub public_key: [u8; 32],
    /// Members with a heartbeat inside `HEARTBEAT_TIMEOUT_MS` (includes us).
    pub active_peers: usize,
    /// Number of tasks currently pending locally.
    pub pending_tasks: usize,
    /// Relay health summary.
    pub relay_metrics: crate::p2p::relay::RelayMetrics,
}
/// Message handler callback: invoked with (decrypted payload, sender agent ID)
/// after an envelope passes full verification.
pub type MessageHandler = Box<dyn Fn(&[u8], &str) + Send + Sync>;
/// Task claim record: an executor's signed assertion that it is taking a task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskClaim {
    /// ID of the claimed task.
    pub task_id: String,
    /// Agent ID of the claiming executor.
    pub executor_id: String,
    /// Claim time, Unix millis (claims expire after TASK_CLAIM_WINDOW_MS).
    pub timestamp: u64,
    // BigArray: serde cannot derive for [u8; 64] natively.
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
/// Signed heartbeat message advertising liveness, capabilities and load.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Heartbeat {
    /// Sender agent ID.
    pub agent_id: String,
    /// Publish time, Unix millis.
    pub timestamp: u64,
    /// Capabilities the sender is advertising.
    pub capabilities: Vec<String>,
    /// Self-reported load, 0.0 to 1.0.
    pub load: f32,
    // BigArray: serde cannot derive for [u8; 64] natively.
    #[serde(with = "BigArray")]
    pub signature: [u8; 64],
}
impl Heartbeat {
    /// Canonical JSON used for signing: a fixed field set in stable key
    /// order, with the signature field deliberately excluded.
    pub fn canonical_for_signing(&self) -> String {
        CanonicalJson::stringify(&serde_json::json!({
            "agent_id": self.agent_id,
            "capabilities": self.capabilities,
            "load": self.load,
            "timestamp": self.timestamp,
        }))
    }
}
/// P2P Swarm Coordinator.
///
/// Owns the local identity, relay health tracking, artifact cache and task
/// bookkeeping for one swarm membership. Shared state sits behind
/// `Arc<RwLock<..>>` so cloned handles can be moved into background tasks
/// (see the heartbeat loop).
pub struct P2PSwarmV2 {
    /// Our identity manager (keys, registry, replay protection).
    identity: Arc<IdentityManager>,
    /// Relay manager (health tracking / failover).
    relay_manager: Arc<RelayManager>,
    /// Artifact store (CID-addressed local cache).
    artifact_store: Arc<ArtifactStore>,
    /// Swarm key (for envelope encryption); shared secret among members.
    swarm_key: [u8; 32],
    /// Swarm ID (derived from swarm key hash, first 8 bytes hex).
    swarm_id: String,
    /// Our agent ID.
    agent_id: String,
    /// Our advertised capabilities.
    capabilities: Vec<String>,
    /// Connection state flag read by background loops.
    connected: Arc<RwLock<bool>>,
    /// Message handlers by topic.
    handlers: Arc<RwLock<HashMap<String, MessageHandler>>>,
    /// Pending tasks keyed by task ID.
    pending_tasks: Arc<RwLock<HashMap<String, TaskEnvelope>>>,
    /// Task claims keyed by task ID.
    task_claims: Arc<RwLock<HashMap<String, TaskClaim>>>,
    /// Shutdown signal for the heartbeat loop (set while connected).
    shutdown_tx: Option<mpsc::Sender<()>>,
}
impl P2PSwarmV2 {
    /// Create a new swarm coordinator with a fresh identity.
    ///
    /// If `swarm_key` is `None`, a random 32-byte key is generated. The swarm
    /// ID is the hex of the first 8 bytes of the key's hash.
    pub fn new(agent_id: &str, swarm_key: Option<[u8; 32]>, capabilities: Vec<String>) -> Self {
        let identity = Arc::new(IdentityManager::new());
        let relay_manager = Arc::new(RelayManager::new());
        let artifact_store = Arc::new(ArtifactStore::new());
        // Generate or use provided swarm key
        let swarm_key = swarm_key.unwrap_or_else(|| {
            let mut key = [0u8; 32];
            rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut key);
            key
        });
        // Derive swarm ID from key
        let swarm_id = hex::encode(&CryptoV2::hash(&swarm_key)[..8]);
        Self {
            identity,
            relay_manager,
            artifact_store,
            swarm_key,
            swarm_id,
            agent_id: agent_id.to_string(),
            capabilities,
            connected: Arc::new(RwLock::new(false)),
            handlers: Arc::new(RwLock::new(HashMap::new())),
            pending_tasks: Arc::new(RwLock::new(HashMap::new())),
            task_claims: Arc::new(RwLock::new(HashMap::new())),
            shutdown_tx: None,
        }
    }

    /// Get swarm key (for sharing with peers out-of-band).
    pub fn swarm_key(&self) -> [u8; 32] {
        self.swarm_key
    }

    /// Get swarm key as standard base64 (convenient for copy/paste sharing).
    pub fn swarm_key_base64(&self) -> String {
        base64::Engine::encode(&base64::engine::general_purpose::STANDARD, self.swarm_key)
    }

    /// Connect to swarm: self-register, flip the connected flag and start the
    /// heartbeat loop. Currently always succeeds (no network handshake yet).
    pub async fn connect(&mut self) -> Result<(), String> {
        tracing::info!("Connecting to swarm: {}", self.swarm_id);
        // Register ourselves
        let registration = self.identity.create_registration(&self.agent_id, self.capabilities.clone());
        self.identity.register_member(registration);
        *self.connected.write() = true;
        // Start heartbeat loop
        self.start_heartbeat_loop();
        tracing::info!("Connected to swarm {} as {}", self.swarm_id, self.agent_id);
        Ok(())
    }

    /// Disconnect from swarm: clears the connected flag and signals the
    /// heartbeat loop to stop. Idempotent (also called from Drop).
    pub fn disconnect(&mut self) {
        *self.connected.write() = false;
        if let Some(tx) = self.shutdown_tx.take() {
            let _ = tx.try_send(());
        }
        tracing::info!("Disconnected from swarm {}", self.swarm_id);
    }

    /// Spawn the background heartbeat loop.
    ///
    /// Every HEARTBEAT_INTERVAL_MS it builds a signed heartbeat and also
    /// piggybacks nonce cleanup. The loop exits on shutdown signal or when
    /// the connected flag is cleared.
    fn start_heartbeat_loop(&mut self) {
        let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1);
        self.shutdown_tx = Some(shutdown_tx);
        // Clone Arc handles so the spawned task owns its state.
        let identity = self.identity.clone();
        let agent_id = self.agent_id.clone();
        let capabilities = self.capabilities.clone();
        let connected = self.connected.clone();
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(HEARTBEAT_INTERVAL_MS));
            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        if !*connected.read() {
                            break;
                        }
                        // Create and sign heartbeat
                        let mut heartbeat = Heartbeat {
                            agent_id: agent_id.clone(),
                            timestamp: chrono::Utc::now().timestamp_millis() as u64,
                            capabilities: capabilities.clone(),
                            load: 0.5, // TODO: Measure actual load
                            signature: [0u8; 64],
                        };
                        let canonical = heartbeat.canonical_for_signing();
                        heartbeat.signature = identity.sign(canonical.as_bytes());
                        // In real implementation, publish to GUN
                        tracing::debug!("Published heartbeat for {}", agent_id);
                        // Cleanup expired nonces
                        identity.cleanup_nonces();
                    }
                    _ = shutdown_rx.recv() => {
                        tracing::debug!("Heartbeat loop shutting down");
                        break;
                    }
                }
            }
        });
    }

    /// Register a verified member after strict field validation (non-zero
    /// X25519 key, non-empty capabilities, plausible joined_at). Signature
    /// verification itself happens in IdentityManager::register_member.
    pub fn register_member(&self, member: RegisteredMember) -> bool {
        // Strict validation: require all fields
        if member.x25519_public_key == [0u8; 32] {
            tracing::warn!("Rejecting member {}: missing x25519_public_key", member.agent_id);
            return false;
        }
        if member.capabilities.is_empty() {
            tracing::warn!("Rejecting member {}: empty capabilities", member.agent_id);
            return false;
        }
        if member.joined_at == 0 {
            tracing::warn!("Rejecting member {}: invalid joined_at", member.agent_id);
            return false;
        }
        self.identity.register_member(member)
    }

    /// Resolve member from registry (returns full member, not just key).
    pub fn resolve_member(&self, agent_id: &str) -> Option<RegisteredMember> {
        self.identity.get_member(agent_id)
    }

    /// Subscribe to a topic; the handler replaces any previous one for it.
    pub fn subscribe(&self, topic: &str, handler: MessageHandler) {
        self.handlers.write().insert(topic.to_string(), handler);
    }

    /// Publish a message to a topic; returns the message ID.
    ///
    /// Encrypts with the swarm key and signs the canonical envelope header.
    /// NOTE(review): actual transport publish is stubbed ("In real
    /// implementation, publish to GUN") — the envelope is built and signed
    /// but not sent anywhere yet.
    pub fn publish(&self, topic: &str, payload: &[u8]) -> Result<String, String> {
        if !*self.connected.read() {
            return Err("Not connected".to_string());
        }
        let nonce = IdentityManager::generate_nonce();
        let counter = self.identity.next_send_counter();
        // Encrypt payload with swarm key
        let encrypted = CryptoV2::encrypt(payload, &self.swarm_key)?;
        // Create envelope. Plaintext is also passed — presumably so
        // new_unsigned can record the payload hash; confirm it does not
        // embed the plaintext itself.
        let mut envelope = SignedEnvelope::new_unsigned(
            format!("{}:{}", self.swarm_id, uuid::Uuid::new_v4()),
            topic.to_string(),
            self.agent_id.clone(),
            payload,
            nonce,
            counter,
            encrypted,
        );
        // Sign canonical header
        let canonical = envelope.canonical_header();
        envelope.signature = self.identity.sign(canonical.as_bytes());
        // In real implementation, publish to GUN
        tracing::debug!("Published message {} to topic {}", envelope.message_id, topic);
        Ok(envelope.message_id)
    }

    /// Process incoming envelope (with full verification).
    ///
    /// Verification order is security-critical and must not change:
    /// replay check, counter check, registry resolve, signature (registry
    /// key only), decrypt, payload hash, then dispatch.
    pub fn process_envelope(&self, envelope: &SignedEnvelope) -> Result<Vec<u8>, String> {
        // 1. Check nonce and timestamp (replay protection)
        if !self.identity.check_nonce(&envelope.nonce, envelope.timestamp, &envelope.sender_id) {
            return Err("Replay detected or expired".to_string());
        }
        // 2. Check counter (ordering)
        if !self.identity.validate_recv_counter(&envelope.sender_id, envelope.counter) {
            return Err("Invalid counter".to_string());
        }
        // 3. Resolve sender from registry (NOT from envelope)
        let _member = self.resolve_member(&envelope.sender_id)
            .ok_or_else(|| format!("Sender {} not in registry", envelope.sender_id))?;
        // 4. Verify signature using REGISTRY key
        let canonical = envelope.canonical_header();
        if !self.identity.verify_from_registry(&envelope.sender_id, canonical.as_bytes(), &envelope.signature) {
            return Err("Invalid signature".to_string());
        }
        // 5. Decrypt with swarm key
        let payload = CryptoV2::decrypt(&envelope.encrypted, &self.swarm_key)?;
        // 6. Verify payload hash
        let payload_hash = CryptoV2::hash(&payload);
        if payload_hash != envelope.payload_hash {
            return Err("Payload hash mismatch".to_string());
        }
        // 7. Dispatch to handler
        if let Some(handler) = self.handlers.read().get(&envelope.topic) {
            handler(&payload, &envelope.sender_id);
        }
        Ok(payload)
    }

    /// Create a signed signaling message (WebRTC offer/answer/ice).
    pub fn create_signaling(&self, signal_type: SignalType, to: &str, payload: String, ttl_ms: u64) -> SignalingMessage {
        let mut signal = SignalingMessage::new_unsigned(
            signal_type,
            self.agent_id.clone(),
            to.to_string(),
            payload,
            ttl_ms,
        );
        // Sign using canonical (no pubkey in signed data)
        let canonical = signal.canonical_for_signing();
        signal.signature = self.identity.sign(canonical.as_bytes());
        signal
    }

    /// Verify a signaling message: expiry, payload hash, then signature
    /// using the REGISTRY key only (never a key carried in the signal).
    pub fn verify_signaling(&self, signal: &SignalingMessage) -> bool {
        // Check expiry
        if signal.is_expired() {
            return false;
        }
        // Verify payload hash
        let payload_hash = CryptoV2::hash(signal.payload.as_bytes());
        if payload_hash != signal.payload_hash {
            return false;
        }
        // Verify signature using REGISTRY key (not from signal)
        let canonical = signal.canonical_for_signing();
        self.identity.verify_from_registry(&signal.from, canonical.as_bytes(), &signal.signature)
    }

    /// Store an artifact in the local store and return its CID.
    pub fn store_artifact(&self, data: &[u8], compress: bool) -> Result<String, String> {
        let info = self.artifact_store.store(data, compress)?;
        Ok(info.cid)
    }

    /// Retrieve an artifact by CID from the local store (None if absent).
    pub fn retrieve_artifact(&self, cid: &str) -> Option<Vec<u8>> {
        self.artifact_store.retrieve(cid)
    }

    /// Create and sign an artifact pointer for a stored artifact.
    pub fn create_artifact_pointer(&self, artifact_type: ArtifactType, cid: &str, dimensions: &str) -> Option<ArtifactPointer> {
        self.artifact_store.create_pointer(artifact_type, &self.agent_id, cid, dimensions, &self.identity)
    }

    /// Submit a task: record it locally and publish it on the "tasks" topic.
    /// Returns the task ID.
    pub fn submit_task(&self, task: TaskEnvelope) -> Result<String, String> {
        let task_id = task.task_id.clone();
        self.pending_tasks.write().insert(task_id.clone(), task.clone());
        // Publish to tasks topic
        let task_bytes = serde_json::to_vec(&task)
            .map_err(|e| e.to_string())?;
        self.publish("tasks", &task_bytes)?;
        Ok(task_id)
    }

    /// Claim a pending task for execution.
    ///
    /// Fails if the task is unknown or another claim exists inside
    /// TASK_CLAIM_WINDOW_MS; otherwise stores and returns our signed claim.
    pub fn claim_task(&self, task_id: &str) -> Result<TaskClaim, String> {
        // Check if task exists
        if !self.pending_tasks.read().contains_key(task_id) {
            return Err("Task not found".to_string());
        }
        // Check if already claimed within window
        let now = chrono::Utc::now().timestamp_millis() as u64;
        if let Some(existing) = self.task_claims.read().get(task_id) {
            // NOTE(review): claims are currently only created locally, so
            // existing.timestamp <= now holds; if claims ever arrive from
            // peers, this subtraction can underflow — use saturating_sub.
            if now - existing.timestamp < TASK_CLAIM_WINDOW_MS {
                return Err(format!("Task already claimed by {}", existing.executor_id));
            }
        }
        // Create claim
        let mut claim = TaskClaim {
            task_id: task_id.to_string(),
            executor_id: self.agent_id.clone(),
            timestamp: now,
            signature: [0u8; 64],
        };
        // Sign claim (canonical JSON: fixed keys, signature excluded)
        let canonical = serde_json::json!({
            "executor_id": claim.executor_id,
            "task_id": claim.task_id,
            "timestamp": claim.timestamp,
        });
        let canonical_str = CanonicalJson::stringify(&canonical);
        claim.signature = self.identity.sign(canonical_str.as_bytes());
        // Store claim
        self.task_claims.write().insert(task_id.to_string(), claim.clone());
        Ok(claim)
    }

    /// Create a signed task receipt binding the task to its result and
    /// resource usage. The start time is back-computed as now - execution_ms.
    pub fn create_receipt(
        &self,
        task: &TaskEnvelope,
        result_cid: String,
        status: TaskStatus,
        fuel_used: u64,
        memory_peak_mb: u32,
        execution_ms: u64,
        input_hash: [u8; 32],
        output_hash: [u8; 32],
        module_hash: [u8; 32],
    ) -> TaskReceipt {
        let now = chrono::Utc::now().timestamp_millis() as u64;
        let mut receipt = TaskReceipt::new_unsigned(
            task,
            self.agent_id.clone(),
            result_cid,
            status,
            fuel_used,
            memory_peak_mb,
            execution_ms,
            input_hash,
            output_hash,
            module_hash,
            // NOTE(review): underflows if a caller passes execution_ms > now
            // (garbage input) — consider now.saturating_sub(execution_ms).
            now - execution_ms,
            now,
        );
        // Sign the FULL canonical receipt (all binding fields)
        let canonical = receipt.canonical_for_signing();
        receipt.signature = self.identity.sign(canonical.as_bytes());
        receipt
    }

    /// Get a point-in-time status snapshot of the coordinator.
    pub fn status(&self) -> SwarmStatus {
        SwarmStatus {
            connected: *self.connected.read(),
            swarm_id: self.swarm_id.clone(),
            agent_id: self.agent_id.clone(),
            public_key: self.identity.public_key(),
            active_peers: self.identity.get_active_members(HEARTBEAT_TIMEOUT_MS).len(),
            pending_tasks: self.pending_tasks.read().len(),
            relay_metrics: self.relay_manager.get_metrics(),
        }
    }

    /// Get IDs of peers with a recent heartbeat (includes ourselves, since
    /// we self-register on connect).
    pub fn active_peers(&self) -> Vec<String> {
        self.identity.get_active_members(HEARTBEAT_TIMEOUT_MS)
            .into_iter()
            .map(|m| m.agent_id)
            .collect()
    }

    /// Derive a session key for a direct channel with `peer_id`, scoped to
    /// this swarm's ID.
    pub fn derive_session_key(&self, peer_id: &str) -> Option<[u8; 32]> {
        self.identity.derive_session_key(peer_id, &self.swarm_id)
    }
}
/// Ensure the heartbeat loop is signalled to stop and the connected flag is
/// cleared even if the caller forgets to call `disconnect` explicitly.
impl Drop for P2PSwarmV2 {
    fn drop(&mut self) {
        self.disconnect();
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// connect() flips the connected flag and the status reflects our ID.
    #[tokio::test]
    async fn test_swarm_connect() {
        let mut swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        swarm.connect().await.unwrap();
        let status = swarm.status();
        assert!(status.connected);
        assert_eq!(status.agent_id, "test-agent");
    }

    /// A properly self-signed registration from another identity is accepted
    /// and afterwards resolvable.
    #[tokio::test]
    async fn test_member_registration() {
        let swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        // Create another identity
        let other_identity = IdentityManager::new();
        let registration = other_identity.create_registration("peer-1", vec!["worker".to_string()]);
        // Should succeed with valid registration
        assert!(swarm.register_member(registration));
        // Should be able to resolve
        let member = swarm.resolve_member("peer-1");
        assert!(member.is_some());
    }

    /// Strict validation rejects registrations with an all-zero X25519 key
    /// or an empty capability list.
    #[tokio::test]
    async fn test_strict_member_validation() {
        let swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        // Registration with missing x25519 key should fail
        let mut bad_registration = RegisteredMember {
            agent_id: "bad-peer".to_string(),
            ed25519_public_key: [0u8; 32],
            x25519_public_key: [0u8; 32], // Invalid: all zeros
            capabilities: vec!["worker".to_string()],
            joined_at: chrono::Utc::now().timestamp_millis() as u64,
            last_heartbeat: 0,
            signature: [0u8; 64],
        };
        assert!(!swarm.register_member(bad_registration.clone()));
        // Empty capabilities should also fail
        bad_registration.x25519_public_key = [1u8; 32];
        bad_registration.capabilities = vec![];
        assert!(!swarm.register_member(bad_registration));
    }

    /// Artifact round-trip through the local store, and pointer creation
    /// from the resulting CID.
    #[test]
    fn test_artifact_store_and_pointer() {
        let swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        let data = b"test artifact data";
        let cid = swarm.store_artifact(data, true).unwrap();
        let retrieved = swarm.retrieve_artifact(&cid).unwrap();
        assert_eq!(data.as_slice(), retrieved.as_slice());
        let pointer = swarm.create_artifact_pointer(ArtifactType::QTable, &cid, "100x10");
        assert!(pointer.is_some());
    }

    /// First claim on a pending task succeeds; a second claim inside the
    /// claim window is rejected.
    #[tokio::test]
    async fn test_task_claim() {
        let mut swarm = P2PSwarmV2::new("test-agent", None, vec!["executor".to_string()]);
        swarm.connect().await.unwrap();
        let task = TaskEnvelope::new(
            "task-001".to_string(),
            "local:module".to_string(),
            "process".to_string(),
            "local:input".to_string(),
            [0u8; 32],
            TaskBudgets {
                fuel_limit: 1000000,
                memory_mb: 128,
                timeout_ms: 30000,
            },
            "requester".to_string(),
            chrono::Utc::now().timestamp_millis() as u64 + 60000,
            1,
        );
        // Insert directly into pending_tasks (bypasses the publish stub).
        swarm.pending_tasks.write().insert(task.task_id.clone(), task.clone());
        // Claim should succeed
        let claim = swarm.claim_task("task-001").unwrap();
        assert_eq!(claim.task_id, "task-001");
        assert_eq!(claim.executor_id, "test-agent");
        // Second claim should fail (within window)
        assert!(swarm.claim_task("task-001").is_err());
    }
}