Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
1006	vendor/ruvector/examples/edge-net/src/swarm/collective.rs	vendored (new file)
(File diff suppressed because it is too large.)
704	vendor/ruvector/examples/edge-net/src/swarm/consensus.rs	vendored (new file)
@@ -0,0 +1,704 @@
//! Entropy-Based Consensus for Swarm Intelligence
//!
//! Implements entropy-minimizing negotiation between swarm nodes.
//! Consensus is achieved when belief entropy falls below threshold,
//! indicating the swarm has converged to a shared decision.
//!
//! ## Theory
//!
//! Shannon entropy measures uncertainty in a probability distribution:
//! H = -SUM(p_i * log2(p_i))
//!
//! Low entropy = high certainty = convergence
//! High entropy = uncertainty = negotiation needed
//!
//! ## Algorithm
//!
//! 1. Each node maintains belief probabilities for decisions
//! 2. Nodes exchange beliefs with peers (gossip)
//! 3. Beliefs are averaged: p_new = 0.5 * p_local + 0.5 * p_peer
//! 4. Convergence when H < threshold (e.g., 0.1)
//!
//! ## References
//!
//! - DeGroot consensus model
//! - Entropy-based stopping criteria
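//!
//! ## Example
//!
//! A minimal illustrative sketch of the entropy check, using the `set_beliefs`
//! and `entropy` API defined below (values rounded):
//!
//! ```rust,ignore
//! use ruvector_edge_net::swarm::EntropyConsensus;
//!
//! let c = EntropyConsensus::new();
//! // Two equally likely options: H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit.
//! c.set_beliefs(&[(1, 0.5), (2, 0.5)]);
//! assert!((c.entropy() - 1.0).abs() < 0.01);
//!
//! // A concentrated distribution: H(0.95, 0.05) ~= 0.286 bits.
//! c.set_beliefs(&[(1, 0.95), (2, 0.05)]);
//! assert!(c.entropy() < 0.3);
//! ```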

use wasm_bindgen::prelude::*;
use serde::{Serialize, Deserialize};
use rustc_hash::FxHashMap;
use std::sync::RwLock;

// ============================================================================
// Decision Types
// ============================================================================

/// A decision that the swarm can make
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
pub enum Decision {
    /// Accept a proposed action
    Accept(u64),
    /// Reject a proposed action
    Reject(u64),
    /// Route task to specific node
    RouteToNode(u32),
    /// Allocate resources
    Allocate(u32),
    /// Elect a coordinator
    ElectCoordinator(u32),
    /// Custom decision with ID
    Custom(u64),
}

impl Decision {
    /// Get decision ID for hashing
    pub fn id(&self) -> u64 {
        match self {
            Decision::Accept(id) => *id,
            Decision::Reject(id) => *id | 0x8000_0000_0000_0000,
            Decision::RouteToNode(node) => *node as u64 | 0x1000_0000_0000_0000,
            Decision::Allocate(amount) => *amount as u64 | 0x2000_0000_0000_0000,
            Decision::ElectCoordinator(node) => *node as u64 | 0x3000_0000_0000_0000,
            Decision::Custom(id) => *id | 0x4000_0000_0000_0000,
        }
    }
}

// ============================================================================
// Entropy-Based Consensus
// ============================================================================

/// Configuration for entropy consensus
#[derive(Clone, Debug)]
pub struct EntropyConsensusConfig {
    /// Entropy threshold for convergence (lower = stricter)
    pub entropy_threshold: f32,
    /// Maximum negotiation rounds before timeout
    pub max_negotiation_rounds: usize,
    /// Mixing weight for local beliefs (0.0-1.0)
    pub local_weight: f32,
    /// Minimum probability to consider (prevents log(0))
    pub min_probability: f32,
    /// Enable temperature-based annealing
    pub enable_annealing: bool,
    /// Initial temperature for annealing
    pub initial_temperature: f32,
}

impl Default for EntropyConsensusConfig {
    fn default() -> Self {
        Self {
            entropy_threshold: 0.1,
            max_negotiation_rounds: 50,
            local_weight: 0.5,
            min_probability: 1e-6,
            enable_annealing: true,
            initial_temperature: 1.0,
        }
    }
}

/// Entropy-based consensus engine for swarm decisions
#[wasm_bindgen]
pub struct EntropyConsensus {
    /// Belief probabilities for each decision
    beliefs: RwLock<FxHashMap<u64, f32>>,
    /// Entropy threshold for convergence
    entropy_threshold: f32,
    /// Completed negotiation rounds
    negotiation_rounds: RwLock<usize>,
    /// Maximum rounds allowed
    max_rounds: usize,
    /// Mixing weight for local beliefs
    local_weight: f32,
    /// Minimum probability (prevents log(0))
    min_prob: f32,
    /// Current temperature for annealing
    temperature: RwLock<f32>,
    /// Initial temperature
    initial_temperature: f32,
    /// Enable annealing
    enable_annealing: bool,
    /// History of entropy values (for monitoring convergence)
    entropy_history: RwLock<Vec<f32>>,
}

#[wasm_bindgen]
impl EntropyConsensus {
    /// Create new entropy consensus with default configuration
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Self::with_config(EntropyConsensusConfig::default())
    }

    /// Create with custom entropy threshold
    #[wasm_bindgen(js_name = withThreshold)]
    pub fn with_threshold(threshold: f32) -> Self {
        let mut config = EntropyConsensusConfig::default();
        config.entropy_threshold = threshold.clamp(0.01, 2.0);
        Self::with_config(config)
    }

    /// Get current entropy of belief distribution
    #[wasm_bindgen]
    pub fn entropy(&self) -> f32 {
        let beliefs = self.beliefs.read().unwrap();
        self.compute_entropy(&beliefs)
    }

    /// Check if consensus has been reached
    #[wasm_bindgen]
    pub fn converged(&self) -> bool {
        self.entropy() < self.entropy_threshold
    }

    /// Get the winning decision (if converged)
    #[wasm_bindgen(js_name = getDecision)]
    pub fn get_decision(&self) -> Option<u64> {
        if !self.converged() {
            return None;
        }

        let beliefs = self.beliefs.read().unwrap();
        beliefs.iter()
            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(&id, _)| id)
    }

    /// Get number of negotiation rounds completed
    #[wasm_bindgen(js_name = getRounds)]
    pub fn get_rounds(&self) -> usize {
        *self.negotiation_rounds.read().unwrap()
    }

    /// Get the entropy threshold for convergence
    #[wasm_bindgen(js_name = getEntropyThreshold)]
    pub fn get_entropy_threshold(&self) -> f32 {
        self.entropy_threshold
    }

    /// Check if negotiation has timed out
    #[wasm_bindgen(js_name = hasTimedOut)]
    pub fn has_timed_out(&self) -> bool {
        *self.negotiation_rounds.read().unwrap() >= self.max_rounds
    }

    /// Get belief probability for a decision
    #[wasm_bindgen(js_name = getBelief)]
    pub fn get_belief(&self, decision_id: u64) -> f32 {
        self.beliefs.read().unwrap()
            .get(&decision_id)
            .copied()
            .unwrap_or(0.0)
    }

    /// Set initial belief for a decision
    #[wasm_bindgen(js_name = setBelief)]
    pub fn set_belief(&self, decision_id: u64, probability: f32) {
        let prob = probability.clamp(self.min_prob, 1.0);
        self.beliefs.write().unwrap().insert(decision_id, prob);
        self.normalize_beliefs();
    }

    /// Set belief without normalizing (for batch updates)
    /// Call normalize_beliefs() after all set_belief_raw calls
    pub fn set_belief_raw(&self, decision_id: u64, probability: f32) {
        let prob = probability.clamp(self.min_prob, 1.0);
        self.beliefs.write().unwrap().insert(decision_id, prob);
    }

    /// Manually trigger normalization (for use after set_belief_raw)
    pub fn finalize_beliefs(&self) {
        self.normalize_beliefs();
    }

    /// Get number of decision options
    #[wasm_bindgen(js_name = optionCount)]
    pub fn option_count(&self) -> usize {
        self.beliefs.read().unwrap().len()
    }

    /// Get current temperature (for annealing)
    #[wasm_bindgen(js_name = getTemperature)]
    pub fn get_temperature(&self) -> f32 {
        *self.temperature.read().unwrap()
    }

    /// Get entropy history as JSON
    #[wasm_bindgen(js_name = getEntropyHistory)]
    pub fn get_entropy_history(&self) -> String {
        let history = self.entropy_history.read().unwrap();
        serde_json::to_string(&*history).unwrap_or_else(|_| "[]".to_string())
    }

    /// Get consensus statistics as JSON
    #[wasm_bindgen(js_name = getStats)]
    pub fn get_stats(&self) -> String {
        let entropy = self.entropy();
        let rounds = *self.negotiation_rounds.read().unwrap();
        let converged = entropy < self.entropy_threshold;
        let temp = *self.temperature.read().unwrap();
        let options = self.beliefs.read().unwrap().len();

        format!(
            r#"{{"entropy":{:.4},"rounds":{},"converged":{},"temperature":{:.4},"options":{},"threshold":{:.4}}}"#,
            entropy, rounds, converged, temp, options, self.entropy_threshold
        )
    }

    /// Reset consensus state for new decision
    #[wasm_bindgen]
    pub fn reset(&self) {
        *self.beliefs.write().unwrap() = FxHashMap::default();
        *self.negotiation_rounds.write().unwrap() = 0;
        *self.temperature.write().unwrap() = self.initial_temperature;
        self.entropy_history.write().unwrap().clear();
    }
}

impl Default for EntropyConsensus {
    fn default() -> Self {
        Self::new()
    }
}

impl EntropyConsensus {
    /// Create with full configuration
    pub fn with_config(config: EntropyConsensusConfig) -> Self {
        Self {
            beliefs: RwLock::new(FxHashMap::default()),
            entropy_threshold: config.entropy_threshold,
            negotiation_rounds: RwLock::new(0),
            max_rounds: config.max_negotiation_rounds,
            local_weight: config.local_weight,
            min_prob: config.min_probability,
            temperature: RwLock::new(config.initial_temperature),
            initial_temperature: config.initial_temperature,
            enable_annealing: config.enable_annealing,
            entropy_history: RwLock::new(Vec::with_capacity(config.max_negotiation_rounds)),
        }
    }

    /// Negotiate with peer beliefs to minimize entropy
    ///
    /// Updates local beliefs by averaging with peer beliefs:
    /// p_new = local_weight * p_local + (1 - local_weight) * p_peer
    ///
    /// This implements a weighted averaging consensus protocol.
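    ///
    /// Illustrative sketch of a single negotiation step:
    ///
    /// ```rust,ignore
    /// use rustc_hash::FxHashMap;
    ///
    /// let mut peer_beliefs = FxHashMap::default();
    /// peer_beliefs.insert(1u64, 0.9f32);
    /// peer_beliefs.insert(2u64, 0.1f32);
    /// consensus.negotiate(&peer_beliefs);
    /// ```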
    pub fn negotiate(&self, peer_beliefs: &FxHashMap<u64, f32>) {
        let peer_weight = 1.0 - self.local_weight;

        // Apply temperature-scaled mixing if annealing is enabled
        let effective_peer_weight = if self.enable_annealing {
            let temp = *self.temperature.read().unwrap();
            peer_weight * temp
        } else {
            peer_weight
        };

        let effective_local_weight = 1.0 - effective_peer_weight;

        {
            let mut beliefs = self.beliefs.write().unwrap();

            // Update beliefs for all known decisions
            for (decision_id, peer_prob) in peer_beliefs {
                let my_prob = beliefs.get(decision_id).copied().unwrap_or(0.5);
                let new_prob = effective_local_weight * my_prob + effective_peer_weight * peer_prob;
                beliefs.insert(*decision_id, new_prob.max(self.min_prob));
            }

            // Also consider local-only beliefs (peer may not know about)
            let local_only: Vec<u64> = beliefs.keys()
                .filter(|k| !peer_beliefs.contains_key(*k))
                .copied()
                .collect();

            for decision_id in local_only {
                if let Some(prob) = beliefs.get_mut(&decision_id) {
                    // Decay beliefs not shared by peer
                    *prob = (*prob * effective_local_weight).max(self.min_prob);
                }
            }
        }

        self.normalize_beliefs();

        // Update negotiation round count
        {
            let mut rounds = self.negotiation_rounds.write().unwrap();
            *rounds += 1;
        }

        // Update temperature (simulated annealing)
        if self.enable_annealing {
            let mut temp = self.temperature.write().unwrap();
            *temp = (*temp * 0.95).max(0.01); // Exponential cooling
        }

        // Record entropy history
        {
            let entropy = self.entropy();
            let mut history = self.entropy_history.write().unwrap();
            history.push(entropy);
        }
    }

    /// Negotiate with peer beliefs (HashMap variant for convenience)
    pub fn negotiate_map(&self, peer_beliefs: &std::collections::HashMap<Decision, f32>) {
        let fx_map: FxHashMap<u64, f32> = peer_beliefs.iter()
            .map(|(d, p)| (d.id(), *p))
            .collect();
        self.negotiate(&fx_map);
    }

    /// Add a decision option with initial belief
    pub fn add_option(&self, decision: Decision, initial_belief: f32) {
        let prob = initial_belief.clamp(self.min_prob, 1.0);
        self.beliefs.write().unwrap().insert(decision.id(), prob);
        self.normalize_beliefs();
    }

    /// Get the best decision with its probability
    pub fn decision(&self) -> Option<(u64, f32)> {
        if !self.converged() {
            return None;
        }

        let beliefs = self.beliefs.read().unwrap();
        beliefs.iter()
            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(&id, &prob)| (id, prob))
    }

    /// Get all beliefs as a map
    pub fn get_all_beliefs(&self) -> FxHashMap<u64, f32> {
        self.beliefs.read().unwrap().clone()
    }

    /// Set multiple beliefs at once (normalized together)
    /// This avoids the issue where individual set_belief calls normalize prematurely
    pub fn set_beliefs(&self, new_beliefs: &[(u64, f32)]) {
        let mut beliefs = self.beliefs.write().unwrap();
        for (decision_id, probability) in new_beliefs {
            let prob = probability.clamp(self.min_prob, 1.0);
            beliefs.insert(*decision_id, prob);
        }
        drop(beliefs);
        self.normalize_beliefs();
    }

    /// Compute Shannon entropy of belief distribution
    fn compute_entropy(&self, beliefs: &FxHashMap<u64, f32>) -> f32 {
        if beliefs.is_empty() {
            return 0.0;
        }

        // H = -SUM(p_i * log2(p_i))
        -beliefs.values()
            .filter(|&&p| p > self.min_prob)
            .map(|&p| {
                let p_clamped = p.clamp(self.min_prob, 1.0);
                p_clamped * p_clamped.log2()
            })
            .sum::<f32>()
    }

    /// Normalize beliefs to sum to 1.0
    fn normalize_beliefs(&self) {
        let mut beliefs = self.beliefs.write().unwrap();
        let sum: f32 = beliefs.values().sum();

        if sum > 0.0 && sum != 1.0 {
            for prob in beliefs.values_mut() {
                *prob /= sum;
            }
        } else if sum == 0.0 && !beliefs.is_empty() {
            // Uniform distribution if all zeros
            let uniform = 1.0 / beliefs.len() as f32;
            for prob in beliefs.values_mut() {
                *prob = uniform;
            }
        }
    }
}

// ============================================================================
// Multi-Phase Consensus
// ============================================================================

/// Phase of consensus protocol
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum ConsensusPhase {
    /// Proposing options
    Proposal,
    /// Negotiating beliefs
    Negotiation,
    /// Final voting
    Voting,
    /// Consensus reached
    Committed,
    /// Failed to reach consensus
    Aborted,
}

/// Multi-phase consensus coordinator
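///
/// Illustrative sketch using the API below:
///
/// ```rust,ignore
/// let coordinator = ConsensusCoordinator::new(0.67);
/// coordinator.start_consensus("task-routing", EntropyConsensusConfig::default());
/// // Drive the protocol forward as options and beliefs arrive.
/// let phase = coordinator.advance_phase("task-routing");
/// ```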
pub struct ConsensusCoordinator {
    /// Current phase
    phase: RwLock<ConsensusPhase>,
    /// Active consensus instances by topic
    instances: RwLock<FxHashMap<String, EntropyConsensus>>,
    /// Phase transition timestamps
    phase_times: RwLock<Vec<u64>>,
    /// Quorum requirement (fraction of nodes)
    quorum: f32,
}

impl ConsensusCoordinator {
    /// Create new coordinator with quorum requirement
    pub fn new(quorum: f32) -> Self {
        Self {
            phase: RwLock::new(ConsensusPhase::Proposal),
            instances: RwLock::new(FxHashMap::default()),
            phase_times: RwLock::new(Vec::new()),
            quorum: quorum.clamp(0.5, 1.0),
        }
    }

    /// Start consensus for a topic
    pub fn start_consensus(&self, topic: &str, config: EntropyConsensusConfig) {
        let mut instances = self.instances.write().unwrap();
        instances.insert(topic.to_string(), EntropyConsensus::with_config(config));
        *self.phase.write().unwrap() = ConsensusPhase::Proposal;
    }

    /// Get consensus instance for topic
    pub fn get_instance(&self, topic: &str) -> Option<EntropyConsensus> {
        self.instances.read().unwrap().get(topic).map(|c| {
            // Return a new instance with same state
            let config = EntropyConsensusConfig {
                entropy_threshold: c.entropy_threshold,
                max_negotiation_rounds: c.max_rounds,
                local_weight: c.local_weight,
                min_probability: c.min_prob,
                enable_annealing: c.enable_annealing,
                initial_temperature: c.initial_temperature,
            };
            EntropyConsensus::with_config(config)
        })
    }

    /// Advance phase based on state
    pub fn advance_phase(&self, topic: &str) -> ConsensusPhase {
        let instances = self.instances.read().unwrap();

        if let Some(consensus) = instances.get(topic) {
            let mut phase = self.phase.write().unwrap();

            match *phase {
                ConsensusPhase::Proposal => {
                    if consensus.option_count() > 0 {
                        *phase = ConsensusPhase::Negotiation;
                    }
                }
                ConsensusPhase::Negotiation => {
                    if consensus.converged() {
                        *phase = ConsensusPhase::Voting;
                    } else if consensus.has_timed_out() {
                        *phase = ConsensusPhase::Aborted;
                    }
                }
                ConsensusPhase::Voting => {
                    // Check if quorum reached
                    if consensus.converged() {
                        *phase = ConsensusPhase::Committed;
                    }
                }
                ConsensusPhase::Committed | ConsensusPhase::Aborted => {
                    // Terminal states
                }
            }

            *phase
        } else {
            ConsensusPhase::Aborted
        }
    }

    /// Get current phase
    pub fn current_phase(&self) -> ConsensusPhase {
        *self.phase.read().unwrap()
    }
}

impl Default for ConsensusCoordinator {
    fn default() -> Self {
        Self::new(0.67)
    }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_entropy_calculation() {
        let consensus = EntropyConsensus::new();

        // Use set_beliefs to set multiple beliefs at once (avoids intermediate normalization)
        consensus.set_beliefs(&[(1, 0.5), (2, 0.5)]);
        let uniform_entropy = consensus.entropy();
        assert!((uniform_entropy - 1.0).abs() < 0.01, "Uniform entropy should be 1.0, got {}", uniform_entropy); // log2(2) = 1

        // Reset and test concentrated distribution
        consensus.reset();
        consensus.set_beliefs(&[(1, 0.99), (2, 0.01)]);
        let concentrated_entropy = consensus.entropy();
        assert!(concentrated_entropy < 0.1, "Concentrated entropy should be < 0.1, got {}", concentrated_entropy); // Very low entropy
    }

    #[test]
    fn test_convergence() {
        let config = EntropyConsensusConfig {
            entropy_threshold: 0.35, // Entropy of 0.95:0.05 is ~0.286, so use threshold > 0.286
            ..Default::default()
        };
        let consensus = EntropyConsensus::with_config(config);

        // Start with concentrated belief using set_beliefs to avoid intermediate normalization
        // H = -0.95*log2(0.95) - 0.05*log2(0.05) ~= 0.286
        consensus.set_beliefs(&[(1, 0.95), (2, 0.05)]);

        assert!(consensus.converged(), "Should be converged with entropy {}", consensus.entropy());
        assert!(consensus.get_decision().is_some());
        assert_eq!(consensus.get_decision().unwrap(), 1);
    }

    #[test]
    fn test_negotiation() {
        let consensus = EntropyConsensus::new();

        // Local: prefer option 1
        consensus.set_belief(1, 0.8);
        consensus.set_belief(2, 0.2);

        // Peer: prefers option 2
        let mut peer_beliefs = FxHashMap::default();
        peer_beliefs.insert(1, 0.2);
        peer_beliefs.insert(2, 0.8);

        // Negotiate - should move toward middle
        consensus.negotiate(&peer_beliefs);

        let belief_1 = consensus.get_belief(1);
        let belief_2 = consensus.get_belief(2);

        // After negotiation, beliefs should be closer to 0.5
        assert!(belief_1 < 0.8 && belief_1 > 0.2);
        assert!(belief_2 < 0.8 && belief_2 > 0.2);
    }

    #[test]
    fn test_repeated_negotiation_converges() {
        let config = EntropyConsensusConfig {
            entropy_threshold: 0.3, // Threshold for convergence
            local_weight: 0.5,
            enable_annealing: false, // Disable annealing for predictable convergence
            ..Default::default()
        };
        let consensus = EntropyConsensus::with_config(config);

        // Start uniform using set_beliefs
        consensus.set_beliefs(&[(1, 0.5), (2, 0.5)]);

        // Peer strongly prefers option 1
        let mut peer_beliefs = FxHashMap::default();
        peer_beliefs.insert(1, 0.95);
        peer_beliefs.insert(2, 0.05);

        // Negotiate multiple times
        for _ in 0..50 {
            consensus.negotiate(&peer_beliefs);
        }

        // Should have converged toward peer's preference
        let belief1 = consensus.get_belief(1);
        assert!(belief1 > 0.7, "Belief 1 should be > 0.7, got {}", belief1);
        assert!(consensus.converged(), "Should be converged with entropy {}", consensus.entropy());
    }

    #[test]
    fn test_timeout() {
        let config = EntropyConsensusConfig {
            max_negotiation_rounds: 5,
            ..Default::default()
        };
        let consensus = EntropyConsensus::with_config(config);

        consensus.set_belief(1, 0.5);
        consensus.set_belief(2, 0.5);

        // Both parties have same beliefs - no convergence
        let peer_beliefs = consensus.get_all_beliefs();

        for _ in 0..6 {
            consensus.negotiate(&peer_beliefs);
        }

        assert!(consensus.has_timed_out());
    }

    #[test]
    fn test_decision_types() {
        let d1 = Decision::Accept(42);
        let d2 = Decision::Reject(42);
        let d3 = Decision::RouteToNode(5);

        assert_ne!(d1.id(), d2.id());
        assert_ne!(d1.id(), d3.id());

        let consensus = EntropyConsensus::new();
        consensus.add_option(d1, 0.7);
        consensus.add_option(d2, 0.3);

        assert_eq!(consensus.option_count(), 2);
    }

    #[test]
    fn test_temperature_annealing() {
        let config = EntropyConsensusConfig {
            enable_annealing: true,
            initial_temperature: 1.0,
            ..Default::default()
        };
        let consensus = EntropyConsensus::with_config(config);

        consensus.set_belief(1, 0.6);
        consensus.set_belief(2, 0.4);

        let initial_temp = consensus.get_temperature();
        assert!((initial_temp - 1.0).abs() < 0.01);

        let peer_beliefs = consensus.get_all_beliefs();
        for _ in 0..10 {
            consensus.negotiate(&peer_beliefs);
        }

        let final_temp = consensus.get_temperature();
        assert!(final_temp < initial_temp); // Temperature should decrease
    }

    #[test]
    fn test_consensus_coordinator() {
        let coordinator = ConsensusCoordinator::new(0.67);

        let config = EntropyConsensusConfig::default();
        coordinator.start_consensus("task-routing", config);

        assert_eq!(coordinator.current_phase(), ConsensusPhase::Proposal);
    }
}
378	vendor/ruvector/examples/edge-net/src/swarm/mod.rs	vendored (new file)
@@ -0,0 +1,378 @@
//! Swarm Intelligence Module for Edge-Net
//!
//! Provides collective intelligence capabilities for the P2P AI network:
//!
//! - **Entropy-Based Consensus**: Negotiate decisions by minimizing belief entropy
//! - **Collective Memory**: Hippocampal-inspired pattern consolidation and sharing
//!
//! ## Architecture
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────────────┐
//! │                     Swarm Intelligence Layer                         │
//! ├─────────────────────────────────────────────────────────────────────┤
//! │  ┌─────────────────────┐    ┌─────────────────────────────────────┐ │
//! │  │  Entropy Consensus  │    │        Collective Memory            │ │
//! │  │                     │    │                                     │ │
//! │  │  - Belief mixing    │    │  - Pattern sharing (RAC events)     │ │
//! │  │  - Shannon entropy  │    │  - Consolidation queue              │ │
//! │  │  - Convergence      │    │  - Hippocampal replay               │ │
//! │  │  - Annealing        │    │  - HNSW indexing                    │ │
//! │  └─────────────────────┘    └─────────────────────────────────────┘ │
//! ├─────────────────────────────────────────────────────────────────────┤
//! │  ┌─────────────────────────────────────────────────────────────┐   │
//! │  │                    Integration Points                        │   │
//! │  │                                                               │   │
//! │  │  - RAC CoherenceEngine: Event logging, authority policies    │   │
//! │  │  - NetworkLearning: Pattern extraction, trajectories         │   │
//! │  │  - Network P2P: GUN.js/WebRTC message broadcast              │   │
//! │  └─────────────────────────────────────────────────────────────┘   │
//! └─────────────────────────────────────────────────────────────────────┘
//! ```
//!
//! ## Usage
//!
//! ### Entropy Consensus
//!
//! ```rust,ignore
//! use ruvector_edge_net::swarm::{EntropyConsensus, Decision};
//!
//! // Create consensus for task routing decision
//! let consensus = EntropyConsensus::with_threshold(0.1);
//!
//! // Add options with initial beliefs
//! consensus.set_belief(1, 0.6); // Route to node 1
//! consensus.set_belief(2, 0.4); // Route to node 2
//!
//! // Negotiate with peer beliefs
//! let peer_beliefs = peer.get_beliefs();
//! consensus.negotiate(&peer_beliefs);
//!
//! // Check for convergence
//! if consensus.converged() {
//!     let decision = consensus.get_decision().unwrap();
//!     println!("Consensus reached: route to node {}", decision);
//! }
//! ```
//!
//! ### Collective Memory
//!
//! ```rust,ignore
//! use ruvector_edge_net::swarm::{CollectiveMemory, Pattern, RacEvent};
//!
//! let memory = CollectiveMemory::new("node-1");
//!
//! // Share a learned pattern
//! let pattern = Pattern::new(
//!     "task-routing-v1".to_string(),
//!     vec![0.5, 0.3, 0.2], // Embedding
//!     0.95,                // Quality
//!     100,                 // Sample count
//!     "node-1".to_string(),
//! );
//! let rac_event = memory.share_pattern(&pattern);
//! swarm.publish(TOPIC_MODEL_SYNC, &serialize(&rac_event)?);
//!
//! // Receive pattern from peer
//! let peer_event = deserialize::<RacEvent>(&data)?;
//! if memory.receive_pattern(&peer_event) {
//!     println!("Pattern accepted for consolidation");
//! }
//!
//! // Consolidate during idle periods
//! let consolidated = memory.consolidate();
//! println!("Consolidated {} patterns", consolidated);
//! ```
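//!
//! ### Swarm Intelligence Coordinator
//!
//! An illustrative sketch combining consensus and memory through the
//! `SwarmIntelligence` wrapper defined below:
//!
//! ```rust,ignore
//! use ruvector_edge_net::swarm::SwarmIntelligence;
//!
//! let swarm = SwarmIntelligence::new("node-1");
//! swarm.start_consensus("task-routing", 0.5);
//! swarm.set_beliefs("task-routing", &[(1, 0.95), (2, 0.05)]);
//! if swarm.has_consensus("task-routing") {
//!     let winner = swarm.get_consensus_decision("task-routing");
//! }
//! ```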
//!
//! ## Integration with RAC
//!
//! The swarm module uses RAC (RuVector Adversarial Coherence) for:
//!
//! 1. **Pattern Assertions**: Shared patterns are RAC Assert events
//! 2. **Challenge/Support**: Disputed patterns can be challenged
//! 3. **Authority Policies**: Only trusted nodes can deprecate patterns
//! 4. **Audit Trail**: All pattern sharing is logged in Merkle tree
//!
//! ## References
//!
//! - DeGroot consensus model
//! - Complementary learning systems theory
//! - Federated learning pattern aggregation

pub mod consensus;
pub mod collective;
pub mod stigmergy;

// Re-export main types
pub use consensus::{
    EntropyConsensus,
    EntropyConsensusConfig,
    Decision,
    ConsensusPhase,
    ConsensusCoordinator,
};

pub use collective::{
    CollectiveMemory,
    CollectiveMemoryConfig,
    CollectiveStats,
    Pattern,
    HnswIndex,
    ClaimType,
    RacEvent,
    Swarm,
    TOPIC_MODEL_SYNC,
};

pub use stigmergy::{
    PeerId,
    PheromoneDeposit,
    PheromoneState,
    PheromoneTrail,
    RingBuffer,
    Stigmergy,
    StigmergyStats,
    WasmStigmergy,
};

use wasm_bindgen::prelude::*;
use rustc_hash::FxHashMap;

// ============================================================================
// Integrated Swarm Intelligence
// ============================================================================

/// Unified swarm intelligence coordinator
#[wasm_bindgen]
pub struct SwarmIntelligence {
    /// Entropy-based consensus engine
    consensus: EntropyConsensus,
    /// Collective memory for pattern sharing
    memory: CollectiveMemory,
    /// Local node ID
    node_id: String,
    /// Active consensus topics
    active_topics: std::sync::RwLock<FxHashMap<String, EntropyConsensus>>,
}

#[wasm_bindgen]
impl SwarmIntelligence {
    /// Create new swarm intelligence coordinator
    #[wasm_bindgen(constructor)]
    pub fn new(node_id: &str) -> Self {
        Self {
            consensus: EntropyConsensus::new(),
            memory: CollectiveMemory::new(node_id),
            node_id: node_id.to_string(),
            active_topics: std::sync::RwLock::new(FxHashMap::default()),
        }
    }

    /// Get node ID
    #[wasm_bindgen(js_name = nodeId)]
    pub fn node_id(&self) -> String {
        self.node_id.clone()
    }

    /// Start a new consensus round for a topic
    #[wasm_bindgen(js_name = startConsensus)]
    pub fn start_consensus(&self, topic: &str, threshold: f32) {
        let config = EntropyConsensusConfig {
            entropy_threshold: threshold.clamp(0.01, 2.0),
            ..Default::default()
        };
        let consensus = EntropyConsensus::with_config(config);
        self.active_topics.write().unwrap().insert(topic.to_string(), consensus);
    }

    /// Set belief for a topic's decision
    #[wasm_bindgen(js_name = setBelief)]
    pub fn set_belief(&self, topic: &str, decision_id: u64, probability: f32) {
        if let Some(consensus) = self.active_topics.write().unwrap().get(topic) {
            consensus.set_belief(decision_id, probability);
        }
    }

    /// Negotiate beliefs for a topic
    #[wasm_bindgen(js_name = negotiateBeliefs)]
    pub fn negotiate_beliefs(&self, topic: &str, beliefs_json: &str) -> bool {
        let beliefs: FxHashMap<u64, f32> = match serde_json::from_str(beliefs_json) {
            Ok(b) => b,
            Err(_) => return false,
        };

        if let Some(consensus) = self.active_topics.write().unwrap().get(topic) {
            consensus.negotiate(&beliefs);
            true
        } else {
            false
        }
    }

    /// Check if topic has reached consensus
    #[wasm_bindgen(js_name = hasConsensus)]
    pub fn has_consensus(&self, topic: &str) -> bool {
        self.active_topics.read().unwrap()
            .get(topic)
            .map(|c| c.converged())
            .unwrap_or(false)
    }

    /// Get consensus decision for topic
    #[wasm_bindgen(js_name = getConsensusDecision)]
    pub fn get_consensus_decision(&self, topic: &str) -> Option<u64> {
        self.active_topics.read().unwrap()
            .get(topic)
            .and_then(|c| c.get_decision())
    }

    /// Add pattern to collective memory
    #[wasm_bindgen(js_name = addPattern)]
    pub fn add_pattern(&self, pattern_json: &str) -> bool {
        let pattern: Pattern = match serde_json::from_str(pattern_json) {
            Ok(p) => p,
            Err(_) => return false,
        };
        self.memory.add_pattern(pattern)
    }

    /// Search collective memory
    #[wasm_bindgen(js_name = searchPatterns)]
    pub fn search_patterns(&self, query_json: &str, k: usize) -> String {
        self.memory.search(query_json, k)
    }

    /// Run memory consolidation
    #[wasm_bindgen]
    pub fn consolidate(&self) -> usize {
        self.memory.consolidate()
    }

    /// Run hippocampal replay
    #[wasm_bindgen]
    pub fn replay(&self) -> usize {
        self.memory.hippocampal_replay()
    }

    /// Get collective memory pattern count
    #[wasm_bindgen(js_name = patternCount)]
    pub fn pattern_count(&self) -> usize {
        self.memory.pattern_count()
    }

    /// Get queue size
    #[wasm_bindgen(js_name = queueSize)]
    pub fn queue_size(&self) -> usize {
        self.memory.queue_size()
    }

    /// Get combined statistics as JSON
    #[wasm_bindgen(js_name = getStats)]
    pub fn get_stats(&self) -> String {
        let memory_stats = self.memory.get_stats();
        let active_topics = self.active_topics.read().unwrap().len();

        format!(
            r#"{{"node_id":"{}","active_topics":{},"memory":{}}}"#,
            self.node_id, active_topics, memory_stats
        )
    }
}

impl SwarmIntelligence {
    /// Get reference to memory
    pub fn memory(&self) -> &CollectiveMemory {
        &self.memory
    }

    /// Get consensus for a topic
    pub fn get_consensus(&self, topic: &str) -> Option<EntropyConsensus> {
        self.active_topics.read().unwrap()
            .get(topic)
            .map(|c| {
                // Create new consensus with same config
                let config = EntropyConsensusConfig {
                    entropy_threshold: c.get_entropy_threshold(),
                    ..Default::default()
                };
                EntropyConsensus::with_config(config)
            })
    }

    /// Set multiple beliefs for a topic at once (avoids intermediate normalization)
    pub fn set_beliefs(&self, topic: &str, beliefs: &[(u64, f32)]) {
        if let Some(consensus) = self.active_topics.write().unwrap().get(topic) {
            consensus.set_beliefs(beliefs);
        }
    }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_swarm_intelligence_creation() {
        let swarm = SwarmIntelligence::new("node-1");
        assert_eq!(swarm.node_id(), "node-1");
        assert_eq!(swarm.pattern_count(), 0);
    }

    #[test]
    fn test_consensus_lifecycle() {
        let swarm = SwarmIntelligence::new("node-1");

        // Start consensus with a threshold that will allow convergence
        // Entropy of 0.95:0.05 distribution is ~0.286, so use threshold > 0.3
        swarm.start_consensus("task-routing", 0.5);

        // Set beliefs using set_beliefs to avoid intermediate normalization
        // Use very concentrated beliefs to ensure convergence
        swarm.set_beliefs("task-routing", &[(1, 0.95), (2, 0.05)]);

        // Check convergence (concentrated beliefs should converge)
        assert!(swarm.has_consensus("task-routing"), "Should have consensus for task-routing");
        assert_eq!(swarm.get_consensus_decision("task-routing"), Some(1));
    }

    #[test]
    fn test_pattern_lifecycle() {
        let swarm = SwarmIntelligence::new("node-1");

        // Add pattern
        let pattern_json = r#"{
            "id": "test-pattern",
            "embedding": [1.0, 2.0, 3.0],
            "quality": 0.9,
            "samples": 100,
            "evidence": [],
            "source_node": "node-1",
            "created_at": 0,
            "optimal_allocation": 0.5,
            "optimal_energy": 100,
            "task_type": null
        }"#;

        assert!(swarm.add_pattern(pattern_json));
        assert_eq!(swarm.queue_size(), 1);

        // Consolidate
        let consolidated = swarm.consolidate();
        assert!(consolidated > 0 || swarm.pattern_count() > 0 || swarm.queue_size() == 0);
    }

    #[test]
    fn test_stats() {
        let swarm = SwarmIntelligence::new("test-node");
        swarm.start_consensus("topic-1", 0.1);

        let stats = swarm.get_stats();
        assert!(stats.contains("test-node"));
        assert!(stats.contains("active_topics"));
        assert!(stats.contains("memory"));
    }
}
886	vendor/ruvector/examples/edge-net/src/swarm/stigmergy.rs	vendored (new file)
@@ -0,0 +1,886 @@
//! Stigmergy-based coordination using digital pheromones for self-organizing task allocation
//!
//! Stigmergy is an indirect coordination mechanism where agents leave traces (pheromones)
//! in the environment that influence the behavior of other agents. This creates emergent
//! specialization without explicit communication.
//!
//! ## Features
//!
//! - **Emergent Specialization**: Nodes naturally gravitate to successful task types
//! - **Self-Healing**: Failed task types lose pheromones, causing nodes to redistribute
//! - **P2P Sync**: Pheromone trails shared via gossip protocol
//! - **Anti-Sybil**: Deposit proportional to stake/reputation
//! - **Task Routing**: High-pheromone nodes get priority for matching tasks
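//!
//! ## Example
//!
//! A minimal sketch of the deposit/follow loop using the API defined below
//! (the peer id and stake value are placeholders):
//!
//! ```rust,ignore
//! // Assumes `Stigmergy` and `TaskType` are in scope as used in this module.
//! let stigmergy = Stigmergy::new();
//! // A peer completed a vector-search task successfully; reinforce that trail.
//! stigmergy.deposit(TaskType::VectorSearch, "peer-1".to_string(), 0.9, 1_000);
//! // Later, decide how likely we are to accept another task of the same type.
//! let p_accept = stigmergy.follow(TaskType::VectorSearch);
//! ```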

use crate::tasks::TaskType;
use parking_lot::RwLock;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::Arc;
use std::time::{Duration, Instant};
use wasm_bindgen::prelude::*;

/// Type alias for peer identifiers (matches WasmNodeIdentity.node_id)
pub type PeerId = String;

/// Ring buffer for bounded history storage
#[derive(Clone, Debug, Default)]
pub struct RingBuffer<T> {
    buffer: VecDeque<T>,
    capacity: usize,
}

impl<T> RingBuffer<T> {
    /// Create a new ring buffer with specified capacity
    pub fn new(capacity: usize) -> Self {
        Self {
            buffer: VecDeque::with_capacity(capacity),
            capacity,
        }
    }

    /// Push an item, evicting oldest if at capacity
    pub fn push(&mut self, item: T) {
        if self.buffer.len() >= self.capacity {
            self.buffer.pop_front();
        }
        self.buffer.push_back(item);
    }

    /// Get number of items in buffer
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    /// Iterate over items
    pub fn iter(&self) -> impl Iterator<Item = &T> {
        self.buffer.iter()
    }

    /// Clear all items
    pub fn clear(&mut self) {
        self.buffer.clear();
    }
}

/// A deposit record in the pheromone trail
#[derive(Clone, Debug)]
pub struct PheromoneDeposit {
    /// Peer who made the deposit
    pub peer_id: PeerId,
    /// Amount deposited
    pub amount: f32,
    /// When the deposit was made
    pub timestamp: Instant,
    /// Stake/reputation weight (anti-sybil)
    pub stake_weight: f32,
}

/// Pheromone trail for a specific task type
#[derive(Clone, Debug)]
pub struct PheromoneTrail {
    /// Current intensity (sum of active pheromones)
    pub intensity: f32,
    /// When the trail was last updated
    pub last_deposit: Instant,
    /// History of recent deposits (for analysis)
    pub deposit_history: RingBuffer<PheromoneDeposit>,
    /// Success rate for this task type (rolling average)
    pub success_rate: f32,
    /// Total tasks completed on this trail
    pub total_completions: u64,
    /// Total tasks failed on this trail
    pub total_failures: u64,
}

impl Default for PheromoneTrail {
    fn default() -> Self {
        Self {
            intensity: 0.0,
            last_deposit: Instant::now(),
            deposit_history: RingBuffer::new(100), // Keep last 100 deposits
            success_rate: 0.5, // Start neutral
            total_completions: 0,
            total_failures: 0,
        }
    }
}

impl PheromoneTrail {
    /// Update success rate with exponential moving average
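    ///
    /// With ALPHA = 0.1, a trail at success_rate 0.5 moves to
    /// 0.9 * 0.5 + 0.1 * 1.0 = 0.55 after one success, and to
    /// 0.9 * 0.5 + 0.1 * 0.0 = 0.45 after one failure.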
    pub fn record_outcome(&mut self, success: bool) {
        const ALPHA: f32 = 0.1; // Smoothing factor
        let outcome = if success { 1.0 } else { 0.0 };
        self.success_rate = (1.0 - ALPHA) * self.success_rate + ALPHA * outcome;

        if success {
            self.total_completions += 1;
        } else {
            self.total_failures += 1;
        }
    }

    /// Get weighted intensity (considering success rate)
    pub fn weighted_intensity(&self) -> f32 {
        self.intensity * self.success_rate
    }
}

/// Serializable pheromone state for P2P sync
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PheromoneState {
    /// Task type as string for serialization
    pub task_type: String,
    /// Current intensity
    pub intensity: f32,
    /// Success rate
    pub success_rate: f32,
    /// Last update timestamp (unix ms)
    pub last_update_ms: u64,
}

/// Stigmergy coordination engine
///
/// Implements indirect coordination through digital pheromones.
/// Agents deposit pheromones after successful task completions,
/// and follow pheromone gradients to decide which tasks to accept.
pub struct Stigmergy {
    /// Pheromone trails indexed by task type
    pheromones: Arc<RwLock<FxHashMap<TaskType, PheromoneTrail>>>,
    /// Decay rate per epoch (0.1 = 10% decay)
    decay_rate: f32,
    /// Base deposit rate (multiplied by success rate)
    deposit_rate: f32,
    /// How often evaporation occurs
    evaporation_interval: Duration,
    /// Last evaporation time
    last_evaporation: RwLock<Instant>,
    /// Minimum stake required for deposit (anti-sybil)
    min_stake: u64,
    /// Our node's specialization scores (learned preferences)
    node_specializations: Arc<RwLock<FxHashMap<TaskType, f32>>>,
}

impl Default for Stigmergy {
    fn default() -> Self {
        Self::new()
    }
}

impl Stigmergy {
    /// Create a new stigmergy engine with default parameters
    pub fn new() -> Self {
        Self {
            pheromones: Arc::new(RwLock::new(FxHashMap::default())),
            decay_rate: 0.1, // 10% decay per epoch
            deposit_rate: 1.0, // Base deposit amount
            evaporation_interval: Duration::from_secs(3600), // 1 hour
            last_evaporation: RwLock::new(Instant::now()),
            min_stake: 0,
            node_specializations: Arc::new(RwLock::new(FxHashMap::default())),
        }
    }

    /// Create with custom parameters
    pub fn with_params(decay_rate: f32, deposit_rate: f32, evaporation_hours: f32) -> Self {
        Self {
            pheromones: Arc::new(RwLock::new(FxHashMap::default())),
            decay_rate: decay_rate.clamp(0.0, 1.0),
            deposit_rate: deposit_rate.max(0.0),
            evaporation_interval: Duration::from_secs_f32(evaporation_hours * 3600.0),
            last_evaporation: RwLock::new(Instant::now()),
            min_stake: 0,
            node_specializations: Arc::new(RwLock::new(FxHashMap::default())),
        }
    }

    /// Set minimum stake for anti-sybil protection
    pub fn set_min_stake(&mut self, min_stake: u64) {
        self.min_stake = min_stake;
    }

    /// Deposit pheromone after successful task completion
    ///
    /// The deposit amount is proportional to:
    /// - Base deposit rate
    /// - Success rate of the completing peer
    /// - Stake weight for anti-sybil protection
    ///
    /// # Arguments
    /// * `task_type` - Type of task completed
    /// * `peer_id` - ID of the completing peer
    /// * `success_rate` - Peer's success rate (0.0 - 1.0)
    /// * `stake` - Peer's stake for anti-sybil weighting
    pub fn deposit(&self, task_type: TaskType, peer_id: PeerId, success_rate: f32, stake: u64) {
        // Anti-sybil: require minimum stake
        if stake < self.min_stake {
            return;
        }

        let mut trails = self.pheromones.write();
        let trail = trails.entry(task_type).or_default();

        // Calculate stake weight (logarithmic to prevent whale dominance)
        let stake_weight = (stake as f32).ln_1p() / 10.0;
        let stake_weight = stake_weight.clamp(0.1, 2.0);

        // Calculate deposit amount
        let deposit_amount = self.deposit_rate * success_rate * stake_weight;

        // Update trail
        trail.intensity += deposit_amount;
        trail.last_deposit = Instant::now();

        // Record in history
        trail.deposit_history.push(PheromoneDeposit {
            peer_id,
            amount: deposit_amount,
            timestamp: Instant::now(),
            stake_weight,
        });
    }

    /// Deposit with outcome recording (success or failure)
    pub fn deposit_with_outcome(
        &self,
        task_type: TaskType,
        peer_id: PeerId,
        success: bool,
        stake: u64,
    ) {
        let mut trails = self.pheromones.write();
        let trail = trails.entry(task_type).or_default();

        // Record the outcome
        trail.record_outcome(success);

        if success && stake >= self.min_stake {
            let stake_weight = (stake as f32).ln_1p() / 10.0;
            let stake_weight = stake_weight.clamp(0.1, 2.0);
            let deposit_amount = self.deposit_rate * trail.success_rate * stake_weight;

            trail.intensity += deposit_amount;
            trail.last_deposit = Instant::now();

            trail.deposit_history.push(PheromoneDeposit {
                peer_id,
                amount: deposit_amount,
                timestamp: Instant::now(),
                stake_weight,
            });
        }
    }

    /// Follow pheromone gradient to decide task acceptance probability
    ///
    /// Returns a probability (0.0 - 1.0) based on pheromone intensity.
    /// Uses sigmoid function for smooth probability curve.
    ///
    /// # Arguments
    /// * `task_type` - Type of task to evaluate
    ///
    /// # Returns
    /// Probability of accepting this task type (0.0 - 1.0)
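    ///
    /// For example, a weighted intensity of 0 maps to 0.5, an intensity of 2
    /// maps to roughly 0.88, and an intensity of 4 maps to roughly 0.98.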
pub fn follow(&self, task_type: TaskType) -> f32 {
|
||||
let trails = self.pheromones.read();
|
||||
let intensity = trails
|
||||
.get(&task_type)
|
||||
.map(|t| t.weighted_intensity())
|
||||
.unwrap_or(0.0);
|
||||
|
||||
// Sigmoid function for probability
|
||||
// Higher intensity -> higher probability
|
||||
1.0 / (1.0 + (-intensity).exp())
|
||||
}
|
||||
|
||||
/// Get raw pheromone intensity for a task type
|
||||
pub fn get_intensity(&self, task_type: TaskType) -> f32 {
|
||||
self.pheromones
|
||||
.read()
|
||||
.get(&task_type)
|
||||
.map(|t| t.intensity)
|
||||
.unwrap_or(0.0)
|
||||
}
|
||||
|
||||
/// Get success rate for a task type
|
||||
pub fn get_success_rate(&self, task_type: TaskType) -> f32 {
|
||||
self.pheromones
|
||||
.read()
|
||||
.get(&task_type)
|
||||
.map(|t| t.success_rate)
|
||||
.unwrap_or(0.5)
|
||||
}
|
||||
|
||||
/// Evaporate old pheromones (called periodically)
|
||||
///
|
||||
/// Pheromone intensity decays exponentially based on time since last deposit.
|
||||
/// This ensures that inactive trails fade over time, allowing the network
|
||||
/// to adapt to changing conditions.
|
||||
pub fn evaporate(&self) {
|
||||
let mut trails = self.pheromones.write();
|
||||
let now = Instant::now();
|
||||
|
||||
for (_task_type, trail) in trails.iter_mut() {
|
||||
let elapsed_hours = trail.last_deposit.elapsed().as_secs_f32() / 3600.0;
|
||||
let decay_factor = (1.0 - self.decay_rate).powf(elapsed_hours);
|
||||
trail.intensity *= decay_factor;
|
||||
|
||||
// Update last deposit time to now (for next decay calculation)
|
||||
// Note: This is a simplification; in practice you might want to
|
||||
// track the actual decay progression separately
|
||||
trail.last_deposit = now;
|
||||
}
|
||||
|
||||
// Clean up very weak trails (intensity < 0.01)
|
||||
trails.retain(|_, trail| trail.intensity >= 0.01);
|
||||
|
||||
*self.last_evaporation.write() = now;
|
||||
}
|
||||
|
||||
/// Check if evaporation is due and run if needed
|
||||
pub fn maybe_evaporate(&self) -> bool {
|
||||
let last = *self.last_evaporation.read();
|
||||
if last.elapsed() >= self.evaporation_interval {
|
||||
self.evaporate();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// P2P sync: merge pheromone trails from peers
|
||||
///
|
||||
/// Uses weighted average to combine local and remote state.
|
||||
/// Local state is weighted higher (0.7) to prevent manipulation.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `peer_trails` - Map of task types to intensity values from peer
|
||||
pub fn merge(&self, peer_trails: &FxHashMap<TaskType, f32>) {
|
||||
const LOCAL_WEIGHT: f32 = 0.7;
|
||||
const REMOTE_WEIGHT: f32 = 0.3;
|
||||
|
||||
let mut trails = self.pheromones.write();
|
||||
|
||||
for (task_type, remote_intensity) in peer_trails {
|
||||
let trail = trails.entry(*task_type).or_default();
|
||||
// Weighted average with local priority
|
||||
trail.intensity = LOCAL_WEIGHT * trail.intensity + REMOTE_WEIGHT * remote_intensity;
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge with full state (including success rates)
|
||||
pub fn merge_state(&self, peer_states: &[PheromoneState]) {
|
||||
const LOCAL_WEIGHT: f32 = 0.7;
|
||||
const REMOTE_WEIGHT: f32 = 0.3;
|
||||
|
||||
let mut trails = self.pheromones.write();
|
||||
|
||||
for state in peer_states {
|
||||
if let Some(task_type) = parse_task_type(&state.task_type) {
|
||||
let trail = trails.entry(task_type).or_default();
|
||||
trail.intensity = LOCAL_WEIGHT * trail.intensity + REMOTE_WEIGHT * state.intensity;
|
||||
trail.success_rate =
|
||||
LOCAL_WEIGHT * trail.success_rate + REMOTE_WEIGHT * state.success_rate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Export current state for P2P sharing
|
||||
pub fn export_state(&self) -> Vec<PheromoneState> {
|
||||
let trails = self.pheromones.read();
|
||||
let now = js_sys::Date::now() as u64;
|
||||
|
||||
trails
|
||||
.iter()
|
||||
.map(|(task_type, trail)| PheromoneState {
|
||||
task_type: format!("{:?}", task_type),
|
||||
intensity: trail.intensity,
|
||||
success_rate: trail.success_rate,
|
||||
last_update_ms: now,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get the best task type for this node based on pheromone gradients
|
||||
///
|
||||
/// Returns the task type with highest weighted intensity,
|
||||
/// indicating where the node should specialize.
|
||||
pub fn get_best_specialization(&self) -> Option<TaskType> {
|
||||
let trails = self.pheromones.read();
|
||||
trails
|
||||
.iter()
|
||||
.max_by(|a, b| {
|
||||
a.1.weighted_intensity()
|
||||
.partial_cmp(&b.1.weighted_intensity())
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
})
|
||||
.map(|(task_type, _)| *task_type)
|
||||
}
|
||||
|
||||
/// Get all task types ranked by attractiveness
|
||||
pub fn get_ranked_tasks(&self) -> Vec<(TaskType, f32)> {
|
||||
let trails = self.pheromones.read();
|
||||
let mut ranked: Vec<_> = trails
|
||||
.iter()
|
||||
.map(|(tt, trail)| (*tt, trail.weighted_intensity()))
|
||||
.collect();
|
||||
ranked.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
|
||||
ranked
|
||||
}
|
||||
|
||||
/// Update node's specialization preference based on task outcome
|
||||
pub fn update_specialization(&self, task_type: TaskType, success: bool) {
|
||||
let mut specs = self.node_specializations.write();
|
||||
let score = specs.entry(task_type).or_insert(0.5);
|
||||
|
||||
// Adjust specialization score based on outcome
|
||||
const LEARNING_RATE: f32 = 0.1;
|
||||
let target = if success { 1.0 } else { 0.0 };
|
||||
*score = (1.0 - LEARNING_RATE) * *score + LEARNING_RATE * target;
|
||||
}
|
||||
|
||||
/// Get node's specialization score for a task type
|
||||
pub fn get_specialization(&self, task_type: TaskType) -> f32 {
|
||||
self.node_specializations
|
||||
.read()
|
||||
.get(&task_type)
|
||||
.copied()
|
||||
.unwrap_or(0.5)
|
||||
}
|
||||
|
||||
/// Combined decision: should we accept this task?
|
||||
///
|
||||
/// Considers both:
|
||||
/// - Global pheromone gradient (follow())
|
||||
/// - Local specialization score
|
||||
///
|
||||
/// Returns probability of accepting the task.
|
||||
pub fn should_accept(&self, task_type: TaskType) -> f32 {
|
||||
let pheromone_prob = self.follow(task_type);
|
||||
let specialization = self.get_specialization(task_type);
|
||||
|
||||
// Weighted combination (pheromone slightly more important)
|
||||
0.6 * pheromone_prob + 0.4 * specialization
|
||||
}

    /// Get statistics about the pheromone system
    pub fn get_stats(&self) -> StigmergyStats {
        let trails = self.pheromones.read();
        let specs = self.node_specializations.read();

        let total_intensity: f32 = trails.values().map(|t| t.intensity).sum();
        let avg_success_rate: f32 = if trails.is_empty() {
            0.5
        } else {
            trails.values().map(|t| t.success_rate).sum::<f32>() / trails.len() as f32
        };

        let total_completions: u64 = trails.values().map(|t| t.total_completions).sum();
        let total_failures: u64 = trails.values().map(|t| t.total_failures).sum();

        StigmergyStats {
            trail_count: trails.len(),
            total_intensity,
            avg_success_rate,
            total_completions,
            total_failures,
            specialization_count: specs.len(),
            strongest_trail: trails
                .iter()
                .max_by(|a, b| {
                    a.1.intensity
                        .partial_cmp(&b.1.intensity)
                        .unwrap_or(std::cmp::Ordering::Equal)
                })
                .map(|(tt, _)| format!("{:?}", tt)),
        }
    }
}

/// Statistics about the stigmergy system
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StigmergyStats {
    /// Number of active pheromone trails
    pub trail_count: usize,
    /// Total pheromone intensity across all trails
    pub total_intensity: f32,
    /// Average success rate across all trails
    pub avg_success_rate: f32,
    /// Total successful task completions
    pub total_completions: u64,
    /// Total failed task completions
    pub total_failures: u64,
    /// Number of specialization entries
    pub specialization_count: usize,
    /// The strongest trail (most pheromone)
    pub strongest_trail: Option<String>,
}

/// Parse task type from string
fn parse_task_type(s: &str) -> Option<TaskType> {
    match s {
        "VectorSearch" => Some(TaskType::VectorSearch),
        "VectorInsert" => Some(TaskType::VectorInsert),
        "Embedding" => Some(TaskType::Embedding),
        "SemanticMatch" => Some(TaskType::SemanticMatch),
        "NeuralInference" => Some(TaskType::NeuralInference),
        "Encryption" => Some(TaskType::Encryption),
        "Compression" => Some(TaskType::Compression),
        "CustomWasm" => Some(TaskType::CustomWasm),
        _ => None,
    }
}

/// WASM-bindgen wrapper for stigmergy coordination
#[wasm_bindgen]
pub struct WasmStigmergy {
    inner: Stigmergy,
}

#[wasm_bindgen]
impl WasmStigmergy {
    /// Create a new stigmergy engine
    #[wasm_bindgen(constructor)]
    pub fn new() -> WasmStigmergy {
        WasmStigmergy {
            inner: Stigmergy::new(),
        }
    }

    /// Create with custom parameters
    #[wasm_bindgen(js_name = withParams)]
    pub fn with_params(decay_rate: f32, deposit_rate: f32, evaporation_hours: f32) -> WasmStigmergy {
        WasmStigmergy {
            inner: Stigmergy::with_params(decay_rate, deposit_rate, evaporation_hours),
        }
    }

    /// Set minimum stake for anti-sybil
    #[wasm_bindgen(js_name = setMinStake)]
    pub fn set_min_stake(&mut self, min_stake: u64) {
        self.inner.set_min_stake(min_stake);
    }

    /// Deposit pheromone after task completion
    #[wasm_bindgen]
    pub fn deposit(&self, task_type: &str, peer_id: &str, success_rate: f32, stake: u64) {
        if let Some(tt) = parse_task_type(task_type) {
            self.inner.deposit(tt, peer_id.to_string(), success_rate, stake);
        }
    }

    /// Deposit with success/failure outcome
    #[wasm_bindgen(js_name = depositWithOutcome)]
    pub fn deposit_with_outcome(&self, task_type: &str, peer_id: &str, success: bool, stake: u64) {
        if let Some(tt) = parse_task_type(task_type) {
            self.inner
                .deposit_with_outcome(tt, peer_id.to_string(), success, stake);
        }
    }

    /// Get acceptance probability for a task type
    #[wasm_bindgen]
    pub fn follow(&self, task_type: &str) -> f32 {
        parse_task_type(task_type)
            .map(|tt| self.inner.follow(tt))
            .unwrap_or(0.5)
    }

    /// Get raw pheromone intensity
    #[wasm_bindgen(js_name = getIntensity)]
    pub fn get_intensity(&self, task_type: &str) -> f32 {
        parse_task_type(task_type)
            .map(|tt| self.inner.get_intensity(tt))
            .unwrap_or(0.0)
    }

    /// Get success rate for a task type
    #[wasm_bindgen(js_name = getSuccessRate)]
    pub fn get_success_rate(&self, task_type: &str) -> f32 {
        parse_task_type(task_type)
            .map(|tt| self.inner.get_success_rate(tt))
            .unwrap_or(0.5)
    }

    /// Run evaporation (call periodically)
    #[wasm_bindgen]
    pub fn evaporate(&self) {
        self.inner.evaporate();
    }

    /// Check and run evaporation if due
    #[wasm_bindgen(js_name = maybeEvaporate)]
    pub fn maybe_evaporate(&self) -> bool {
        self.inner.maybe_evaporate()
    }

    /// Merge peer pheromone state (JSON format)
    #[wasm_bindgen]
    pub fn merge(&self, peer_state_json: &str) -> bool {
        if let Ok(states) = serde_json::from_str::<Vec<PheromoneState>>(peer_state_json) {
            self.inner.merge_state(&states);
            true
        } else {
            false
        }
    }

    /// Export current state for P2P sharing
    #[wasm_bindgen(js_name = exportState)]
    pub fn export_state(&self) -> String {
        let states = self.inner.export_state();
        serde_json::to_string(&states).unwrap_or_else(|_| "[]".to_string())
    }

    /// Get best specialization recommendation
    #[wasm_bindgen(js_name = getBestSpecialization)]
    pub fn get_best_specialization(&self) -> Option<String> {
        self.inner
            .get_best_specialization()
            .map(|tt| format!("{:?}", tt))
    }

    /// Get all task types ranked by attractiveness
    #[wasm_bindgen(js_name = getRankedTasks)]
    pub fn get_ranked_tasks(&self) -> String {
        let ranked = self.inner.get_ranked_tasks();
        let result: Vec<(String, f32)> = ranked
            .into_iter()
            .map(|(tt, score)| (format!("{:?}", tt), score))
            .collect();
        serde_json::to_string(&result).unwrap_or_else(|_| "[]".to_string())
    }

    /// Update node specialization based on outcome
    #[wasm_bindgen(js_name = updateSpecialization)]
    pub fn update_specialization(&self, task_type: &str, success: bool) {
        if let Some(tt) = parse_task_type(task_type) {
            self.inner.update_specialization(tt, success);
        }
    }

    /// Get node's specialization score
    #[wasm_bindgen(js_name = getSpecialization)]
    pub fn get_specialization(&self, task_type: &str) -> f32 {
        parse_task_type(task_type)
            .map(|tt| self.inner.get_specialization(tt))
            .unwrap_or(0.5)
    }

    /// Should this node accept a task? (combined decision)
    #[wasm_bindgen(js_name = shouldAccept)]
    pub fn should_accept(&self, task_type: &str) -> f32 {
        parse_task_type(task_type)
            .map(|tt| self.inner.should_accept(tt))
            .unwrap_or(0.5)
    }

    /// Get statistics as JSON
    #[wasm_bindgen(js_name = getStats)]
    pub fn get_stats(&self) -> String {
        let stats = self.inner.get_stats();
        serde_json::to_string(&stats).unwrap_or_else(|_| "{}".to_string())
    }
}

impl Default for WasmStigmergy {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_stigmergy_basic() {
        let stigmergy = Stigmergy::new();

        // Initially no pheromones
        assert_eq!(stigmergy.get_intensity(TaskType::VectorSearch), 0.0);

        // Deposit pheromone
        stigmergy.deposit(
            TaskType::VectorSearch,
            "node-1".to_string(),
            0.8,
            1000,
        );

        // Should have intensity now
        assert!(stigmergy.get_intensity(TaskType::VectorSearch) > 0.0);

        // Follow should return probability > 0.5
        let prob = stigmergy.follow(TaskType::VectorSearch);
        assert!(prob > 0.5);
    }
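
    // Added sketch, not part of the original suite: after one successful
    // deposit follow() exceeds 0.5 (asserted above) and one successful
    // outcome pushes the specialization score above the neutral 0.5, so the
    // 0.6 / 0.4 weighted combination in should_accept() must also exceed 0.5.
    #[test]
    fn test_should_accept_combines_signals() {
        let stigmergy = Stigmergy::new();

        stigmergy.deposit(
            TaskType::VectorSearch,
            "node-1".to_string(),
            0.8,
            1000,
        );
        stigmergy.update_specialization(TaskType::VectorSearch, true);

        let p = stigmergy.should_accept(TaskType::VectorSearch);
        assert!(p > 0.5, "combined acceptance should exceed 0.5, got {}", p);
    }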

    #[test]
    fn test_deposit_with_outcome() {
        let stigmergy = Stigmergy::new();

        // Success deposits pheromone
        stigmergy.deposit_with_outcome(
            TaskType::Embedding,
            "node-2".to_string(),
            true,
            500,
        );
        assert!(stigmergy.get_intensity(TaskType::Embedding) > 0.0);

        // Failure updates success rate but no pheromone deposit
        let intensity_before = stigmergy.get_intensity(TaskType::Embedding);
        stigmergy.deposit_with_outcome(
            TaskType::Embedding,
            "node-2".to_string(),
            false,
            500,
        );
        assert_eq!(
            stigmergy.get_intensity(TaskType::Embedding),
            intensity_before
        );
        // But success rate should decrease
        assert!(stigmergy.get_success_rate(TaskType::Embedding) < 0.55);
    }

    #[test]
    fn test_evaporation() {
        // Evaporation depends on elapsed time since the last deposit.
        // With near-zero elapsed time, decay_factor ~ 1.0, so intensity barely changes;
        // observing real decay would require waiting, so this test only checks the direction.
        let stigmergy = Stigmergy::with_params(0.99, 1.0, 0.001); // Very high decay rate

        stigmergy.deposit(
            TaskType::Compression,
            "node-3".to_string(),
            1.0,
            1000,
        );
        let initial = stigmergy.get_intensity(TaskType::Compression);
        assert!(initial > 0.0, "Initial intensity should be > 0");

        // Wait a tiny bit to ensure some time passes
        std::thread::sleep(std::time::Duration::from_millis(10));

        // Evaporate - with a very high decay rate (0.99), even a small delay causes some decay
        stigmergy.evaporate();

        // Intensity should decrease (or at least not increase)
        let after = stigmergy.get_intensity(TaskType::Compression);
        // With a 0.99 decay rate: decay_factor = 0.01^(elapsed_hours).
        // For 10 ms = 0.00000278 hours: decay_factor = 0.01^0.00000278 ~ 0.99999,
        // so `after` ~ initial * 0.99999, which is very close to `initial`.
        // The trail may also be cleaned up entirely if intensity < 0.01.
        assert!(after <= initial, "Intensity should not increase: {} vs {}", after, initial);
    }

    #[test]
    fn test_merge() {
        let stigmergy = Stigmergy::new();

        // Add local pheromone
        stigmergy.deposit(
            TaskType::Encryption,
            "node-local".to_string(),
            1.0,
            1000,
        );
        let local_intensity = stigmergy.get_intensity(TaskType::Encryption);

        // Merge with peer state
        let mut peer_trails = FxHashMap::default();
        peer_trails.insert(TaskType::Encryption, 10.0);
        peer_trails.insert(TaskType::NeuralInference, 5.0);

        stigmergy.merge(&peer_trails);

        // Local should be weighted 0.7, remote 0.3
        let merged = stigmergy.get_intensity(TaskType::Encryption);
        let expected = 0.7 * local_intensity + 0.3 * 10.0;
        assert!((merged - expected).abs() < 0.01);

        // New task type should appear
        assert!(stigmergy.get_intensity(TaskType::NeuralInference) > 0.0);
    }

    #[test]
    fn test_specialization() {
        let stigmergy = Stigmergy::new();

        // Initially neutral
        assert!((stigmergy.get_specialization(TaskType::SemanticMatch) - 0.5).abs() < 0.01);

        // Success increases specialization
        stigmergy.update_specialization(TaskType::SemanticMatch, true);
        assert!(stigmergy.get_specialization(TaskType::SemanticMatch) > 0.5);

        // Failure decreases it
        stigmergy.update_specialization(TaskType::SemanticMatch, false);
        let spec = stigmergy.get_specialization(TaskType::SemanticMatch);
        assert!(spec > 0.4 && spec < 0.6); // Should be around 0.5 after one success, one failure
    }
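
    // Added sketch, not part of the original suite: traces the exponential
    // moving average in update_specialization() with LEARNING_RATE = 0.1.
    // Starting from the neutral 0.5, one success gives
    // 0.9 * 0.5 + 0.1 * 1.0 = 0.55, and a following failure gives
    // 0.9 * 0.55 + 0.1 * 0.0 = 0.495.
    #[test]
    fn test_specialization_ema_trajectory() {
        let stigmergy = Stigmergy::new();

        stigmergy.update_specialization(TaskType::Embedding, true);
        assert!((stigmergy.get_specialization(TaskType::Embedding) - 0.55).abs() < 1e-4);

        stigmergy.update_specialization(TaskType::Embedding, false);
        assert!((stigmergy.get_specialization(TaskType::Embedding) - 0.495).abs() < 1e-4);
    }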

    #[test]
    fn test_anti_sybil() {
        let mut stigmergy = Stigmergy::new();
        stigmergy.set_min_stake(100);

        // Low stake deposit should be rejected
        stigmergy.deposit(
            TaskType::CustomWasm,
            "sybil".to_string(),
            1.0,
            50, // Below minimum
        );
        assert_eq!(stigmergy.get_intensity(TaskType::CustomWasm), 0.0);

        // High stake deposit should work
        stigmergy.deposit(
            TaskType::CustomWasm,
            "legit".to_string(),
            1.0,
            200,
        );
        assert!(stigmergy.get_intensity(TaskType::CustomWasm) > 0.0);
    }
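
    // Added sketch, not part of the original suite: get_ranked_tasks()
    // returns one entry per active trail, sorted by descending weighted
    // intensity (see the sort in the implementation above).
    #[test]
    fn test_ranked_tasks_sorted_descending() {
        let stigmergy = Stigmergy::new();
        stigmergy.deposit(TaskType::VectorSearch, "n1".to_string(), 1.0, 1000);
        stigmergy.deposit(TaskType::Compression, "n1".to_string(), 0.8, 1000);

        let ranked = stigmergy.get_ranked_tasks();
        assert_eq!(ranked.len(), 2);
        // Scores must be non-increasing from first to last.
        assert!(ranked.windows(2).all(|pair| pair[0].1 >= pair[1].1));
    }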

    #[test]
    fn test_ring_buffer() {
        let mut buffer: RingBuffer<i32> = RingBuffer::new(3);

        buffer.push(1);
        buffer.push(2);
        buffer.push(3);
        assert_eq!(buffer.len(), 3);

        // Should evict oldest
        buffer.push(4);
        assert_eq!(buffer.len(), 3);

        let items: Vec<_> = buffer.iter().copied().collect();
        assert_eq!(items, vec![2, 3, 4]);
    }
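
    // Added sketch, not part of the original suite: the string keys accepted
    // by parse_task_type() are the Debug names of TaskType, which is also how
    // the WASM wrapper renders task types back to strings
    // (getBestSpecialization, getRankedTasks).
    #[test]
    fn test_parse_task_type_round_trip() {
        let known = [
            TaskType::VectorSearch,
            TaskType::Embedding,
            TaskType::CustomWasm,
        ];
        for tt in known.iter().copied() {
            assert_eq!(parse_task_type(&format!("{:?}", tt)), Some(tt));
        }
        assert_eq!(parse_task_type("NotATaskType"), None);
    }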

    #[test]
    fn test_stats() {
        let stigmergy = Stigmergy::new();

        stigmergy.deposit(TaskType::VectorSearch, "n1".to_string(), 1.0, 100);
        stigmergy.deposit_with_outcome(TaskType::VectorInsert, "n2".to_string(), true, 100);
        stigmergy.deposit_with_outcome(TaskType::VectorInsert, "n2".to_string(), false, 100);

        let stats = stigmergy.get_stats();
        assert_eq!(stats.trail_count, 2);
        assert!(stats.total_intensity > 0.0);
        assert_eq!(stats.total_completions, 1);
        assert_eq!(stats.total_failures, 1);
    }
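
    // Added sketches, not part of the original suite: exercise the
    // wasm-bindgen wrapper on the native target. The first round-trips
    // pheromone state through the JSON export/merge path used for P2P
    // gossip; the second checks that getStats() serializes StigmergyStats
    // as JSON. Both assume export_state() includes the deposited trail.
    #[test]
    fn test_wasm_export_merge_roundtrip() {
        let a = WasmStigmergy::new();
        a.deposit("VectorSearch", "node-a", 1.0, 1000);

        // Exported state is a JSON array of PheromoneState entries.
        let exported = a.export_state();
        assert_ne!(exported, "[]");

        // A second engine accepts the exported JSON.
        let b = WasmStigmergy::new();
        assert!(b.merge(&exported));
    }

    #[test]
    fn test_wasm_stats_json() {
        let w = WasmStigmergy::new();
        w.deposit("Embedding", "n1", 1.0, 1000);

        let stats: StigmergyStats =
            serde_json::from_str(&w.get_stats()).expect("stats JSON should parse");
        assert_eq!(stats.trail_count, 1);
    }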
}