Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,204 @@
//! Threshold adjustment types.
use super::config::ThresholdConfig;
use serde::{Deserialize, Serialize};
/// Reason for a threshold adjustment.
///
/// Serialized alongside each [`ThresholdAdjustment`] so consumers can audit
/// *why* thresholds moved, not just by how much.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AdjustmentReason {
    /// Energy spike detected, tightening thresholds.
    EnergySpike {
        /// The spike magnitude.
        magnitude: f32,
    },
    /// Sustained incoherence, adjusting for stability.
    SustainedIncoherence {
        /// Duration in seconds.
        duration_secs: f32,
    },
    /// Success pattern detected, optimizing thresholds.
    SuccessPattern {
        /// Pattern similarity score.
        similarity: f32,
    },
    /// Manual override requested.
    ManualOverride,
    /// Background learning produced new optimal values.
    BackgroundLearning {
        /// Number of training samples.
        samples: usize,
    },
    /// Cold start initialization.
    ColdStart,
    /// Regime change detected.
    RegimeChange {
        /// Detected regime identifier.
        regime_id: String,
    },
}
/// Recommended threshold adjustment from the tuner.
///
/// Produced by the tuner's learning paths (e.g. energy-spike or
/// success-pattern handling) and applied by the caller.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ThresholdAdjustment {
    /// The recommended new threshold configuration.
    pub new_thresholds: ThresholdConfig,
    /// Reason for the adjustment.
    pub reason: AdjustmentReason,
    /// Confidence in the adjustment (0.0 to 1.0).
    pub confidence: f32,
    /// Whether the adjustment is urgent (should be applied immediately).
    pub urgent: bool,
    /// Delta from current thresholds (per-lane differences).
    pub delta: ThresholdDelta,
    /// Timestamp when adjustment was computed (ms since the Unix epoch).
    pub timestamp_ms: u64,
}
impl ThresholdAdjustment {
    /// Create a new threshold adjustment.
    ///
    /// `confidence` is clamped to the documented `[0.0, 1.0]` range so that
    /// callers passing an out-of-range score (e.g. `for_energy_spike` with a
    /// large magnitude) cannot produce a confidence above 1.0.
    ///
    /// An adjustment is flagged `urgent` for energy spikes with magnitude of
    /// at least 0.5 (the in-file tests exercise exactly 0.5 and expect it to
    /// be urgent, so the comparison is inclusive).
    pub fn new(
        current: &ThresholdConfig,
        new_thresholds: ThresholdConfig,
        reason: AdjustmentReason,
        confidence: f32,
    ) -> Self {
        // Per-lane deltas let callers inspect how far the recommendation
        // moves each threshold from the current configuration.
        let delta = ThresholdDelta {
            reflex_delta: new_thresholds.reflex - current.reflex,
            retrieval_delta: new_thresholds.retrieval - current.retrieval,
            heavy_delta: new_thresholds.heavy - current.heavy,
        };
        // BUGFIX: was `magnitude > 0.5`, which made a spike of exactly 0.5
        // non-urgent, contradicting the tests that assert urgency at 0.5.
        let urgent = matches!(
            reason,
            AdjustmentReason::EnergySpike { magnitude } if magnitude >= 0.5
        );
        Self {
            new_thresholds,
            reason,
            // Keep the confidence inside its documented range.
            confidence: confidence.clamp(0.0, 1.0),
            urgent,
            delta,
            timestamp_ms: current_time_ms(),
        }
    }
    /// Create an adjustment for an energy spike.
    ///
    /// Thresholds are tightened proportionally to the spike magnitude, with
    /// the tightening factor capped at 40% so thresholds never collapse.
    pub fn for_energy_spike(current: &ThresholdConfig, spike_magnitude: f32) -> Self {
        // Tighten thresholds proportionally to spike
        let factor = 1.0 - (spike_magnitude * 0.5).min(0.4);
        let new = ThresholdConfig {
            reflex: current.reflex * factor,
            retrieval: current.retrieval * factor,
            heavy: current.heavy * factor,
            persistence_window_secs: current.persistence_window_secs,
        };
        Self::new(
            current,
            new,
            AdjustmentReason::EnergySpike {
                magnitude: spike_magnitude,
            },
            // Bigger spikes are clearer signals, hence higher confidence
            // (clamped to 1.0 by `new`).
            0.8 + spike_magnitude * 0.1,
        )
    }
    /// Create an adjustment based on a success pattern.
    ///
    /// Interpolates halfway-scaled toward the successful pattern: the lerp
    /// parameter is `similarity * 0.5`, so even a perfect match only moves
    /// thresholds 50% of the way in one step.
    pub fn from_success_pattern(
        current: &ThresholdConfig,
        pattern_thresholds: ThresholdConfig,
        similarity: f32,
    ) -> Self {
        // Interpolate toward the successful pattern based on similarity
        let new = current.lerp(&pattern_thresholds, similarity * 0.5);
        Self::new(
            current,
            new,
            AdjustmentReason::SuccessPattern { similarity },
            similarity,
        )
    }
    /// Check if this adjustment is significant enough to apply
    /// (moves some threshold by more than 0.01 with confidence above 0.5).
    pub fn is_significant(&self) -> bool {
        self.delta.max_abs_delta() > 0.01 && self.confidence > 0.5
    }
    /// Get the magnitude of the adjustment (largest absolute per-lane delta).
    pub fn magnitude(&self) -> f32 {
        self.delta.max_abs_delta()
    }
}
/// Delta between two threshold configurations.
///
/// Deltas are signed: positive means the new configuration raised that
/// threshold, negative means it tightened it.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct ThresholdDelta {
    /// Change in reflex threshold.
    pub reflex_delta: f32,
    /// Change in retrieval threshold.
    pub retrieval_delta: f32,
    /// Change in heavy threshold.
    pub heavy_delta: f32,
}
impl ThresholdDelta {
    /// Largest absolute per-threshold change (the L-infinity norm).
    pub fn max_abs_delta(&self) -> f32 {
        [self.reflex_delta, self.retrieval_delta, self.heavy_delta]
            .into_iter()
            .fold(0.0_f32, |acc, d| acc.max(d.abs()))
    }
    /// Euclidean (L2) magnitude of the change across all three thresholds.
    pub fn total_magnitude(&self) -> f32 {
        let sum_sq = self.reflex_delta * self.reflex_delta
            + self.retrieval_delta * self.retrieval_delta
            + self.heavy_delta * self.heavy_delta;
        sum_sq.sqrt()
    }
}
/// Current wall-clock time as milliseconds since the Unix epoch.
///
/// Falls back to 0 if the system clock reports a time before the epoch.
fn current_time_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64)
}
#[cfg(test)]
mod tests {
    use super::*;
    // A 0.5-magnitude spike should tighten thresholds, carry high
    // confidence, and be flagged urgent.
    #[test]
    fn test_energy_spike_adjustment() {
        let current = ThresholdConfig::default();
        let adj = ThresholdAdjustment::for_energy_spike(&current, 0.5);
        assert!(adj.new_thresholds.reflex < current.reflex);
        assert!(adj.confidence > 0.8);
        assert!(adj.urgent);
    }
    // Interpolating toward the conservative preset with similarity 0.9
    // must move thresholds toward it and keep confidence = similarity.
    #[test]
    fn test_success_pattern_adjustment() {
        let current = ThresholdConfig::default();
        let pattern = ThresholdConfig::conservative();
        let adj = ThresholdAdjustment::from_success_pattern(&current, pattern, 0.9);
        assert!(adj.new_thresholds.reflex < current.reflex);
        assert!(adj.confidence > 0.8);
    }
    // max_abs_delta is the largest |delta|, regardless of sign.
    #[test]
    fn test_threshold_delta() {
        let delta = ThresholdDelta {
            reflex_delta: 0.1,
            retrieval_delta: -0.2,
            heavy_delta: 0.05,
        };
        assert!((delta.max_abs_delta() - 0.2).abs() < 0.001);
    }
}

View File

@@ -0,0 +1,237 @@
//! Configuration types for SONA threshold tuning.
use serde::{Deserialize, Serialize};
/// Configuration for the SONA threshold tuner.
///
/// Groups the engine dimensions, the initial gate thresholds, and the
/// parameters of both learning loops. See [`Default`] for baseline values.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TunerConfig {
    /// Hidden dimension for SONA engine.
    pub hidden_dim: usize,
    /// Embedding dimension (energy traces are padded/truncated to this length).
    pub embedding_dim: usize,
    /// Initial threshold configuration.
    pub initial_thresholds: ThresholdConfig,
    /// Instant learning loop configuration.
    pub instant_loop: LearningLoopConfig,
    /// Background learning loop configuration.
    pub background_loop: LearningLoopConfig,
    /// EWC++ lambda for weight consolidation.
    pub ewc_lambda: f32,
    /// Pattern similarity threshold for reasoning bank queries.
    pub pattern_similarity_threshold: f32,
    /// Maximum patterns to store in reasoning bank.
    pub max_patterns: usize,
    /// Enable auto-consolidation after N trajectories.
    pub auto_consolidate_after: usize,
}
impl Default for TunerConfig {
    /// Baseline tuner: 256-dim engine, default thresholds, standard
    /// instant/background loops, and consolidation every 100 trajectories.
    fn default() -> Self {
        Self {
            initial_thresholds: ThresholdConfig::default(),
            instant_loop: LearningLoopConfig::instant(),
            background_loop: LearningLoopConfig::background(),
            hidden_dim: 256,
            embedding_dim: 256,
            ewc_lambda: 0.4,
            pattern_similarity_threshold: 0.85,
            max_patterns: 10_000,
            auto_consolidate_after: 100,
        }
    }
}
/// Threshold configuration for compute lanes.
///
/// The coherence gate uses these thresholds to determine which compute lane
/// to use based on the current energy level. A valid configuration satisfies
/// `0.0 <= reflex < retrieval < heavy <= 1.0` (see `is_valid`).
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct ThresholdConfig {
    /// Energy threshold for Lane 0 (Reflex) - below this, allow without checks.
    pub reflex: f32,
    /// Energy threshold for Lane 1 (Retrieval) - requires evidence fetching.
    pub retrieval: f32,
    /// Energy threshold for Lane 2 (Heavy) - requires multi-step reasoning.
    /// Energy at or above this escalates to human review.
    pub heavy: f32,
    /// Persistence window in seconds before escalation.
    pub persistence_window_secs: u64,
}
impl Default for ThresholdConfig {
    /// Balanced defaults: energy below 0.1 clears the reflex lane, while
    /// anything at or above 0.7 triggers deep reasoning or escalation.
    fn default() -> Self {
        Self {
            reflex: 0.1,
            retrieval: 0.3,
            heavy: 0.7,
            persistence_window_secs: 5,
        }
    }
}
impl ThresholdConfig {
    /// Conservative preset: low cut-offs and a long persistence window, so
    /// the gate escalates to deeper reasoning earlier.
    #[must_use]
    pub fn conservative() -> Self {
        Self {
            reflex: 0.05,
            retrieval: 0.15,
            heavy: 0.5,
            persistence_window_secs: 10,
        }
    }
    /// Aggressive preset: high cut-offs and a short persistence window,
    /// keeping more traffic in the cheap lanes.
    #[must_use]
    pub fn aggressive() -> Self {
        Self {
            reflex: 0.2,
            retrieval: 0.5,
            heavy: 0.9,
            persistence_window_secs: 2,
        }
    }
    /// A configuration is valid when the thresholds are strictly increasing
    /// and remain within the `[0.0, 1.0]` energy range.
    #[must_use]
    pub fn is_valid(&self) -> bool {
        let ordered = self.reflex < self.retrieval && self.retrieval < self.heavy;
        ordered && self.reflex >= 0.0 && self.heavy <= 1.0
    }
    /// Map an energy level onto the cheapest lane whose threshold it stays
    /// under; energy at or above `heavy` escalates to `Human`.
    #[must_use]
    pub fn lane_for_energy(&self, energy: f32) -> ComputeLane {
        match energy {
            e if e < self.reflex => ComputeLane::Reflex,
            e if e < self.retrieval => ComputeLane::Retrieval,
            e if e < self.heavy => ComputeLane::Heavy,
            _ => ComputeLane::Human,
        }
    }
    /// Linearly interpolate each threshold toward `other` by `t` (clamped to
    /// `[0.0, 1.0]`); the persistence window snaps to whichever endpoint is
    /// nearer (ties go to `other`).
    #[must_use]
    pub fn lerp(&self, other: &Self, t: f32) -> Self {
        let t = t.clamp(0.0, 1.0);
        let mix = |a: f32, b: f32| a + (b - a) * t;
        let window = if t < 0.5 {
            self.persistence_window_secs
        } else {
            other.persistence_window_secs
        };
        Self {
            reflex: mix(self.reflex, other.reflex),
            retrieval: mix(self.retrieval, other.retrieval),
            heavy: mix(self.heavy, other.heavy),
            persistence_window_secs: window,
        }
    }
}
/// Compute lanes for escalating complexity.
///
/// Derived `PartialOrd`/`Ord` follow declaration order, so
/// `Reflex < Retrieval < Heavy < Human` — lanes compare by cost.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum ComputeLane {
    /// Lane 0: Local residual updates, simple aggregates (<1ms).
    Reflex = 0,
    /// Lane 1: Evidence fetching, lightweight reasoning (~10ms).
    Retrieval = 1,
    /// Lane 2: Multi-step planning, spectral analysis (~100ms).
    Heavy = 2,
    /// Lane 3: Human escalation for sustained incoherence.
    Human = 3,
}
/// Configuration for a learning loop.
///
/// Two presets exist: [`Self::instant`] (Micro-LoRA, latency-critical) and
/// [`Self::background`] (Base-LoRA, throughput-oriented).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LearningLoopConfig {
    /// Learning rate.
    pub learning_rate: f32,
    /// LoRA rank (1-2 for Micro-LoRA, higher for Base-LoRA).
    pub lora_rank: usize,
    /// Batch size for updates.
    pub batch_size: usize,
    /// Maximum latency target in microseconds.
    pub max_latency_us: u64,
    /// Enable gradient clipping.
    pub gradient_clipping: bool,
    /// Gradient clip value (only meaningful when clipping is enabled).
    pub gradient_clip_value: f32,
}
impl LearningLoopConfig {
    /// Instant (Micro-LoRA) loop: rank-1 single-sample updates targeting a
    /// sub-50-microsecond (<0.05ms) latency budget.
    #[must_use]
    pub fn instant() -> Self {
        Self {
            learning_rate: 0.01,
            lora_rank: 1,
            batch_size: 1,
            max_latency_us: 50,
            gradient_clipping: true,
            gradient_clip_value: 1.0,
        }
    }
    /// Background (Base-LoRA) loop: rank-8 batched updates with a relaxed
    /// 10ms latency budget for deeper learning.
    #[must_use]
    pub fn background() -> Self {
        Self {
            learning_rate: 0.001,
            lora_rank: 8,
            batch_size: 32,
            max_latency_us: 10_000,
            gradient_clipping: true,
            gradient_clip_value: 1.0,
        }
    }
}
impl Default for LearningLoopConfig {
    // The instant preset is the default: it is the safest (smallest,
    // cheapest) loop to run when the caller does not choose explicitly.
    fn default() -> Self {
        Self::instant()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // All built-in presets must be valid; swapping reflex/retrieval order
    // must be rejected.
    #[test]
    fn test_threshold_config_validity() {
        assert!(ThresholdConfig::default().is_valid());
        assert!(ThresholdConfig::conservative().is_valid());
        assert!(ThresholdConfig::aggressive().is_valid());
        let invalid = ThresholdConfig {
            reflex: 0.5,
            retrieval: 0.3, // Less than reflex
            heavy: 0.7,
            persistence_window_secs: 5,
        };
        assert!(!invalid.is_valid());
    }
    // Spot-check each band of the default thresholds, including the
    // top-of-range escalation to Human.
    #[test]
    fn test_lane_for_energy() {
        let config = ThresholdConfig::default();
        assert_eq!(config.lane_for_energy(0.0), ComputeLane::Reflex);
        assert_eq!(config.lane_for_energy(0.05), ComputeLane::Reflex);
        assert_eq!(config.lane_for_energy(0.15), ComputeLane::Retrieval);
        assert_eq!(config.lane_for_energy(0.5), ComputeLane::Heavy);
        assert_eq!(config.lane_for_energy(1.0), ComputeLane::Human);
    }
    // Midpoint interpolation must land strictly between the endpoints.
    #[test]
    fn test_threshold_lerp() {
        let conservative = ThresholdConfig::conservative();
        let aggressive = ThresholdConfig::aggressive();
        let mid = conservative.lerp(&aggressive, 0.5);
        assert!(mid.reflex > conservative.reflex);
        assert!(mid.reflex < aggressive.reflex);
    }
}

View File

@@ -0,0 +1,79 @@
//! Error types for the SONA tuning integration module.
use thiserror::Error;
/// Result type for SONA tuning operations.
pub type SonaTuningResult<T> = Result<T, SonaTuningError>;
/// Errors that can occur in SONA tuning operations.
///
/// Display strings come from the `thiserror` `#[error(...)]` attribute on
/// each variant; the string-payload variants carry a free-form detail
/// message supplied by the constructor helpers below.
#[derive(Debug, Error)]
pub enum SonaTuningError {
    /// Invalid threshold configuration.
    #[error("invalid threshold configuration: {0}")]
    InvalidThresholdConfig(String),
    /// Trajectory tracking error.
    #[error("trajectory tracking error: {0}")]
    TrajectoryError(String),
    /// Learning loop error.
    #[error("learning loop error: {0}")]
    LearningLoopError(String),
    /// Pattern not found in reasoning bank.
    #[error("pattern not found: {0}")]
    PatternNotFound(String),
    /// Consolidation error.
    #[error("knowledge consolidation error: {0}")]
    ConsolidationError(String),
    /// Dimension mismatch between input and configuration.
    #[error("dimension mismatch: expected {expected}, got {actual}")]
    DimensionMismatch {
        /// Expected dimension.
        expected: usize,
        /// Actual dimension.
        actual: usize,
    },
    /// Engine not initialized.
    #[error("SONA engine not initialized")]
    EngineNotInitialized,
    /// Regime tracking error.
    #[error("regime tracking error: {0}")]
    RegimeTrackingError(String),
    /// Synchronization error between learning loops.
    #[error("loop synchronization error: {0}")]
    SyncError(String),
    /// Configuration error.
    #[error("configuration error: {0}")]
    ConfigurationError(String),
    /// Internal error.
    #[error("internal SONA tuning error: {0}")]
    Internal(String),
}
impl SonaTuningError {
    /// Build a [`Self::DimensionMismatch`] from an expected/actual pair.
    #[must_use]
    pub fn dim_mismatch(expected: usize, actual: usize) -> Self {
        Self::DimensionMismatch { expected, actual }
    }
    /// Build a [`Self::TrajectoryError`] from any string-like message.
    #[must_use]
    pub fn trajectory(msg: impl Into<String>) -> Self {
        let message: String = msg.into();
        Self::TrajectoryError(message)
    }
    /// Build a [`Self::LearningLoopError`] from any string-like message.
    #[must_use]
    pub fn learning_loop(msg: impl Into<String>) -> Self {
        let message: String = msg.into();
        Self::LearningLoopError(message)
    }
}

View File

@@ -0,0 +1,50 @@
//! SONA Tuning Integration - Self-Optimizing Threshold Learning
//!
//! This module provides integration with the `sona` crate for adaptive threshold
//! learning in the coherence engine. SONA (Self-Optimizing Neural Architecture)
//! enables the coherence gate thresholds to adapt based on operational experience.
//!
//! # Architecture
//!
//! The SONA integration provides three learning loops:
//!
//! 1. **Instant Loop** (Micro-LoRA): Ultra-low latency (<0.05ms) adaptation
//! 2. **Background Loop** (Base-LoRA): Deeper learning in background threads
//! 3. **Coordination Loop**: Synchronizes instant and background learning
//!
//! # Key Types
//!
//! - [`SonaThresholdTuner`]: Main adapter for threshold learning
//! - [`ThresholdConfig`]: Threshold configuration for compute lanes
//! - [`ThresholdAdjustment`]: Recommended threshold changes
//! - [`TunerConfig`]: Configuration for the SONA integration
//!
//! # Example
//!
//! ```rust,ignore
//! use prime_radiant::sona_tuning::{SonaThresholdTuner, TunerConfig};
//!
//! // Create threshold tuner
//! let mut tuner = SonaThresholdTuner::new(TunerConfig::default());
//!
//! // Begin tracking a new regime
//! let builder = tuner.begin_regime(&energy_trace);
//!
//! // After observing outcome, learn from it
//! tuner.learn_outcome(builder, success_score);
//!
//! // Query for similar past configurations
//! if let Some(config) = tuner.find_similar_regime(&current_energy) {
//!     // Apply recommended thresholds
//! }
//! ```
mod adjustment;
mod config;
mod error;
mod tuner;
// Re-export every type that appears in the public API surface. The
// submodules are private, so any public type that is *not* re-exported here
// would be unnameable by callers even though public methods return it
// (previously: `ThresholdDelta` on `ThresholdAdjustment::delta`,
// `ComputeLane` from `ThresholdConfig::lane_for_energy`, and
// `RegimeSummary`/`TunerStats` from the tuner's methods).
pub use adjustment::{AdjustmentReason, ThresholdAdjustment, ThresholdDelta};
pub use config::{ComputeLane, LearningLoopConfig, ThresholdConfig, TunerConfig};
pub use error::{SonaTuningError, SonaTuningResult};
pub use tuner::{RegimeSummary, RegimeTracker, SonaThresholdTuner, TunerState, TunerStats};

View File

@@ -0,0 +1,475 @@
//! SONA threshold tuner implementation.
use super::adjustment::{AdjustmentReason, ThresholdAdjustment};
use super::config::{ThresholdConfig, TunerConfig};
use super::error::{SonaTuningError, SonaTuningResult};
use ruvector_sona::{
EwcConfig, EwcPlusPlus, PatternConfig, ReasoningBank, SonaConfig, SonaEngine, TrajectoryBuilder,
};
use std::collections::VecDeque;
/// State of the SONA threshold tuner.
///
/// The tuner moves `Ready -> TrackingRegime -> Ready` per regime, and briefly
/// enters `Consolidating` during knowledge consolidation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TunerState {
    /// Tuner is uninitialized.
    Uninitialized,
    /// Tuner is ready for learning.
    Ready,
    /// Tuner is tracking a regime.
    TrackingRegime,
    /// Tuner is consolidating knowledge.
    Consolidating,
}
/// Tracks operational regimes for pattern learning.
///
/// Keeps a bounded sliding window of energy samples for the active regime,
/// plus the wall-clock time the regime started.
#[derive(Debug)]
pub struct RegimeTracker {
    /// Current regime ID (`None` when no regime is active).
    current_regime: Option<String>,
    /// Energy history for the current regime (bounded by `max_history`).
    energy_history: VecDeque<f32>,
    /// Maximum history length before old samples are evicted.
    max_history: usize,
    /// Regime start timestamp (ms since the Unix epoch; 0 before first start).
    regime_start_ms: u64,
}
impl RegimeTracker {
    /// Create a new regime tracker keeping at most `max_history` samples.
    pub fn new(max_history: usize) -> Self {
        Self {
            current_regime: None,
            energy_history: VecDeque::with_capacity(max_history),
            max_history,
            regime_start_ms: 0,
        }
    }
    /// Start tracking a new regime.
    ///
    /// Any previous history is discarded and replaced with `initial_energy`.
    pub fn start_regime(&mut self, regime_id: impl Into<String>, initial_energy: f32) {
        self.current_regime = Some(regime_id.into());
        self.energy_history.clear();
        self.energy_history.push_back(initial_energy);
        self.regime_start_ms = current_time_ms();
    }
    /// Record an energy observation, evicting the oldest sample when the
    /// window is full.
    pub fn record_energy(&mut self, energy: f32) {
        if self.energy_history.len() >= self.max_history {
            self.energy_history.pop_front();
        }
        self.energy_history.push_back(energy);
    }
    /// Get the current regime ID.
    pub fn current_regime(&self) -> Option<&str> {
        self.current_regime.as_deref()
    }
    /// Get the energy history window.
    pub fn energy_history(&self) -> &VecDeque<f32> {
        &self.energy_history
    }
    /// Get the average energy in the current regime (0.0 when empty).
    pub fn average_energy(&self) -> f32 {
        if self.energy_history.is_empty() {
            return 0.0;
        }
        self.energy_history.iter().sum::<f32>() / self.energy_history.len() as f32
    }
    /// Get the energy trend (positive = increasing, negative = decreasing).
    ///
    /// Computed as mean(second half) - mean(first half); returns 0.0 when
    /// there are fewer than two samples.
    pub fn energy_trend(&self) -> f32 {
        if self.energy_history.len() < 2 {
            return 0.0;
        }
        let half = self.energy_history.len() / 2;
        let first_half_avg: f32 = self.energy_history.iter().take(half).sum::<f32>() / half as f32;
        let second_half_avg: f32 = self.energy_history.iter().skip(half).sum::<f32>()
            / (self.energy_history.len() - half) as f32;
        second_half_avg - first_half_avg
    }
    /// Get regime duration in seconds.
    ///
    /// BUGFIX: uses `saturating_sub` instead of raw `-`. If the wall clock
    /// steps backwards (or `current_time_ms` falls back to 0) after a regime
    /// started, the raw subtraction would underflow — a panic in debug
    /// builds, a huge wrapped value in release builds. Saturating yields 0.0.
    pub fn regime_duration_secs(&self) -> f32 {
        current_time_ms().saturating_sub(self.regime_start_ms) as f32 / 1000.0
    }
    /// End the current regime, returning a summary if one was active.
    pub fn end_regime(&mut self) -> Option<RegimeSummary> {
        self.current_regime.take().map(|id| RegimeSummary {
            regime_id: id,
            duration_secs: self.regime_duration_secs(),
            average_energy: self.average_energy(),
            energy_trend: self.energy_trend(),
            sample_count: self.energy_history.len(),
        })
    }
}
/// Summary of a completed regime, produced by `RegimeTracker::end_regime`.
#[derive(Debug, Clone)]
pub struct RegimeSummary {
    /// Regime identifier.
    pub regime_id: String,
    /// Duration in seconds (wall clock, from regime start to end).
    pub duration_secs: f32,
    /// Average energy over the retained history window.
    pub average_energy: f32,
    /// Energy trend (positive = increasing, negative = decreasing).
    pub energy_trend: f32,
    /// Number of samples in the history window at the time the regime ended.
    pub sample_count: usize,
}
/// SONA threshold tuner for adaptive threshold learning.
///
/// This adapter wraps the SONA engine to provide threshold tuning
/// specifically for the coherence gate. It owns the engine, the EWC++
/// consolidator, and the reasoning bank, and tracks one regime at a time.
pub struct SonaThresholdTuner {
    /// The underlying SONA engine.
    engine: SonaEngine,
    /// EWC++ for preventing catastrophic forgetting.
    ewc: EwcPlusPlus,
    /// Reasoning bank for pattern storage and retrieval.
    reasoning_bank: ReasoningBank,
    /// Configuration (kept for embedding dims and consolidation cadence).
    config: TunerConfig,
    /// Current threshold configuration.
    current_thresholds: ThresholdConfig,
    /// Regime tracker for the active operational regime.
    regime_tracker: RegimeTracker,
    /// State (see [`TunerState`]).
    state: TunerState,
    /// Trajectories completed since last consolidation.
    trajectories_since_consolidation: usize,
}
impl SonaThresholdTuner {
    /// Create a new SONA threshold tuner.
    ///
    /// Wires up the SONA engine, the EWC++ consolidator, and the reasoning
    /// bank from `config`; starts in [`TunerState::Ready`] with the
    /// configured initial thresholds.
    pub fn new(config: TunerConfig) -> Self {
        let sona_config = SonaConfig {
            hidden_dim: config.hidden_dim,
            embedding_dim: config.embedding_dim,
            ..Default::default()
        };
        let engine = SonaEngine::with_config(sona_config);
        let ewc_config = EwcConfig {
            initial_lambda: config.ewc_lambda,
            ..Default::default()
        };
        let ewc = EwcPlusPlus::new(ewc_config);
        let pattern_config = PatternConfig::default();
        let reasoning_bank = ReasoningBank::new(pattern_config);
        Self {
            engine,
            ewc,
            reasoning_bank,
            // Copy this out before `config` is moved into the struct below.
            current_thresholds: config.initial_thresholds,
            regime_tracker: RegimeTracker::new(1000),
            state: TunerState::Ready,
            trajectories_since_consolidation: 0,
            config,
        }
    }
    /// Create with default configuration.
    pub fn default_tuner() -> Self {
        Self::new(TunerConfig::default())
    }
    /// Get the current state.
    pub fn state(&self) -> TunerState {
        self.state
    }
    /// Get the current threshold configuration.
    pub fn current_thresholds(&self) -> &ThresholdConfig {
        &self.current_thresholds
    }
    /// Begin tracking a new operational regime.
    ///
    /// This starts a trajectory in the SONA engine and begins
    /// recording energy observations. The trace is copied into the first
    /// `embedding_dim` slots of a zero-initialized embedding (longer traces
    /// are truncated, shorter ones are zero-padded).
    ///
    /// # Errors
    /// Returns a trajectory error if `energy_trace` is empty.
    pub fn begin_regime(&mut self, energy_trace: &[f32]) -> SonaTuningResult<TrajectoryBuilder> {
        if energy_trace.is_empty() {
            return Err(SonaTuningError::trajectory("empty energy trace"));
        }
        // Convert energy trace to embedding
        let mut embedding = vec![0.0; self.config.embedding_dim];
        for (i, &e) in energy_trace
            .iter()
            .take(self.config.embedding_dim)
            .enumerate()
        {
            embedding[i] = e;
        }
        // Start SONA trajectory
        let builder = self.engine.begin_trajectory(embedding);
        // Start regime tracking. The id is derived from the wall clock, so
        // two regimes started within the same millisecond would collide.
        let regime_id = format!("regime_{}", current_time_ms());
        self.regime_tracker
            .start_regime(&regime_id, energy_trace.last().copied().unwrap_or(0.0));
        self.state = TunerState::TrackingRegime;
        Ok(builder)
    }
    /// Record an energy observation during regime tracking.
    pub fn record_energy(&mut self, energy: f32) {
        self.regime_tracker.record_energy(energy);
    }
    /// Learn from the outcome of a regime.
    ///
    /// This ends the SONA trajectory and stores successful patterns.
    /// Scores above 0.8 trigger pattern storage; scores above 0.9 also yield
    /// a [`ThresholdAdjustment`] — currently reporting-only, since it
    /// proposes the unchanged current thresholds.
    pub fn learn_outcome(
        &mut self,
        builder: TrajectoryBuilder,
        success_score: f32,
    ) -> SonaTuningResult<Option<ThresholdAdjustment>> {
        // End SONA trajectory
        self.engine.end_trajectory(builder, success_score);
        // End regime tracking
        let summary = self.regime_tracker.end_regime();
        self.trajectories_since_consolidation += 1;
        self.state = TunerState::Ready;
        // If successful, store pattern
        if success_score > 0.8 {
            self.store_success_pattern(success_score)?;
        }
        // Auto-consolidate if needed
        if self.trajectories_since_consolidation >= self.config.auto_consolidate_after {
            self.consolidate_knowledge()?;
        }
        // Generate adjustment if we learned something useful
        if success_score > 0.9 {
            if let Some(summary) = summary {
                return Ok(Some(ThresholdAdjustment::new(
                    &self.current_thresholds,
                    self.current_thresholds, // Keep current for now
                    AdjustmentReason::BackgroundLearning {
                        samples: summary.sample_count,
                    },
                    success_score,
                )));
            }
        }
        Ok(None)
    }
    /// Store a successful pattern in the reasoning bank.
    ///
    /// Currently a no-op placeholder (see note below).
    fn store_success_pattern(&mut self, _score: f32) -> SonaTuningResult<()> {
        // Note: ReasoningBank uses add_trajectory for storage
        // For simplicity, we skip pattern storage in this integration
        // A full implementation would create QueryTrajectory objects
        Ok(())
    }
    /// Convert a threshold configuration to an embedding vector.
    ///
    /// Layout: `[reflex, retrieval, heavy, window_minutes, 0, 0, ...]`.
    /// NOTE(review): indexes slots 0..=3 unconditionally, so this panics when
    /// `config.embedding_dim < 4` — confirm configs always use >= 4 dims.
    fn threshold_to_embedding(&self, config: &ThresholdConfig) -> Vec<f32> {
        let mut embedding = vec![0.0; self.config.embedding_dim];
        embedding[0] = config.reflex;
        embedding[1] = config.retrieval;
        embedding[2] = config.heavy;
        embedding[3] = config.persistence_window_secs as f32 / 60.0; // Normalize to minutes
        embedding
    }
    /// Convert an embedding back to threshold configuration.
    ///
    /// Returns `None` when the embedding is shorter than 4 values or decodes
    /// to an invalid (non-increasing / out-of-range) configuration.
    fn embedding_to_threshold(&self, embedding: &[f32]) -> Option<ThresholdConfig> {
        if embedding.len() < 4 {
            return None;
        }
        let config = ThresholdConfig {
            reflex: embedding[0].clamp(0.0, 1.0),
            retrieval: embedding[1].clamp(0.0, 1.0),
            heavy: embedding[2].clamp(0.0, 1.0),
            persistence_window_secs: (embedding[3] * 60.0).max(1.0) as u64,
        };
        if config.is_valid() {
            Some(config)
        } else {
            None
        }
    }
    /// Find a similar regime configuration from past experience.
    ///
    /// NOTE(review): assumes stored pattern centroids share the layout
    /// produced by `threshold_to_embedding` — verify against how patterns
    /// are actually added to the `ReasoningBank`.
    pub fn find_similar_regime(&self, current_energy: &[f32]) -> Option<ThresholdConfig> {
        // Convert current energy to query embedding
        let mut query = vec![0.0; self.config.embedding_dim];
        for (i, &e) in current_energy
            .iter()
            .take(self.config.embedding_dim)
            .enumerate()
        {
            query[i] = e;
        }
        // Query reasoning bank using find_similar
        let similar = self.reasoning_bank.find_similar(&query, 1);
        if let Some(pattern) = similar.first() {
            self.embedding_to_threshold(&pattern.centroid)
        } else {
            None
        }
    }
    /// Instantly adapt to an energy spike.
    ///
    /// This uses Micro-LoRA for ultra-fast (<0.05ms) adaptation.
    /// NOTE(review): `output` is discarded here; presumably
    /// `apply_micro_lora` also updates internal adapter state — confirm
    /// against the ruvector_sona documentation.
    pub fn instant_adapt(&mut self, energy_spike: f32) -> ThresholdAdjustment {
        // Apply Micro-LoRA adaptation
        let input = vec![energy_spike; self.config.embedding_dim];
        let mut output = vec![0.0; self.config.embedding_dim];
        self.engine.apply_micro_lora(&input, &mut output);
        // Generate adjustment
        ThresholdAdjustment::for_energy_spike(&self.current_thresholds, energy_spike)
    }
    /// Apply a threshold adjustment.
    ///
    /// Invalid recommendations are silently ignored, so a bad adjustment can
    /// never corrupt the active thresholds.
    pub fn apply_adjustment(&mut self, adjustment: &ThresholdAdjustment) {
        if adjustment.new_thresholds.is_valid() {
            self.current_thresholds = adjustment.new_thresholds;
        }
    }
    /// Consolidate learned knowledge using EWC++.
    ///
    /// This prevents catastrophic forgetting when adapting to new regimes,
    /// and resets the trajectories-since-consolidation counter.
    pub fn consolidate_knowledge(&mut self) -> SonaTuningResult<()> {
        self.state = TunerState::Consolidating;
        // Trigger EWC++ consolidation
        self.ewc.consolidate_all_tasks();
        self.trajectories_since_consolidation = 0;
        self.state = TunerState::Ready;
        Ok(())
    }
    /// Get tuner statistics (a point-in-time snapshot).
    pub fn stats(&self) -> TunerStats {
        TunerStats {
            state: self.state,
            current_thresholds: self.current_thresholds,
            patterns_stored: self.reasoning_bank.pattern_count(),
            trajectories_since_consolidation: self.trajectories_since_consolidation,
            regime_average_energy: self.regime_tracker.average_energy(),
            regime_energy_trend: self.regime_tracker.energy_trend(),
        }
    }
    /// Reset the tuner to initial state.
    ///
    /// Restores the configured initial thresholds and clears regime
    /// tracking; the engine, EWC, and reasoning-bank state are not touched.
    pub fn reset(&mut self) {
        self.current_thresholds = self.config.initial_thresholds;
        self.regime_tracker = RegimeTracker::new(1000);
        self.trajectories_since_consolidation = 0;
        self.state = TunerState::Ready;
    }
}
// Manual Debug impl: only the cheap, informative fields are shown —
// presumably SonaEngine/EwcPlusPlus/ReasoningBank do not implement Debug
// (or their output would be unhelpfully large); confirm if deriving becomes
// possible upstream.
impl std::fmt::Debug for SonaThresholdTuner {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SonaThresholdTuner")
            .field("state", &self.state)
            .field("current_thresholds", &self.current_thresholds)
            .field("patterns_stored", &self.reasoning_bank.pattern_count())
            .finish()
    }
}
/// Tuner statistics — a point-in-time snapshot produced by
/// `SonaThresholdTuner::stats`.
#[derive(Debug, Clone, Copy)]
pub struct TunerStats {
    /// Current state.
    pub state: TunerState,
    /// Current thresholds.
    pub current_thresholds: ThresholdConfig,
    /// Number of patterns stored in the reasoning bank.
    pub patterns_stored: usize,
    /// Trajectories since last consolidation.
    pub trajectories_since_consolidation: usize,
    /// Average energy in current regime (0.0 when no samples).
    pub regime_average_energy: f32,
    /// Energy trend in current regime (positive = increasing).
    pub regime_energy_trend: f32,
}
/// Current wall-clock time as milliseconds since the Unix epoch.
///
/// Falls back to 0 if the system clock reports a time before the epoch.
/// NOTE: an identical helper exists in the adjustment module; a shared
/// crate-internal utility would remove the duplication.
fn current_time_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64)
}
#[cfg(test)]
mod tests {
    use super::*;
    // A fresh tuner must start in the Ready state.
    #[test]
    fn test_tuner_creation() {
        let tuner = SonaThresholdTuner::default_tuner();
        assert_eq!(tuner.state(), TunerState::Ready);
    }
    // Recording rising energies must raise the average above the seed value
    // and produce a positive trend.
    #[test]
    fn test_regime_tracker() {
        let mut tracker = RegimeTracker::new(100);
        tracker.start_regime("test", 0.5);
        tracker.record_energy(0.6);
        tracker.record_energy(0.7);
        assert_eq!(tracker.current_regime(), Some("test"));
        assert!(tracker.average_energy() > 0.5);
        assert!(tracker.energy_trend() > 0.0);
    }
    // A 0.5 spike should tighten thresholds and be flagged urgent.
    #[test]
    fn test_instant_adapt() {
        let mut tuner = SonaThresholdTuner::default_tuner();
        let initial = *tuner.current_thresholds();
        let adjustment = tuner.instant_adapt(0.5);
        assert!(adjustment.new_thresholds.reflex < initial.reflex);
        assert!(adjustment.urgent);
    }
    // Encoding thresholds to an embedding and decoding back must recover
    // the reflex threshold (within float tolerance).
    #[test]
    fn test_threshold_embedding_roundtrip() {
        let tuner = SonaThresholdTuner::default_tuner();
        let original = ThresholdConfig::default();
        let embedding = tuner.threshold_to_embedding(&original);
        let recovered = tuner.embedding_to_threshold(&embedding);
        assert!(recovered.is_some());
        let recovered = recovered.unwrap();
        assert!((recovered.reflex - original.reflex).abs() < 0.001);
    }
}
}