Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,560 @@
//! Mincut-Gated Attention Integration
//!
//! This module bridges ruQu's coherence gate with the `ruvector-mincut-gated-transformer`
//! crate's attention optimization mechanisms:
//!
//! 1. **GatePacket Bridge** - Convert ruQu's `TileReport` aggregates into `GatePacket`
//! 2. **MincutDepthRouter** - λ-based Mixture-of-Depths routing for 50% FLOPs reduction
//! 3. **CoherenceEarlyExit** - Layer skipping based on coherence stability
//!
//! ## Usage
//!
//! ```rust,ignore
//! use ruqu::attention::{CoherenceAttention, AttentionConfig};
//! use ruqu::tile::{TileReport, GateThresholds};
//!
//! // Create attention optimizer
//! let config = AttentionConfig::default();
//! let mut attention = CoherenceAttention::new(config);
//!
//! // Process syndrome patterns with coherence-optimized attention
//! let reports: Vec<TileReport> = collect_worker_reports();
//! let (gate_packet, routing) = attention.optimize(&reports);
//!
//! // Use routing decisions for efficient syndrome analysis
//! for (i, route) in routing.iter().enumerate() {
//! if route.requires_compute() {
//! // Full analysis for this syndrome entry
//! } else {
//! // Skip - coherence is stable, use cached result
//! }
//! }
//! ```
#[cfg(feature = "attention")]
use ruvector_mincut_gated_transformer::{
CoherenceEarlyExit, EarlyExitConfig, EarlyExitDecision, ExitReason, GatePacket,
MincutDepthRouter, ModRoutingConfig, RoutingStats, TokenRoute,
};
use crate::tile::{GateDecision, TileReport};
/// Configuration for coherence-optimized attention
///
/// Tunes how aggressively syndrome entries are skipped during routing.
/// See [`AttentionConfig::realtime`] and [`AttentionConfig::accurate`]
/// for preset profiles.
#[derive(Clone, Debug)]
pub struct AttentionConfig {
    /// Target FLOPs reduction (0.0-0.9), default 0.5 for 50%
    pub flops_reduction: f32,
    /// Minimum entries that must be processed per round
    pub min_entries_per_round: u16,
    /// λ-delta threshold for skipping (Q15 scale)
    /// Lower = more aggressive skipping
    pub lambda_delta_skip_threshold: i32,
    /// Enable adaptive capacity based on coherence stability
    pub adaptive_capacity: bool,
    /// Enable early exit when coherence is very stable
    pub enable_early_exit: bool,
    /// Early exit confidence threshold (0.0-1.0)
    pub early_exit_threshold: f32,
}
impl Default for AttentionConfig {
fn default() -> Self {
Self {
flops_reduction: 0.5,
min_entries_per_round: 4,
lambda_delta_skip_threshold: 3276, // ~10% of Q15 range
adaptive_capacity: true,
enable_early_exit: true,
early_exit_threshold: 0.95,
}
}
}
impl AttentionConfig {
    /// Preset tuned for real-time coherence gating: skips more aggressively
    /// than the default profile (higher FLOPs-reduction target, lower
    /// λ-delta threshold, smaller per-round minimum).
    pub fn realtime() -> Self {
        Self {
            flops_reduction: 0.6,
            min_entries_per_round: 2,
            lambda_delta_skip_threshold: 2000,
            early_exit_threshold: 0.9,
            // adaptive capacity and early exit stay enabled, as in default()
            ..Self::default()
        }
    }

    /// Preset tuned for accuracy: processes more entries per round, raises
    /// the skip threshold, and disables adaptive capacity and early exit.
    pub fn accurate() -> Self {
        Self {
            flops_reduction: 0.3,
            min_entries_per_round: 8,
            lambda_delta_skip_threshold: 5000,
            adaptive_capacity: false,
            enable_early_exit: false,
            early_exit_threshold: 0.99,
        }
    }
}
/// Bridge between ruQu's TileReport and GatePacket
///
/// Converts aggregated tile metrics into the format expected by
/// the mincut-gated-transformer system. Keeps a small amount of history
/// so successive packets carry a lambda trend and a smoothed boundary
/// count across calls.
#[derive(Clone, Copy, Debug, Default)]
pub struct GatePacketBridge {
    /// Previous lambda for trend detection
    prev_lambda: u32,
    /// Smoothed boundary edge count
    smoothed_boundary: u16,
}
impl GatePacketBridge {
    /// Create a new bridge with zeroed history.
    pub fn new() -> Self {
        Self::default()
    }

    /// Convert tile reports into a GatePacket
    ///
    /// Aggregates the minimum strictly-positive `local_cut`, the maximum
    /// `shift_score`, and the non-zero boundary-candidate count across all
    /// reports, then scales them into the packet's Q15-style fields.
    ///
    /// # Arguments
    /// * `reports` - Aggregated worker tile reports
    ///
    /// # Returns
    /// A `GatePacket` suitable for mincut-gated-transformer
    #[cfg(feature = "attention")]
    pub fn to_gate_packet(&mut self, reports: &[TileReport]) -> GatePacket {
        if reports.is_empty() {
            return GatePacket::default();
        }
        // Aggregate metrics from reports
        let mut min_cut = f64::MAX;
        let mut max_shift = 0.0f64;
        let mut total_boundary = 0u32;
        let mut max_boundary_concentration = 0u32;
        for report in reports {
            // Track the smallest strictly-positive cut; zero cuts are ignored.
            if report.local_cut < min_cut && report.local_cut > 0.0 {
                min_cut = report.local_cut;
            }
            if report.shift_score > max_shift {
                max_shift = report.shift_score;
            }
            // Use boundary candidate count as proxy for boundary edges
            total_boundary += report
                .boundary_candidates
                .iter()
                .filter(|&&c| c != 0)
                .count() as u32;
            // Higher shift = more concentrated boundaries
            let concentration = (report.shift_score * 32767.0) as u32;
            if concentration > max_boundary_concentration {
                max_boundary_concentration = concentration;
            }
        }
        // Convert min_cut to lambda (Q15-ish scale)
        // Higher min_cut = more coherent = higher lambda
        // NOTE(review): if no report had local_cut > 0.0, min_cut stays at
        // f64::MAX and the clamp maps it to the maximum lambda (32767),
        // i.e. "fully coherent" — confirm that is the intended default.
        let lambda = (min_cut.clamp(0.0, 1000.0) * 32.767) as u32;
        // Smooth boundary edges (2-tap average with the previous value)
        let boundary_edges = ((total_boundary as u32 + self.smoothed_boundary as u32) / 2) as u16;
        self.smoothed_boundary = boundary_edges;
        // Build packet
        let packet = GatePacket {
            lambda,
            lambda_prev: self.prev_lambda,
            boundary_edges,
            boundary_concentration_q15: max_boundary_concentration.min(32767) as u16,
            partition_count: reports.len() as u16,
            flags: 0,
        };
        // Update history so the next packet carries a lambda trend
        self.prev_lambda = lambda;
        packet
    }

    /// Convert a GatePacket back to approximate metrics
    ///
    /// Returns `(min_cut, shift_score, partition_count)` — the inverse of
    /// the scaling in `to_gate_packet` (lossy due to integer truncation).
    #[cfg(feature = "attention")]
    pub fn from_gate_packet(packet: &GatePacket) -> (f64, f64, usize) {
        let min_cut = packet.lambda as f64 / 32.767;
        let shift_score = packet.boundary_concentration_q15 as f64 / 32767.0;
        let partition_count = packet.partition_count as usize;
        (min_cut, shift_score, partition_count)
    }
}
/// Coherence-optimized attention processor
///
/// Uses mincut signals to dynamically route syndrome entries through
/// the analysis pipeline, achieving up to 50% FLOPs reduction while
/// maintaining accuracy on critical boundary patterns.
///
/// Only compiled with the `attention` feature; see the `fallback` module
/// for the dependency-free variant with the same public surface.
#[cfg(feature = "attention")]
pub struct CoherenceAttention {
    /// User-supplied tuning knobs
    config: AttentionConfig,
    /// λ-based Mixture-of-Depths router from the transformer crate
    router: MincutDepthRouter,
    /// TileReport → GatePacket converter with trend history
    bridge: GatePacketBridge,
    /// Cumulative routing statistics
    stats: AttentionStats,
}
#[cfg(feature = "attention")]
impl CoherenceAttention {
    /// Create a new coherence attention processor
    ///
    /// Translates the high-level `AttentionConfig` into the router's
    /// `ModRoutingConfig`; `layer_capacity_ratio` is derived as
    /// `1.0 - flops_reduction`.
    pub fn new(config: AttentionConfig) -> Self {
        let mod_config = ModRoutingConfig {
            lambda_delta_skip_threshold: config.lambda_delta_skip_threshold,
            boundary_token_force_compute: true,
            layer_capacity_ratio: 1.0 - config.flops_reduction,
            min_tokens_per_layer: config.min_entries_per_round,
            adaptive_capacity: config.adaptive_capacity,
        };
        Self {
            config,
            // NOTE(review): a config rejected by the router silently falls
            // back to the router's defaults — consider surfacing the error.
            router: MincutDepthRouter::new(mod_config).unwrap_or_default(),
            bridge: GatePacketBridge::new(),
            stats: AttentionStats::default(),
        }
    }

    /// Optimize syndrome entry processing based on coherence
    ///
    /// Builds a `GatePacket` from the reports, routes one token per report
    /// (indexed by position), and accumulates routing statistics.
    ///
    /// # Arguments
    /// * `reports` - Worker tile reports with syndrome data
    ///
    /// # Returns
    /// Tuple of (GatePacket, routing decisions for each entry)
    pub fn optimize(&mut self, reports: &[TileReport]) -> (GatePacket, Vec<TokenRoute>) {
        let gate = self.bridge.to_gate_packet(reports);
        // Generate position indices for routing (one token per report)
        let positions: Vec<u16> = (0..reports.len() as u16).collect();
        // Route entries based on coherence
        let routes = self.router.route_tokens(&gate, &positions);
        // Update cumulative stats from this round's routing decisions
        let routing_stats = self.router.routing_stats(&routes);
        self.stats.total_entries += routing_stats.total_tokens;
        self.stats.computed_entries += routing_stats.compute_tokens;
        self.stats.skipped_entries += routing_stats.skip_tokens;
        self.stats.boundary_entries += routing_stats.boundary_tokens;
        self.stats.decisions += 1;
        (gate, routes)
    }

    /// Check if early exit is warranted based on coherence stability
    ///
    /// Exit requires BOTH stability above `early_exit_threshold` AND being
    /// past the midpoint of the layer stack.
    ///
    /// # Arguments
    /// * `gate` - Current gate packet
    /// * `current_layer` - Current processing layer
    /// * `max_layers` - Maximum number of layers (a zero value makes
    ///   `progress` NaN, whose comparison is false, so no exit occurs)
    ///
    /// # Returns
    /// Early exit decision
    pub fn check_early_exit(
        &self,
        gate: &GatePacket,
        current_layer: usize,
        max_layers: usize,
    ) -> EarlyExitDecision {
        if !self.config.enable_early_exit {
            return EarlyExitDecision {
                should_exit: false,
                confidence: 0.0,
                reason: ExitReason::None,
            };
        }
        // Calculate coherence stability: 1.0 = no lambda movement,
        // 0.0 = a full-scale Q15 swing or more.
        let lambda_delta_abs = gate.lambda_delta().abs() as f32;
        let stability = 1.0 - (lambda_delta_abs / 32768.0).min(1.0);
        // Calculate progress through layers
        let progress = current_layer as f32 / max_layers as f32;
        // Exit if very stable AND past midpoint
        let should_exit = stability > self.config.early_exit_threshold && progress > 0.5;
        EarlyExitDecision {
            should_exit,
            confidence: stability,
            reason: if should_exit {
                ExitReason::HighConfidence
            } else {
                ExitReason::None
            },
        }
    }

    /// Get accumulated statistics
    pub fn stats(&self) -> &AttentionStats {
        &self.stats
    }

    /// Reset statistics
    pub fn reset_stats(&mut self) {
        self.stats = AttentionStats::default();
    }
}
/// Statistics for coherence attention
#[derive(Clone, Copy, Debug, Default)]
pub struct AttentionStats {
    /// Total entries processed
    pub total_entries: usize,
    /// Entries that required full computation
    pub computed_entries: usize,
    /// Entries that were skipped
    pub skipped_entries: usize,
    /// Boundary entries (always computed)
    pub boundary_entries: usize,
    /// Number of routing decisions made
    pub decisions: usize,
}
impl AttentionStats {
    /// Fraction of entries skipped, i.e. the achieved FLOPs reduction.
    /// Returns 0.0 when no entries have been processed.
    pub fn flops_reduction(&self) -> f32 {
        match self.total_entries {
            0 => 0.0,
            total => self.skipped_entries as f32 / total as f32,
        }
    }
    /// Fraction of entries that went through full computation.
    /// Returns 0.0 when no entries have been processed.
    pub fn compute_ratio(&self) -> f32 {
        match self.total_entries {
            0 => 0.0,
            total => self.computed_entries as f32 / total as f32,
        }
    }
}
/// Fallback types when attention feature is disabled
///
/// Provides API-compatible stand-ins for `TokenRoute`, `GatePacket`, and
/// `CoherenceAttention` so callers compile unchanged without the
/// `ruvector-mincut-gated-transformer` dependency. Routing here is a
/// simple heuristic rather than λ-based Mixture-of-Depths.
#[cfg(not(feature = "attention"))]
pub mod fallback {
    use super::*;

    /// Stub TokenRoute for when attention feature is disabled
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum TokenRoute {
        /// Process through full computation
        Compute,
        /// Skip - use cached result
        Skip,
        /// Boundary token - always compute
        Boundary,
    }

    impl TokenRoute {
        /// Check if this route requires computation
        /// (everything except `Skip` does).
        pub fn requires_compute(&self) -> bool {
            !matches!(self, TokenRoute::Skip)
        }
    }

    /// Stub GatePacket for when attention feature is disabled
    #[derive(Clone, Copy, Debug, Default)]
    pub struct GatePacket {
        /// Current lambda (coherence metric)
        pub lambda: u32,
        /// Previous lambda for trend detection
        pub lambda_prev: u32,
        /// Number of boundary edges
        pub boundary_edges: u16,
        /// Boundary concentration (Q15 scale)
        pub boundary_concentration_q15: u16,
        /// Number of partitions
        pub partition_count: u16,
        /// Policy flags
        pub flags: u16,
    }

    impl GatePacket {
        /// Calculate lambda delta (signed change since the previous packet)
        pub fn lambda_delta(&self) -> i32 {
            (self.lambda as i32) - (self.lambda_prev as i32)
        }
    }

    /// Simplified attention processor without transformer dependency
    pub struct CoherenceAttention {
        // Retained for API parity with the feature-gated processor even
        // though the heuristic routing below does not consult it.
        #[allow(dead_code)]
        config: AttentionConfig,
        bridge: GatePacketBridge,
        stats: AttentionStats,
    }

    impl CoherenceAttention {
        /// Create a new coherence attention processor
        pub fn new(config: AttentionConfig) -> Self {
            Self {
                config,
                bridge: GatePacketBridge::new(),
                stats: AttentionStats::default(),
            }
        }

        /// Optimize syndrome entry processing based on coherence
        ///
        /// Heuristic routing: entries with boundary candidates always
        /// compute; stable (low shift) entries at even positions are
        /// skipped; everything else computes.
        pub fn optimize(&mut self, reports: &[TileReport]) -> (GatePacket, Vec<TokenRoute>) {
            let gate = self.bridge.to_gate_packet_fallback(reports);
            // Simple heuristic routing without transformer
            let routes: Vec<TokenRoute> = reports
                .iter()
                .enumerate()
                .map(|(i, report)| {
                    // Boundary tokens always compute
                    if report.boundary_candidates.iter().any(|&c| c != 0) {
                        return TokenRoute::Boundary;
                    }
                    // Skip if shift score is low (stable); the i % 2 check
                    // caps skipping at every other entry
                    if report.shift_score < 0.1 && i % 2 == 0 {
                        return TokenRoute::Skip;
                    }
                    TokenRoute::Compute
                })
                .collect();
            // Update stats (mirrors the feature-gated implementation)
            self.stats.total_entries += routes.len();
            self.stats.computed_entries += routes.iter().filter(|r| r.requires_compute()).count();
            self.stats.skipped_entries += routes
                .iter()
                .filter(|r| matches!(r, TokenRoute::Skip))
                .count();
            self.stats.boundary_entries += routes
                .iter()
                .filter(|r| matches!(r, TokenRoute::Boundary))
                .count();
            self.stats.decisions += 1;
            (gate, routes)
        }

        /// Get accumulated statistics
        pub fn stats(&self) -> &AttentionStats {
            &self.stats
        }

        /// Reset statistics
        pub fn reset_stats(&mut self) {
            self.stats = AttentionStats::default();
        }
    }

    impl GatePacketBridge {
        /// Convert tile reports to gate packet (fallback implementation)
        ///
        /// Same min-cut → lambda scaling as the feature-gated version, but
        /// boundary edges are not tracked (always 0).
        pub fn to_gate_packet_fallback(&mut self, reports: &[TileReport]) -> GatePacket {
            if reports.is_empty() {
                return GatePacket::default();
            }
            let mut min_cut = f64::MAX;
            let mut max_shift = 0.0f64;
            for report in reports {
                // Smallest strictly-positive cut across all reports
                if report.local_cut < min_cut && report.local_cut > 0.0 {
                    min_cut = report.local_cut;
                }
                if report.shift_score > max_shift {
                    max_shift = report.shift_score;
                }
            }
            // Q15-ish scaling: a cut of 1000.0 maps to 32767
            let lambda = (min_cut.clamp(0.0, 1000.0) * 32.767) as u32;
            let packet = GatePacket {
                lambda,
                lambda_prev: self.prev_lambda,
                boundary_edges: 0,
                boundary_concentration_q15: (max_shift * 32767.0) as u16,
                partition_count: reports.len() as u16,
                flags: 0,
            };
            // Remember lambda so the next packet carries a trend
            self.prev_lambda = lambda;
            packet
        }
    }
}
#[cfg(not(feature = "attention"))]
pub use fallback::*;
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config exposes the documented 50% FLOPs target.
    #[test]
    fn test_attention_config_default() {
        let config = AttentionConfig::default();
        assert_eq!(config.flops_reduction, 0.5);
        assert!(config.enable_early_exit);
    }

    /// Realtime preset is more aggressive than the default profile.
    #[test]
    fn test_attention_config_realtime() {
        let config = AttentionConfig::realtime();
        assert!(config.flops_reduction > 0.5);
    }

    /// Bridge produces a non-zero lambda and preserves partition count,
    /// regardless of which feature-gated implementation is compiled in.
    #[test]
    fn test_gate_packet_bridge() {
        let mut bridge = GatePacketBridge::new();
        // First call establishes baseline
        let reports = vec![
            {
                let mut r = TileReport::new(1);
                r.local_cut = 10.0;
                r.shift_score = 0.2;
                r
            },
            {
                let mut r = TileReport::new(2);
                r.local_cut = 15.0;
                r.shift_score = 0.1;
                r
            },
        ];
        #[cfg(feature = "attention")]
        {
            let packet = bridge.to_gate_packet(&reports);
            assert!(packet.lambda > 0);
            assert_eq!(packet.partition_count, 2);
        }
        #[cfg(not(feature = "attention"))]
        {
            let packet = bridge.to_gate_packet_fallback(&reports);
            assert!(packet.lambda > 0);
            assert_eq!(packet.partition_count, 2);
        }
    }

    /// Ratio helpers divide by total entries.
    #[test]
    fn test_attention_stats() {
        let mut stats = AttentionStats::default();
        stats.total_entries = 100;
        stats.computed_entries = 60;
        stats.skipped_entries = 40;
        assert_eq!(stats.flops_reduction(), 0.4);
        assert_eq!(stats.compute_ratio(), 0.6);
    }
}

View File

@@ -0,0 +1,701 @@
//! ruQu Demo Binary - Proof Artifact
//!
//! This is the runnable demonstration of ruQu's capabilities.
//!
//! ## What it does
//!
//! 1. Generates a streaming syndrome feed
//! 2. Runs the coherence gate loop per round
//! 3. Prints live status: round, cut value, risk, region mask
//! 4. Writes metrics file: latency histogram, p50/p99/p999, false alarms
//!
//! ## Usage
//!
//! ```bash
//! # Basic run with defaults
//! cargo run --bin ruqu_demo --release
//!
//! # Custom parameters
//! cargo run --bin ruqu_demo --release -- \
//! --distance 7 \
//! --error-rate 0.01 \
//! --rounds 10000 \
//! --output metrics.json
//! ```
use std::collections::VecDeque;
use std::fs::File;
use std::io::Write;
use std::time::Instant;
use ruqu::stim::{StimSyndromeSource, SurfaceCodeConfig};
use ruqu::syndrome::DetectorBitmap;
// ============================================================================
// CONFIGURATION
// ============================================================================
/// Demo run configuration, populated from CLI flags (see `print_help`).
#[derive(Debug, Clone)]
struct DemoConfig {
    /// Code distance
    code_distance: usize,
    /// Physical error rate
    error_rate: f64,
    /// Number of rounds to run
    num_rounds: usize,
    /// Random seed
    seed: u64,
    /// Output metrics file (None disables the JSON dump)
    output_file: Option<String>,
    /// Print interval (every N rounds)
    print_interval: usize,
    /// Gate threshold
    threshold: f64,
}
impl Default for DemoConfig {
    /// Defaults match the values documented in `print_help`.
    fn default() -> Self {
        Self {
            code_distance: 5,
            error_rate: 0.01,
            num_rounds: 10000,
            seed: 42,
            output_file: Some("ruqu_metrics.json".to_string()),
            print_interval: 1000,
            threshold: 5.0,
        }
    }
}
/// Parse command-line arguments into a `DemoConfig`.
///
/// Flags mirror the options documented in `print_help`. Malformed values
/// abort via `expect`; an unknown flag — or a flag missing its value —
/// prints a diagnostic and exits with status 1. (Previously a flag given
/// as the last argument panicked with an index-out-of-bounds.)
fn parse_args() -> DemoConfig {
    // Fetch the argument at `i` (the value slot after a flag) or exit with
    // a clear error naming the flag that is missing its value.
    fn value_at<'a>(args: &'a [String], i: usize, flag: &str) -> &'a str {
        match args.get(i) {
            Some(value) => value,
            None => {
                eprintln!("Missing value for {}", flag);
                std::process::exit(1);
            }
        }
    }
    let args: Vec<String> = std::env::args().collect();
    let mut config = DemoConfig::default();
    let mut i = 1;
    while i < args.len() {
        match args[i].as_str() {
            "--distance" | "-d" => {
                i += 1;
                config.code_distance = value_at(&args, i, "--distance")
                    .parse()
                    .expect("Invalid distance");
            }
            "--error-rate" | "-e" => {
                i += 1;
                config.error_rate = value_at(&args, i, "--error-rate")
                    .parse()
                    .expect("Invalid error rate");
            }
            "--rounds" | "-r" => {
                i += 1;
                config.num_rounds = value_at(&args, i, "--rounds")
                    .parse()
                    .expect("Invalid rounds");
            }
            "--seed" | "-s" => {
                i += 1;
                config.seed = value_at(&args, i, "--seed")
                    .parse()
                    .expect("Invalid seed");
            }
            "--output" | "-o" => {
                i += 1;
                config.output_file = Some(value_at(&args, i, "--output").to_string());
            }
            "--threshold" | "-t" => {
                i += 1;
                config.threshold = value_at(&args, i, "--threshold")
                    .parse()
                    .expect("Invalid threshold");
            }
            "--help" | "-h" => {
                print_help();
                std::process::exit(0);
            }
            _ => {
                eprintln!("Unknown argument: {}", args[i]);
                std::process::exit(1);
            }
        }
        i += 1;
    }
    config
}
/// Print CLI usage for the demo binary (flag defaults mirror `DemoConfig`).
fn print_help() {
    println!(
        r#"
ruQu Demo - Coherence Gate Demonstration
USAGE:
ruqu_demo [OPTIONS]
OPTIONS:
-d, --distance <N> Code distance (default: 5)
-e, --error-rate <P> Physical error rate (default: 0.01)
-r, --rounds <N> Number of rounds (default: 10000)
-s, --seed <N> Random seed (default: 42)
-o, --output <FILE> Output metrics file (default: ruqu_metrics.json)
-t, --threshold <T> Gate threshold (default: 5.0)
-h, --help Print this help message
"#
    );
}
// ============================================================================
// LATENCY TRACKING
// ============================================================================
/// Tracks per-round gate latencies: the full history (for percentiles and
/// the histogram) plus a bounded sliding window of recent samples.
struct LatencyTracker {
    /// Every latency ever recorded, in nanoseconds
    latencies: Vec<u64>,
    /// Sliding window of the last `max_recent` samples
    recent: VecDeque<u64>,
    /// Capacity of the sliding window
    max_recent: usize,
}
impl LatencyTracker {
    /// Create a tracker whose recent-window holds up to `max_recent` samples.
    fn new(max_recent: usize) -> Self {
        Self {
            latencies: Vec::new(),
            recent: VecDeque::with_capacity(max_recent),
            max_recent,
        }
    }
    /// Record one latency sample (ns) in both the history and the window.
    fn record(&mut self, latency_ns: u64) {
        self.latencies.push(latency_ns);
        if self.recent.len() >= self.max_recent {
            self.recent.pop_front();
        }
        self.recent.push_back(latency_ns);
    }
    /// Percentile over the full history, via sort + truncated-rank indexing.
    /// Returns 0 when no samples have been recorded.
    fn percentile(&self, p: f64) -> u64 {
        let mut sorted = self.latencies.clone();
        if sorted.is_empty() {
            return 0;
        }
        sorted.sort_unstable();
        let rank = (p / 100.0) * (sorted.len() - 1) as f64;
        sorted[rank as usize]
    }
    /// Median latency.
    fn p50(&self) -> u64 {
        self.percentile(50.0)
    }
    /// 99th-percentile latency.
    fn p99(&self) -> u64 {
        self.percentile(99.0)
    }
    /// 99.9th-percentile latency.
    fn p999(&self) -> u64 {
        self.percentile(99.9)
    }
    /// Largest recorded latency (0 when empty).
    fn max(&self) -> u64 {
        self.latencies.iter().copied().max().unwrap_or(0)
    }
    /// Arithmetic mean latency (0.0 when empty).
    fn mean(&self) -> f64 {
        match self.latencies.len() {
            0 => 0.0,
            n => self.latencies.iter().sum::<u64>() as f64 / n as f64,
        }
    }
    /// Number of samples recorded so far.
    fn count(&self) -> usize {
        self.latencies.len()
    }
    /// Equal-width histogram over [min, max] with `num_buckets` buckets;
    /// each entry is (bucket_start_ns, bucket_end_ns, count).
    fn histogram(&self, num_buckets: usize) -> Vec<(u64, u64, usize)> {
        let lo = match self.latencies.iter().min() {
            Some(&m) => m,
            None => return Vec::new(),
        };
        let hi = self.max();
        // Bucket width; floored at 1 ns so the division below is defined.
        let width = ((hi - lo + 1) / num_buckets as u64).max(1);
        let mut counts = vec![0usize; num_buckets];
        for &lat in &self.latencies {
            // Clamp into the last bucket so `lat == hi` stays in range.
            let idx = ((lat - lo) / width).min(num_buckets as u64 - 1) as usize;
            counts[idx] += 1;
        }
        counts
            .iter()
            .enumerate()
            .map(|(i, &count)| {
                let start = lo + i as u64 * width;
                (start, start + width, count)
            })
            .collect()
    }
}
// ============================================================================
// SIMPLE MIN-CUT GATE
// ============================================================================
use std::collections::{HashMap, HashSet};
/// Simple s-t min-cut coherence gate over the detector grid.
///
/// Fired detectors weaken the edges around them, so a low cut between the
/// left and right grid boundaries signals a likely error chain.
struct MinCutGate {
    /// Cut value at/above which a round is permitted
    threshold: f64,
    /// Grid side length (code_distance - 1)
    grid_size: usize,
    /// Weight of edges between non-fired detectors: max(-ln(p), 0.1)
    base_weight: f64,
}
impl MinCutGate {
    /// Build a gate for the given code distance / physical error rate.
    fn new(code_distance: usize, error_rate: f64, threshold: f64) -> Self {
        Self {
            threshold,
            grid_size: code_distance - 1,
            // Weight derived from the error rate; floored at 0.1 so edges
            // never become effectively free even for large p.
            base_weight: (-error_rate.ln()).max(0.1),
        }
    }
    /// Run one syndrome through the gate, timing the whole evaluation.
    fn process(&self, syndrome: &DetectorBitmap) -> GateResult {
        let start = Instant::now();
        // Compute min-cut
        let fired_set: HashSet<usize> = syndrome.iter_fired().collect();
        let min_cut = self.compute_min_cut(&fired_set);
        // Compute risk: linear in how far the cut falls below threshold
        let risk = if min_cut < self.threshold {
            1.0 - (min_cut / self.threshold)
        } else {
            0.0
        };
        // Compute region mask (simplified: which quadrants have errors)
        let region_mask = self.compute_region_mask(&fired_set);
        let latency_ns = start.elapsed().as_nanos() as u64;
        GateResult {
            min_cut,
            risk,
            region_mask,
            // Permit at full threshold, Defer down to half of it, Deny below
            decision: if min_cut >= self.threshold {
                Decision::Permit
            } else if min_cut >= self.threshold * 0.5 {
                Decision::Defer
            } else {
                Decision::Deny
            },
            latency_ns,
            fired_count: fired_set.len(),
        }
    }
    /// s-t max-flow (= min-cut by duality) between the left and right grid
    /// boundaries, via Edmonds-Karp (BFS augmenting paths).
    fn compute_min_cut(&self, fired_set: &HashSet<usize>) -> f64 {
        // Simple s-t min-cut using Edmonds-Karp
        let mut adj: HashMap<u32, Vec<(u32, f64)>> = HashMap::new();
        // Edges touching a fired detector are nearly free to cut
        let fired_weight = 0.01;
        // Build grid: 4-connected lattice; every edge is added in both
        // directions so the flow graph is symmetric.
        for row in 0..self.grid_size {
            for col in 0..self.grid_size {
                let node = (row * self.grid_size + col) as u32;
                let is_fired = fired_set.contains(&(node as usize));
                if col + 1 < self.grid_size {
                    let right = (row * self.grid_size + col + 1) as u32;
                    let right_fired = fired_set.contains(&(right as usize));
                    let weight = if is_fired || right_fired {
                        fired_weight
                    } else {
                        self.base_weight
                    };
                    adj.entry(node).or_default().push((right, weight));
                    adj.entry(right).or_default().push((node, weight));
                }
                if row + 1 < self.grid_size {
                    let bottom = ((row + 1) * self.grid_size + col) as u32;
                    let bottom_fired = fired_set.contains(&(bottom as usize));
                    let weight = if is_fired || bottom_fired {
                        fired_weight
                    } else {
                        self.base_weight
                    };
                    adj.entry(node).or_default().push((bottom, weight));
                    adj.entry(bottom).or_default().push((node, weight));
                }
            }
        }
        // Virtual source/sink sit outside the grid's node numbering
        let source = (self.grid_size * self.grid_size) as u32;
        let sink = source + 1;
        // Connect boundaries: source to the left column, right column to sink
        let boundary_weight = self.base_weight * 2.0;
        for row in 0..self.grid_size {
            let left = (row * self.grid_size) as u32;
            let right = (row * self.grid_size + self.grid_size - 1) as u32;
            adj.entry(source).or_default().push((left, boundary_weight));
            adj.entry(left).or_default().push((source, boundary_weight));
            adj.entry(right).or_default().push((sink, boundary_weight));
            adj.entry(sink).or_default().push((right, boundary_weight));
        }
        // Max-flow = min-cut; capacities aggregate parallel edges
        let mut capacity: HashMap<(u32, u32), f64> = HashMap::new();
        for (&u, neighbors) in &adj {
            for &(v, w) in neighbors {
                *capacity.entry((u, v)).or_default() += w;
            }
        }
        let mut max_flow = 0.0;
        loop {
            // BFS for augmenting path (shortest path in edge count)
            let mut parent: HashMap<u32, u32> = HashMap::new();
            let mut visited = HashSet::new();
            let mut queue = std::collections::VecDeque::new();
            queue.push_back(source);
            visited.insert(source);
            while let Some(u) = queue.pop_front() {
                if u == sink {
                    break;
                }
                if let Some(neighbors) = adj.get(&u) {
                    for &(v, _) in neighbors {
                        let cap = capacity.get(&(u, v)).copied().unwrap_or(0.0);
                        // 1e-10 epsilon guards against float residue on
                        // saturated edges
                        if !visited.contains(&v) && cap > 1e-10 {
                            visited.insert(v);
                            parent.insert(v, u);
                            queue.push_back(v);
                        }
                    }
                }
            }
            // No augmenting path left: flow is maximal
            if !parent.contains_key(&sink) {
                break;
            }
            // Find bottleneck capacity along the path
            let mut path_flow = f64::INFINITY;
            let mut v = sink;
            while v != source {
                let u = parent[&v];
                path_flow = path_flow.min(capacity.get(&(u, v)).copied().unwrap_or(0.0));
                v = u;
            }
            // Update residual capacities: forward edges shrink, reverse grow
            v = sink;
            while v != source {
                let u = parent[&v];
                *capacity.entry((u, v)).or_default() -= path_flow;
                *capacity.entry((v, u)).or_default() += path_flow;
                v = u;
            }
            max_flow += path_flow;
        }
        max_flow
    }
    /// Bitmask of grid quadrants (bits 0-3) that contain fired detectors.
    fn compute_region_mask(&self, fired_set: &HashSet<usize>) -> u64 {
        // Split into 4 quadrants
        let half = self.grid_size / 2;
        let mut mask = 0u64;
        for &det in fired_set {
            let row = det / self.grid_size;
            let col = det % self.grid_size;
            let quadrant = match (row < half, col < half) {
                (true, true) => 0,   // Top-left
                (true, false) => 1,  // Top-right
                (false, true) => 2,  // Bottom-left
                (false, false) => 3, // Bottom-right
            };
            mask |= 1 << quadrant;
        }
        mask
    }
}
/// Gate decision for one round, ordered from safest to riskiest.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Decision {
    /// Cut is at or above the threshold — safe to proceed
    Permit,
    /// Cut is between 50% and 100% of the threshold — borderline
    Defer,
    /// Cut is below half the threshold — high risk
    Deny,
}
/// Per-round output of `MinCutGate::process`.
#[derive(Debug, Clone)]
struct GateResult {
    /// Computed min-cut (max-flow) value
    min_cut: f64,
    /// Normalized risk in [0, 1]; 0 when the cut meets the threshold
    risk: f64,
    /// Bitmask of quadrants containing fired detectors
    region_mask: u64,
    /// Permit / Defer / Deny verdict
    decision: Decision,
    /// Gate processing latency in nanoseconds
    latency_ns: u64,
    /// Number of fired detectors in this round
    fired_count: usize,
}
// ============================================================================
// METRICS OUTPUT
// ============================================================================
/// Top-level JSON metrics document written at the end of a run.
#[derive(Debug, serde::Serialize)]
struct DemoMetrics {
    config: MetricsConfig,
    summary: MetricsSummary,
    latency: LatencyMetrics,
    decisions: DecisionMetrics,
    histogram: Vec<HistogramBucket>,
}
/// Echo of the run configuration for reproducibility.
#[derive(Debug, serde::Serialize)]
struct MetricsConfig {
    code_distance: usize,
    error_rate: f64,
    num_rounds: usize,
    seed: u64,
    threshold: f64,
}
/// Aggregate throughput figures for the whole run.
#[derive(Debug, serde::Serialize)]
struct MetricsSummary {
    total_rounds: usize,
    total_time_ms: f64,
    throughput_per_sec: f64,
    total_fired: usize,
    avg_fired_per_round: f64,
}
/// Latency distribution, all values in nanoseconds.
#[derive(Debug, serde::Serialize)]
struct LatencyMetrics {
    mean_ns: f64,
    p50_ns: u64,
    p99_ns: u64,
    p999_ns: u64,
    max_ns: u64,
}
/// Gate decision counts and derived rates.
#[derive(Debug, serde::Serialize)]
struct DecisionMetrics {
    permits: usize,
    defers: usize,
    denies: usize,
    permit_rate: f64,
    deny_rate: f64,
}
/// One latency-histogram bucket: [start_ns, end_ns) plus its sample count.
#[derive(Debug, serde::Serialize)]
struct HistogramBucket {
    start_ns: u64,
    end_ns: u64,
    count: usize,
}
// ============================================================================
// MAIN
// ============================================================================
/// Demo entry point.
///
/// Streams syndromes from a Stim-backed source through the min-cut gate,
/// prints live per-round status and a summary table, writes an optional
/// JSON metrics file, and finishes with an ASCII latency histogram.
fn main() {
    let config = parse_args();
    println!("╔═══════════════════════════════════════════════════════════════════╗");
    println!("║ ruQu Demo - Proof Artifact ║");
    println!("╠═══════════════════════════════════════════════════════════════════╣");
    println!(
        "║ Code Distance: d={} | Error Rate: {:.4} | Rounds: {:>6}",
        config.code_distance, config.error_rate, config.num_rounds
    );
    println!(
        "║ Threshold: {:.2} | Seed: {:>10}",
        config.threshold, config.seed
    );
    println!("╚═══════════════════════════════════════════════════════════════════╝");
    println!();
    // Initialize components
    let surface_config =
        SurfaceCodeConfig::new(config.code_distance, config.error_rate).with_seed(config.seed);
    let mut syndrome_source = match StimSyndromeSource::new(surface_config) {
        Ok(s) => s,
        Err(e) => {
            eprintln!("Failed to create syndrome source: {:?}", e);
            std::process::exit(1);
        }
    };
    let gate = MinCutGate::new(config.code_distance, config.error_rate, config.threshold);
    let mut latency_tracker = LatencyTracker::new(1000);
    // Counters
    let mut permits = 0usize;
    let mut defers = 0usize;
    let mut denies = 0usize;
    let mut total_fired = 0usize;
    // Run demo
    println!("Round │ Cut │ Risk │ Decision │ Regions │ Latency │ Fired");
    println!("──────┼───────┼───────┼──────────┼─────────┼─────────┼──────");
    let start_time = Instant::now();
    for round in 0..config.num_rounds {
        // Get syndrome; sampling failures skip the round rather than abort
        let syndrome: DetectorBitmap = match syndrome_source.sample() {
            Ok(s) => s,
            Err(_) => continue,
        };
        // Process through gate
        let result = gate.process(&syndrome);
        latency_tracker.record(result.latency_ns);
        total_fired += result.fired_count;
        // Update counters
        match result.decision {
            Decision::Permit => permits += 1,
            Decision::Defer => defers += 1,
            Decision::Deny => denies += 1,
        }
        // Print live status on the configured interval, and always on DENY
        if round % config.print_interval == 0 || result.decision == Decision::Deny {
            let decision_str = match result.decision {
                Decision::Permit => "\x1b[32mPERMIT\x1b[0m ",
                Decision::Defer => "\x1b[33mDEFER\x1b[0m ",
                Decision::Deny => "\x1b[31mDENY\x1b[0m ",
            };
            println!(
                "{:>5}{:>5.2}{:>5.2}{}{:>07b}{:>5}ns │ {:>3}",
                round,
                result.min_cut,
                result.risk,
                decision_str,
                result.region_mask,
                result.latency_ns,
                result.fired_count
            );
        }
    }
    let total_time = start_time.elapsed();
    // Summary
    println!();
    println!("╔═══════════════════════════════════════════════════════════════════╗");
    println!("║ RESULTS SUMMARY ║");
    println!("╠═══════════════════════════════════════════════════════════════════╣");
    println!(
        "║ Total Time: {:>10.2} ms ║",
        total_time.as_secs_f64() * 1000.0
    );
    println!(
        "║ Throughput: {:>10.0} rounds/sec ║",
        config.num_rounds as f64 / total_time.as_secs_f64()
    );
    println!(
        "║ Avg Fired/Round: {:>10.2}",
        total_fired as f64 / config.num_rounds as f64
    );
    println!("╠═══════════════════════════════════════════════════════════════════╣");
    println!("║ Latency: ║");
    println!(
        "║ Mean: {:>8.0} ns ║",
        latency_tracker.mean()
    );
    println!(
        "║ P50: {:>8} ns ║",
        latency_tracker.p50()
    );
    println!(
        "║ P99: {:>8} ns ║",
        latency_tracker.p99()
    );
    println!(
        "║ P999: {:>8} ns ║",
        latency_tracker.p999()
    );
    println!(
        "║ Max: {:>8} ns ║",
        latency_tracker.max()
    );
    println!("╠═══════════════════════════════════════════════════════════════════╣");
    println!("║ Decisions: ║");
    println!(
        "║ Permits: {:>6} ({:>5.1}%) ║",
        permits,
        permits as f64 / config.num_rounds as f64 * 100.0
    );
    println!(
        "║ Defers: {:>6} ({:>5.1}%) ║",
        defers,
        defers as f64 / config.num_rounds as f64 * 100.0
    );
    println!(
        "║ Denies: {:>6} ({:>5.1}%) ║",
        denies,
        denies as f64 / config.num_rounds as f64 * 100.0
    );
    println!("╚═══════════════════════════════════════════════════════════════════╝");
    // Write metrics file (skipped when --output was never set to None path)
    if let Some(output_file) = &config.output_file {
        let metrics = DemoMetrics {
            config: MetricsConfig {
                code_distance: config.code_distance,
                error_rate: config.error_rate,
                num_rounds: config.num_rounds,
                seed: config.seed,
                threshold: config.threshold,
            },
            summary: MetricsSummary {
                total_rounds: config.num_rounds,
                total_time_ms: total_time.as_secs_f64() * 1000.0,
                throughput_per_sec: config.num_rounds as f64 / total_time.as_secs_f64(),
                total_fired,
                avg_fired_per_round: total_fired as f64 / config.num_rounds as f64,
            },
            latency: LatencyMetrics {
                mean_ns: latency_tracker.mean(),
                p50_ns: latency_tracker.p50(),
                p99_ns: latency_tracker.p99(),
                p999_ns: latency_tracker.p999(),
                max_ns: latency_tracker.max(),
            },
            decisions: DecisionMetrics {
                permits,
                defers,
                denies,
                permit_rate: permits as f64 / config.num_rounds as f64,
                deny_rate: denies as f64 / config.num_rounds as f64,
            },
            histogram: latency_tracker
                .histogram(20)
                .into_iter()
                .map(|(start, end, count)| HistogramBucket {
                    start_ns: start,
                    end_ns: end,
                    count,
                })
                .collect(),
        };
        match File::create(output_file) {
            Ok(mut file) => {
                let json = serde_json::to_string_pretty(&metrics).unwrap();
                file.write_all(json.as_bytes()).unwrap();
                println!("\nMetrics written to: {}", output_file);
            }
            Err(e) => {
                eprintln!("Failed to write metrics file: {}", e);
            }
        }
    }
    // Latency histogram, bar length scaled to the fullest bucket
    println!("\nLatency Histogram:");
    let histogram = latency_tracker.histogram(10);
    let max_count = histogram.iter().map(|(_, _, c)| *c).max().unwrap_or(1);
    for (start, end, count) in histogram {
        let bar_len = (count as f64 / max_count as f64 * 40.0) as usize;
        // BUG FIX: the bar was built with "".repeat(bar_len), which always
        // produces an empty string; repeat a block character instead so the
        // histogram is actually visible.
        let bar = "█".repeat(bar_len);
        println!("{:>8}-{:<8}{:<40} {:>5}", start, end, bar, count);
    }
}

View File

@@ -0,0 +1,856 @@
//! ruQu Predictive Evaluation Binary
//!
//! This binary produces formal evaluation metrics for ruQu's predictive capabilities.
//! It demonstrates that ruQu can detect logical failure risk BEFORE it manifests.
//!
//! ## Usage
//!
//! ```bash
//! cargo run --bin ruqu_predictive_eval --release -- \
//! --distance 5 \
//! --error-rate 0.001 \
//! --runs 100
//! ```
//!
//! ## Output
//!
//! Produces DARPA-style evaluation metrics including:
//! - Lead time distribution (median, p10, p90)
//! - Precision and recall
//! - False alarm rate per 100k cycles
//! - Actionability for different mitigation windows
use std::collections::{HashMap, HashSet, VecDeque};
use std::time::Instant;
use ruqu::stim::{StimSyndromeSource, SurfaceCodeConfig};
use ruqu::syndrome::DetectorBitmap;
// ============================================================================
// CONFIGURATION
// ============================================================================
/// Evaluation run configuration, populated from CLI flags.
#[derive(Debug, Clone)]
struct EvalConfig {
    /// Surface code distance
    code_distance: usize,
    /// Physical error rate
    error_rate: f64,
    /// Number of independent evaluation runs
    num_runs: usize,
    /// QEC cycles simulated per run
    cycles_per_run: usize,
    /// Base random seed
    seed: u64,
    /// Noise injection strategy
    inject_mode: InjectMode,
}
/// Noise injection strategy for evaluation runs.
#[derive(Debug, Clone, Copy)]
enum InjectMode {
    /// Independent noise only (baseline)
    Independent,
    /// Correlated burst injection
    CorrelatedBurst,
    /// Both modes for comparison
    Both,
}
impl Default for EvalConfig {
    /// Defaults match the CLI help text (d=5, p=0.001, 100 runs × 500 cycles).
    fn default() -> Self {
        Self {
            code_distance: 5,
            error_rate: 0.001,
            num_runs: 100,
            cycles_per_run: 500,
            seed: 42,
            inject_mode: InjectMode::CorrelatedBurst,
        }
    }
}
/// Parse command-line arguments into an `EvalConfig`.
///
/// Flags mirror `print_help`. Malformed numeric values abort via `expect`,
/// and an invalid `--inject` mode panics as before; a flag missing its
/// value now exits with a diagnostic instead of panicking with an
/// index-out-of-bounds.
fn parse_args() -> EvalConfig {
    // Fetch the argument at `i` (the value slot after a flag) or exit with
    // a clear error naming the flag that is missing its value.
    fn value_at<'a>(args: &'a [String], i: usize, flag: &str) -> &'a str {
        match args.get(i) {
            Some(value) => value,
            None => {
                eprintln!("Missing value for {}", flag);
                std::process::exit(1);
            }
        }
    }
    let args: Vec<String> = std::env::args().collect();
    let mut config = EvalConfig::default();
    let mut i = 1;
    while i < args.len() {
        match args[i].as_str() {
            "--distance" | "-d" => {
                i += 1;
                config.code_distance = value_at(&args, i, "--distance")
                    .parse()
                    .expect("Invalid distance");
            }
            "--error-rate" | "-e" => {
                i += 1;
                config.error_rate = value_at(&args, i, "--error-rate")
                    .parse()
                    .expect("Invalid error rate");
            }
            "--runs" | "-r" => {
                i += 1;
                config.num_runs = value_at(&args, i, "--runs")
                    .parse()
                    .expect("Invalid runs");
            }
            "--cycles" | "-c" => {
                i += 1;
                config.cycles_per_run = value_at(&args, i, "--cycles")
                    .parse()
                    .expect("Invalid cycles");
            }
            "--seed" | "-s" => {
                i += 1;
                config.seed = value_at(&args, i, "--seed")
                    .parse()
                    .expect("Invalid seed");
            }
            "--inject" => {
                i += 1;
                config.inject_mode = match value_at(&args, i, "--inject") {
                    "independent" => InjectMode::Independent,
                    "burst" | "correlated" | "correlated_burst" => InjectMode::CorrelatedBurst,
                    "both" => InjectMode::Both,
                    other => panic!("Invalid inject mode: {}", other),
                };
            }
            "--help" | "-h" => {
                print_help();
                std::process::exit(0);
            }
            _ => {
                eprintln!("Unknown argument: {}", args[i]);
                print_help();
                std::process::exit(1);
            }
        }
        i += 1;
    }
    config
}
/// Print CLI usage to stdout (used by `--help` and on argument errors).
fn print_help() {
    println!("ruQu Predictive Evaluation");
    println!();
    println!("USAGE:");
    println!("    ruqu_predictive_eval [OPTIONS]");
    println!();
    println!("OPTIONS:");
    println!("    -d, --distance <N>      Code distance (default: 5)");
    println!("    -e, --error-rate <F>    Physical error rate (default: 0.001)");
    println!("    -r, --runs <N>          Number of evaluation runs (default: 100)");
    println!("    -c, --cycles <N>        Cycles per run (default: 500)");
    println!("    -s, --seed <N>          Random seed (default: 42)");
    println!("    --inject <MODE>         Injection mode: independent, burst, both");
    println!("    -h, --help              Print this help");
}
// ============================================================================
// STRUCTURAL SIGNAL WITH DYNAMICS
// ============================================================================
/// Structural signal with cut dynamics (velocity and curvature)
#[derive(Debug, Clone, Default)]
pub struct StructuralSignal {
    /// Current min-cut value
    pub cut: f64,
    /// Rate of change (Δλ)
    pub velocity: f64,
    /// Acceleration of change (Δ²λ)
    pub curvature: f64,
    /// Baseline mean for adaptive thresholding
    pub baseline_mean: f64,
    /// Baseline standard deviation
    pub baseline_std: f64,
}
/// Early-warning detector that watches the min-cut trace.
///
/// Keeps bounded histories of the cut value and its first difference,
/// learns a baseline during a warm-up window, and raises a warning only
/// when the cut is structurally low, dropping quickly, and accompanied
/// by enough detector events.
struct WarningDetector {
    // Bounded trace of recent min-cut values.
    cuts: VecDeque<f64>,
    // Bounded trace of successive cut differences (velocities).
    velocities: VecDeque<f64>,
    // Capacity bound shared by both traces.
    cap: usize,
    // Samples used to learn the baseline before warnings are allowed.
    warmup_samples: usize,
    // Mean cut observed during warm-up (frozen afterwards).
    baseline_mean: f64,
    // Sample standard deviation of the warm-up cuts.
    baseline_std: f64,
    // Sigma multiplier for the adaptive threshold.
    theta_sigma: f64,
    // Absolute cut threshold.
    theta_absolute: f64,
    // Minimum drop over `lookback` samples to count as rapid.
    delta: f64,
    // Window (in samples) over which the drop is measured.
    lookback: usize,
    // Minimum fired-detector count required alongside the structural cue.
    min_event_count: usize,
}
impl WarningDetector {
    /// Detector with the tuned default thresholds.
    fn new() -> Self {
        Self {
            cuts: VecDeque::new(),
            velocities: VecDeque::new(),
            cap: 100,
            warmup_samples: 20,
            baseline_mean: 0.0,
            baseline_std: 0.0,
            theta_sigma: 2.5,
            theta_absolute: 2.0,
            delta: 1.2,
            lookback: 5,
            min_event_count: 5,
        }
    }
    /// Record one min-cut sample, updating the velocity trace and (while
    /// still in warm-up) the baseline statistics.
    fn push(&mut self, cut: f64) {
        if let Some(&last) = self.cuts.back() {
            self.velocities.push_back(cut - last);
            if self.velocities.len() > self.cap {
                self.velocities.pop_front();
            }
        }
        self.cuts.push_back(cut);
        if self.cuts.len() > self.cap {
            self.cuts.pop_front();
        }
        // The baseline is only refined during warm-up; afterwards it is frozen.
        let n = self.cuts.len();
        if n <= self.warmup_samples {
            let mean = self.cuts.iter().sum::<f64>() / n as f64;
            self.baseline_mean = mean;
            if n > 1 {
                // Sample variance (n - 1 denominator).
                let var = self
                    .cuts
                    .iter()
                    .map(|x| (x - mean).powi(2))
                    .sum::<f64>()
                    / (n - 1) as f64;
                self.baseline_std = var.sqrt();
            }
        }
    }
    /// Most recent cut value (0.0 before any sample).
    fn current(&self) -> f64 {
        self.cuts.back().copied().unwrap_or(0.0)
    }
    /// Most recent cut difference (0.0 before two samples).
    fn velocity(&self) -> f64 {
        self.velocities.back().copied().unwrap_or(0.0)
    }
    /// Difference of the last two velocities (0.0 before two velocities exist).
    fn curvature(&self) -> f64 {
        let n = self.velocities.len();
        if n < 2 {
            0.0
        } else {
            self.velocities[n - 1] - self.velocities[n - 2]
        }
    }
    /// Snapshot of the current structural state.
    fn signal(&self) -> StructuralSignal {
        StructuralSignal {
            cut: self.current(),
            velocity: self.velocity(),
            curvature: self.curvature(),
            baseline_mean: self.baseline_mean,
            baseline_std: self.baseline_std,
        }
    }
    /// Change in cut over the lookback window (0.0 until enough samples).
    fn drop_from_lookback(&self) -> f64 {
        let n = self.cuts.len();
        if n <= self.lookback {
            return 0.0;
        }
        self.cuts[n - 1] - self.cuts[n - 1 - self.lookback]
    }
    /// True when all three warning conditions hold simultaneously: the cut
    /// is low (adaptively or absolutely), it dropped rapidly over the
    /// lookback window, and enough detectors fired this round.
    fn is_warning(&self, event_count: usize) -> bool {
        if self.cuts.len() < self.warmup_samples {
            return false;
        }
        if self.baseline_mean == 0.0 {
            return false;
        }
        let adaptive = (self.baseline_mean - self.theta_sigma * self.baseline_std).max(0.5);
        let structurally_low =
            self.current() <= adaptive || self.current() <= self.theta_absolute;
        let rapid_drop = self.drop_from_lookback() <= -self.delta;
        let intense = event_count >= self.min_event_count;
        structurally_low && rapid_drop && intense
    }
}
// ============================================================================
// QEC GRAPH CONSTRUCTION
// ============================================================================
/// Lightweight s-t graph used to approximate a source/sink min-cut.
///
/// Nodes are detector indices; terminal attachments are stored separately
/// as (node, capacity) pairs.
struct STMinCutGraph {
    // Number of detector nodes (terminal edges are not counted here).
    num_nodes: u32,
    // Undirected weighted edges between detector nodes.
    edges: Vec<(u32, u32, f64)>,
    // (node, capacity) links from the virtual source.
    source_edges: Vec<(u32, f64)>,
    // (node, capacity) links to the virtual sink.
    sink_edges: Vec<(u32, f64)>,
}
impl STMinCutGraph {
    /// Empty graph over `num_nodes` detector nodes.
    fn new(num_nodes: u32) -> Self {
        Self {
            num_nodes,
            edges: Vec::new(),
            source_edges: Vec::new(),
            sink_edges: Vec::new(),
        }
    }
    /// Add an undirected edge of the given weight.
    fn add_edge(&mut self, u: u32, v: u32, weight: f64) {
        self.edges.push((u, v, weight));
    }
    /// Attach `node` to the virtual source with capacity `weight`.
    fn connect_source(&mut self, node: u32, weight: f64) {
        self.source_edges.push((node, weight));
    }
    /// Attach `node` to the virtual sink with capacity `weight`.
    fn connect_sink(&mut self, node: u32, weight: f64) {
        self.sink_edges.push((node, weight));
    }
    /// Single-pass BFS approximation of the s-t min-cut.
    ///
    /// Each node is visited at most once, carrying the bottleneck capacity
    /// of the first path that reached it; flow arriving at sink-attached
    /// nodes is summed and subtracted from the total source capacity. The
    /// result is floored at 0.1 so downstream ratios stay finite. This is
    /// an approximation, not an exact max-flow/min-cut.
    fn compute_min_cut(&self) -> f64 {
        // Adjacency lists, built in edge-insertion order so the traversal
        // (and hence the approximation) is deterministic.
        let mut adjacency: HashMap<u32, Vec<(u32, f64)>> = HashMap::new();
        for &(u, v, w) in &self.edges {
            adjacency.entry(u).or_default().push((v, w));
            adjacency.entry(v).or_default().push((u, w));
        }
        let mut seen = vec![false; self.num_nodes as usize];
        let mut frontier = VecDeque::new();
        // Seed the search with the source-attached nodes.
        for &(node, cap) in &self.source_edges {
            if !seen[node as usize] {
                seen[node as usize] = true;
                frontier.push_back((node, cap));
            }
        }
        let sinks: HashSet<u32> = self.sink_edges.iter().map(|&(n, _)| n).collect();
        let mut reached_flow = 0.0;
        while let Some((node, flow)) = frontier.pop_front() {
            if sinks.contains(&node) {
                reached_flow += flow;
                continue;
            }
            if let Some(neighbors) = adjacency.get(&node) {
                for &(next, cap) in neighbors {
                    if !seen[next as usize] {
                        seen[next as usize] = true;
                        frontier.push_back((next, flow.min(cap)));
                    }
                }
            }
        }
        let source_capacity: f64 = self.source_edges.iter().map(|&(_, c)| c).sum();
        (source_capacity - reached_flow).max(0.1)
    }
}
/// Build the structural s-t graph for one syndrome round.
///
/// The X-stabilizer grid of a distance-d code is laid out as a
/// (d-1) x (d-1) lattice. Edges touching a fired detector are nearly
/// free to cut (weight 0.01); quiet edges carry -ln(p). The left/right
/// grid columns attach to the virtual source/sink respectively.
fn build_qec_graph(
    code_distance: usize,
    error_rate: f64,
    syndrome: &DetectorBitmap,
) -> STMinCutGraph {
    let grid = code_distance - 1;
    let detector_total = 2 * grid * grid;
    let mut graph = STMinCutGraph::new(detector_total as u32);
    let fired: HashSet<usize> = syndrome.iter_fired().collect();
    let quiet_weight = (-error_rate.ln()).max(0.1);
    let hot_weight = 0.01;
    // An edge is "hot" (cheap to cut) when either endpoint has fired.
    let edge_weight = |a: u32, b: u32| {
        if fired.contains(&(a as usize)) || fired.contains(&(b as usize)) {
            hot_weight
        } else {
            quiet_weight
        }
    };
    // 4-connected lattice over the X-stabilizer grid: right then bottom
    // neighbor per cell, preserving a deterministic edge order.
    for row in 0..grid {
        for col in 0..grid {
            let node = (row * grid + col) as u32;
            if col + 1 < grid {
                let right = node + 1;
                graph.add_edge(node, right, edge_weight(node, right));
            }
            if row + 1 < grid {
                let below = node + grid as u32;
                graph.add_edge(node, below, edge_weight(node, below));
            }
        }
    }
    // Boundary columns get stronger terminal links than interior edges.
    let boundary_weight = quiet_weight * 2.0;
    for row in 0..grid {
        graph.connect_source((row * grid) as u32, boundary_weight);
        graph.connect_sink((row * grid + grid - 1) as u32, boundary_weight);
    }
    graph
}
// ============================================================================
// GROUND TRUTH
// ============================================================================
/// Ground-truth oracle: does a chain of fired detectors span the grid?
///
/// Runs a BFS over fired detectors, seeded at the left boundary
/// (col == 0), and reports true if it reaches the right boundary
/// (col == grid-1) via 4-connected neighbors. This is a structural proxy
/// for a logical failure, not a decoder-level determination.
fn is_logical_failure(syndrome: &DetectorBitmap, code_distance: usize) -> bool {
    let grid = code_distance - 1;
    let fired: HashSet<usize> = syndrome.iter_fired().collect();
    if fired.is_empty() {
        return false;
    }
    // Seed the search with fired detectors in the leftmost column.
    let mut frontier: VecDeque<usize> = (0..grid)
        .map(|row| row * grid)
        .filter(|d| fired.contains(d))
        .collect();
    if frontier.is_empty() {
        return false;
    }
    let mut seen: HashSet<usize> = frontier.iter().copied().collect();
    while let Some(det) = frontier.pop_front() {
        let row = det / grid;
        let col = det % grid;
        if col == grid - 1 {
            // A fired chain connects the left and right boundaries.
            return true;
        }
        // Up to four lattice neighbors, clipped at the grid edges.
        let mut candidates = [None; 4];
        if col > 0 {
            candidates[0] = Some(det - 1);
        }
        if col + 1 < grid {
            candidates[1] = Some(det + 1);
        }
        if row > 0 {
            candidates[2] = Some(det - grid);
        }
        if row + 1 < grid {
            candidates[3] = Some(det + grid);
        }
        for neighbor in candidates.into_iter().flatten() {
            // Only walk through fired detectors, each at most once.
            if fired.contains(&neighbor) && seen.insert(neighbor) {
                frontier.push_back(neighbor);
            }
        }
    }
    false
}
// ============================================================================
// EVALUATION RESULTS
// ============================================================================
/// Aggregated counters and lead times across all evaluation runs.
#[derive(Default)]
struct EvalResults {
    // Total QEC cycles simulated.
    total_cycles: u64,
    // Logical-failure events observed.
    failures_observed: u64,
    // Warning onsets raised by the detector.
    warnings_issued: u64,
    // Warnings followed by a failure (each credited with a lead time).
    true_warnings: u64,
    // Warnings that timed out without a failure.
    false_warnings: u64,
    // Cycles between each credited warning and its failure.
    lead_times: Vec<u64>,
}
impl EvalResults {
    /// Fraction of issued warnings that preceded a failure.
    fn precision(&self) -> f64 {
        if self.warnings_issued == 0 {
            0.0
        } else {
            self.true_warnings as f64 / self.warnings_issued as f64
        }
    }
    /// Fraction of observed failures that were preceded by a warning.
    fn recall(&self) -> f64 {
        if self.failures_observed == 0 {
            0.0
        } else {
            self.true_warnings as f64 / self.failures_observed as f64
        }
    }
    /// False-warning rate normalized to 100k cycles.
    fn false_alarms_per_100k(&self) -> f64 {
        if self.total_cycles == 0 {
            0.0
        } else {
            self.false_warnings as f64 / self.total_cycles as f64 * 100_000.0
        }
    }
    /// Lead time at the given fraction of the sorted distribution
    /// (truncating index, clamped to the last element; 0.0 when empty).
    fn percentile_lead_time(&self, frac: f64) -> f64 {
        if self.lead_times.is_empty() {
            return 0.0;
        }
        let mut sorted = self.lead_times.clone();
        sorted.sort_unstable();
        let idx = ((sorted.len() as f64 * frac) as usize).min(sorted.len() - 1);
        sorted[idx] as f64
    }
    /// Median lead time in cycles.
    fn median_lead_time(&self) -> f64 {
        self.percentile_lead_time(0.5)
    }
    /// 10th-percentile lead time in cycles.
    fn p10_lead_time(&self) -> f64 {
        self.percentile_lead_time(0.10)
    }
    /// 90th-percentile lead time in cycles.
    fn p90_lead_time(&self) -> f64 {
        self.percentile_lead_time(0.90)
    }
    /// Fraction of credited warnings whose lead time was at least
    /// `min_cycles` (i.e. long enough for a given mitigation).
    fn actionable_rate(&self, min_cycles: u64) -> f64 {
        if self.lead_times.is_empty() {
            return 0.0;
        }
        let actionable = self.lead_times.iter().filter(|&&t| t >= min_cycles).count();
        actionable as f64 / self.lead_times.len() as f64
    }
}
// ============================================================================
// SYNDROME GENERATOR WITH BURST INJECTION
// ============================================================================
/// Syndrome source that can overlay correlated burst errors on top of the
/// Stim-sampled independent noise.
struct SyndromeGenerator {
    // Underlying Stim-backed sampler for independent noise.
    source: StimSyndromeSource,
    // True while a burst is currently being injected.
    burst_active: bool,
    // Remaining cycles for the active burst.
    burst_remaining: usize,
    // Detector index at the center of the burst.
    burst_center: usize,
    // Maximum |dr|/|dc| offset of the burst footprint.
    burst_radius: usize,
    // Code distance, used to derive the detector grid size.
    code_distance: usize,
}
impl SyndromeGenerator {
    // Build a generator for a non-rotated distance-d surface code with
    // measurement errors enabled and a single round per sample.
    fn new(code_distance: usize, error_rate: f64, seed: u64) -> Self {
        let config = SurfaceCodeConfig {
            distance: code_distance,
            error_rate,
            seed: Some(seed),
            rounds: 1,
            rotated: false,
            measure_errors: true,
        };
        Self {
            source: StimSyndromeSource::new(config).expect("Failed to create source"),
            burst_active: false,
            burst_remaining: 0,
            burst_center: 0,
            burst_radius: 2,
            code_distance,
        }
    }
    // Arm a correlated burst of `duration` cycles centered at detector
    // `center`; it starts applying on the next `sample()` call.
    fn inject_burst(&mut self, duration: usize, center: usize) {
        self.burst_active = true;
        self.burst_remaining = duration;
        self.burst_center = center;
    }
    // Sample one syndrome round; while a burst is active, additionally set
    // the detectors in a neighborhood around the burst center.
    fn sample(&mut self) -> DetectorBitmap {
        // Fall back to an empty bitmap if the Stim sampler errors out.
        let mut syndrome = self.source.sample().unwrap_or_else(|_| {
            DetectorBitmap::new(2 * (self.code_distance - 1) * (self.code_distance - 1))
        });
        if self.burst_active && self.burst_remaining > 0 {
            let grid_size = self.code_distance - 1;
            let center_row = self.burst_center / grid_size;
            let center_col = self.burst_center % grid_size;
            // Fire detectors at signed offsets (±dr, ±dc) from the center.
            // NOTE(review): offsets with dr == 0 or dc == 0 are produced by
            // more than one sign pair, so some detectors are set twice —
            // harmless, since setting a bit to true is idempotent.
            for dr in 0..=self.burst_radius {
                for dc in 0..=self.burst_radius {
                    if dr == 0 && dc == 0 {
                        continue;
                    }
                    for &(sr, sc) in &[(1i32, 1i32), (1, -1), (-1, 1), (-1, -1)] {
                        let row = center_row as i32 + dr as i32 * sr;
                        let col = center_col as i32 + dc as i32 * sc;
                        // Clip the footprint at the grid edges.
                        if row >= 0 && row < grid_size as i32 && col >= 0 && col < grid_size as i32
                        {
                            let detector = (row as usize) * grid_size + (col as usize);
                            if detector < syndrome.detector_count() {
                                syndrome.set(detector, true);
                            }
                        }
                    }
                }
            }
            // The center itself fires for the whole burst duration.
            if self.burst_center < syndrome.detector_count() {
                syndrome.set(self.burst_center, true);
            }
            self.burst_remaining -= 1;
            if self.burst_remaining == 0 {
                self.burst_active = false;
            }
        }
        syndrome
    }
}
// ============================================================================
// MAIN EVALUATION
// ============================================================================
/// Run the full evaluation: `num_runs` independent simulations of
/// `cycles_per_run` QEC cycles each, returning aggregated metrics.
///
/// Per cycle: sample a syndrome (optionally with one scheduled correlated
/// burst per run), build the structural graph, push the approximate
/// min-cut into the warning detector, and score warnings against the
/// ground-truth failure oracle.
///
/// Changes from the previous revision: the never-read `warning_start`
/// variable is removed, and the burst schedule guards against a
/// divide-by-zero when `cycles_per_run < 2`.
fn run_evaluation(config: &EvalConfig, with_bursts: bool) -> EvalResults {
    let mut results = EvalResults::default();
    let grid_size = config.code_distance - 1;
    let num_detectors = 2 * grid_size * grid_size;
    for run in 0..config.num_runs {
        // Each run gets its own seed: independent but reproducible.
        let seed = config.seed + run as u64;
        let mut generator = SyndromeGenerator::new(config.code_distance, config.error_rate, seed);
        let mut detector = WarningDetector::new();
        let mut warning_active = false;
        let mut cycles_since_warning = 0u64;
        // Schedule the burst somewhere in the middle half of the run.
        // The `.max(1)` avoids `seed % 0` for degenerate cycle counts.
        let burst_cycle = if with_bursts {
            (seed % (config.cycles_per_run as u64 / 2).max(1)) as usize + config.cycles_per_run / 4
        } else {
            usize::MAX
        };
        let burst_duration = 8;
        let burst_center = ((seed * 7) % num_detectors as u64) as usize;
        for cycle in 0..config.cycles_per_run {
            // Inject burst at scheduled time
            if cycle == burst_cycle && with_bursts {
                generator.inject_burst(burst_duration, burst_center);
            }
            let syndrome = generator.sample();
            let graph = build_qec_graph(config.code_distance, config.error_rate, &syndrome);
            let cut = graph.compute_min_cut();
            let event_count = syndrome.fired_count();
            detector.push(cut);
            let is_failure = is_logical_failure(&syndrome, config.code_distance);
            let is_warning = detector.is_warning(event_count);
            // Warning onset: count it and start the lead-time clock.
            if is_warning && !warning_active {
                warning_active = true;
                cycles_since_warning = 0;
                results.warnings_issued += 1;
            }
            if warning_active {
                // Incremented on the onset cycle too, so a credited lead
                // time is always >= 1 cycle.
                cycles_since_warning += 1;
            }
            if is_failure {
                results.failures_observed += 1;
                if warning_active && cycles_since_warning > 0 {
                    results.true_warnings += 1;
                    results.lead_times.push(cycles_since_warning);
                }
                // Reset so one warning is credited at most once.
                warning_active = false;
            }
            // A warning that outlives its horizon without a failure is a
            // false alarm. NOTE(review): warnings still active when the
            // run ends are counted neither true nor false.
            if warning_active && cycles_since_warning > 20 {
                results.false_warnings += 1;
                warning_active = false;
            }
            results.total_cycles += 1;
        }
    }
    results
}
/// Entry point: parse CLI arguments, run the evaluation in the requested
/// noise regime(s), print the formal metrics, the actionability breakdown,
/// and a pass/fail summary against the predictive criteria.
fn main() {
    let config = parse_args();
    let start_time = Instant::now();
    println!();
    println!("╔═══════════════════════════════════════════════════════════════════════╗");
    println!("║                      ruQu PREDICTIVE EVALUATION                       ║");
    println!("║                   Formal Metrics for Early Warning                    ║");
    println!("╚═══════════════════════════════════════════════════════════════════════╝");
    println!();
    println!("Configuration:");
    println!("  Code Distance: d={}", config.code_distance);
    println!("  Error Rate:    {:.4}", config.error_rate);
    println!("  Runs:          {}", config.num_runs);
    println!("  Cycles/Run:    {}", config.cycles_per_run);
    println!("  Seed:          {}", config.seed);
    println!("  Inject Mode:   {:?}", config.inject_mode);
    // Run with correlated bursts
    // (In `Both` mode each regime is printed as it completes and the
    // summary below uses the correlated-burst results.)
    let results = match config.inject_mode {
        InjectMode::Independent => run_evaluation(&config, false),
        InjectMode::CorrelatedBurst => run_evaluation(&config, true),
        InjectMode::Both => {
            println!();
            println!("═══════════════════════════════════════════════════════════════════════");
            println!("  REGIME A: Independent Noise");
            println!("═══════════════════════════════════════════════════════════════════════");
            let independent = run_evaluation(&config, false);
            print_results(&independent);
            println!();
            println!("═══════════════════════════════════════════════════════════════════════");
            println!("  REGIME B: Correlated Bursts");
            println!("═══════════════════════════════════════════════════════════════════════");
            let bursts = run_evaluation(&config, true);
            print_results(&bursts);
            bursts
        }
    };
    // Single-regime modes print their results once, here.
    if !matches!(config.inject_mode, InjectMode::Both) {
        println!();
        println!("═══════════════════════════════════════════════════════════════════════");
        println!("  EVALUATION RESULTS");
        println!("═══════════════════════════════════════════════════════════════════════");
        print_results(&results);
    }
    // Actionability breakdown
    println!();
    println!("═══════════════════════════════════════════════════════════════════════");
    println!("  ACTIONABILITY");
    println!("═══════════════════════════════════════════════════════════════════════");
    println!();
    println!(
        "  Decoder switch (1 cycle):        {:>5.1}%",
        results.actionable_rate(1) * 100.0
    );
    println!(
        "  Extra syndrome round (2 cycles): {:>5.1}%",
        results.actionable_rate(2) * 100.0
    );
    println!(
        "  Region quarantine (5 cycles):    {:>5.1}%",
        results.actionable_rate(5) * 100.0
    );
    println!(
        "  Full recalibration (10 cycles):  {:>5.1}%",
        results.actionable_rate(10) * 100.0
    );
    // Summary
    println!();
    println!("═══════════════════════════════════════════════════════════════════════");
    println!("  SUMMARY");
    println!("═══════════════════════════════════════════════════════════════════════");
    // Pass criteria: recall >= 80%, < 50 false alarms per 100k cycles,
    // median lead time >= 2 cycles.
    let predictive = results.recall() >= 0.80
        && results.false_alarms_per_100k() < 50.0
        && results.median_lead_time() >= 2.0;
    if predictive {
        println!();
        println!("  ✓ PREDICTIVE: ruQu satisfies all criteria");
        println!("    - Recall >= 80%: {:.1}%", results.recall() * 100.0);
        println!(
            "    - False alarms < 50/100k: {:.1}/100k",
            results.false_alarms_per_100k()
        );
        println!(
            "    - Median lead >= 2 cycles: {:.1} cycles",
            results.median_lead_time()
        );
    } else {
        println!();
        println!("  ~ PARTIAL: Some criteria not met");
        println!(
            "    - Recall: {:.1}% (target: >=80%)",
            results.recall() * 100.0
        );
        println!(
            "    - False alarms: {:.1}/100k (target: <50)",
            results.false_alarms_per_100k()
        );
        println!(
            "    - Median lead: {:.1} cycles (target: >=2)",
            results.median_lead_time()
        );
    }
    let elapsed = start_time.elapsed();
    println!();
    println!("  Total time: {:.2}s", elapsed.as_secs_f64());
    println!(
        "  Throughput: {:.0} cycles/sec",
        results.total_cycles as f64 / elapsed.as_secs_f64()
    );
    println!();
}
/// Print the core counters and metrics of one evaluation regime.
fn print_results(results: &EvalResults) {
    println!();
    println!("Failures observed: {}", results.failures_observed);
    println!("Warnings issued:   {}", results.warnings_issued);
    println!("True warnings:     {}", results.true_warnings);
    println!("False warnings:    {}", results.false_warnings);
    println!();
    println!("Lead time (cycles):");
    println!("  median: {:.1}", results.median_lead_time());
    println!("  p10:    {:.1}", results.p10_lead_time());
    println!("  p90:    {:.1}", results.p90_lead_time());
    println!();
    println!("Precision: {:.2}", results.precision());
    println!("Recall:    {:.2}", results.recall());
    println!(
        "False alarms: {:.1} / 100k cycles",
        results.false_alarms_per_100k()
    );
}

View File

@@ -0,0 +1,475 @@
//! Quantum Error Decoder Integration
//!
//! Integrates the fusion-blossom Minimum-Weight Perfect Matching (MWPM) decoder
//! for quantum error syndrome decoding.
//!
//! ## Features
//!
//! When the `decoder` feature is enabled, this module provides:
//! - Real MWPM decoding via fusion-blossom
//! - Syndrome graph construction from detector events
//! - Correction suggestion generation
//!
//! When disabled, a fast heuristic fallback is used.
//!
//! ## Performance
//!
//! fusion-blossom is optimized for real-time decoding:
//! - O(V^3) worst case, O(V) typical for sparse syndromes
//! - Parallelizable for large code distances
use crate::syndrome::DetectorBitmap;
/// Decoder configuration
///
/// Controls matching-graph construction and decode behavior for the
/// MWPM decoder (real or heuristic fallback).
#[derive(Debug, Clone)]
pub struct DecoderConfig {
    /// Code distance (determines graph size)
    pub distance: usize,
    /// Physical error probability
    pub physical_error_rate: f64,
    /// Number of syndrome rounds to consider
    pub window_size: usize,
    /// Enable parallel decoding (when supported)
    pub parallel: bool,
}
impl Default for DecoderConfig {
    /// Defaults: distance-7 code at a 0.1% physical error rate, a
    /// single-round window, and serial decoding.
    fn default() -> Self {
        DecoderConfig {
            distance: 7,
            physical_error_rate: 0.001,
            window_size: 1,
            parallel: false,
        }
    }
}
/// Correction suggestion from the decoder
///
/// Lists the data-qubit indices to flip in each basis, plus a confidence
/// score and the decode latency.
#[derive(Debug, Clone)]
pub struct Correction {
    /// Data qubit indices to apply X correction
    pub x_corrections: Vec<usize>,
    /// Data qubit indices to apply Z correction
    pub z_corrections: Vec<usize>,
    /// Confidence score (0.0 to 1.0)
    pub confidence: f64,
    /// Decoder runtime in nanoseconds
    pub decode_time_ns: u64,
}
impl Default for Correction {
    /// Identity correction: no flips, full confidence, zero decode time.
    /// Hand-written because a derived default would set `confidence`
    /// to 0.0 rather than 1.0.
    fn default() -> Self {
        Correction {
            x_corrections: Vec::new(),
            z_corrections: Vec::new(),
            confidence: 1.0,
            decode_time_ns: 0,
        }
    }
}
/// MWPM Decoder using fusion-blossom
///
/// Provides minimum-weight perfect matching decoding for surface code syndromes.
/// Only compiled with the `decoder` feature; the heuristic fallback
/// below is used otherwise.
#[cfg(feature = "decoder")]
pub struct MWPMDecoder {
    // Decoder parameters (distance, error rate, window).
    config: DecoderConfig,
    /// Pre-built syndrome graph for the surface code
    solver: fusion_blossom::mwpm_solver::SolverSerial,
    /// Vertex count in the matching graph
    vertex_count: usize,
    /// Edge definitions: (v1, v2, weight)
    edges: Vec<(usize, usize, i32)>,
    /// Mapping from detector index to vertex
    detector_to_vertex: Vec<usize>,
}
#[cfg(feature = "decoder")]
impl MWPMDecoder {
    /// Create a new MWPM decoder for a surface code of given distance
    ///
    /// Builds a grid-connected matching graph with one vertex per detector
    /// plus a single virtual boundary vertex; edge weights are derived
    /// from -ln(p) of the physical error rate, scaled to integers.
    pub fn new(config: DecoderConfig) -> Self {
        use fusion_blossom::mwpm_solver::{SolverInitializer, SolverSerial};
        use fusion_blossom::util::*;
        let d = config.distance;
        // The exact stabilizer layout is simplified to a d x d detector
        // grid for this matching graph.
        let num_detectors = d * d;
        let vertex_count = num_detectors + 1; // +1 for virtual boundary vertex
        // Weight is -log(p) scaled by 1000 to an integer.
        let weight = (-(config.physical_error_rate.ln()) * 1000.0) as i32;
        let mut edges = Vec::new();
        // Grid connectivity: right and bottom neighbor for each cell.
        for row in 0..d {
            for col in 0..d {
                let v = row * d + col;
                if col + 1 < d {
                    let neighbor = row * d + (col + 1);
                    edges.push((v, neighbor, weight));
                }
                if row + 1 < d {
                    let neighbor = (row + 1) * d + col;
                    edges.push((v, neighbor, weight));
                }
            }
        }
        // Boundary vertices connect to the virtual boundary at half weight
        // so odd defect sets can match against the boundary.
        let boundary_vertex = num_detectors;
        for col in 0..d {
            edges.push((col, boundary_vertex, weight / 2)); // Top edge
            edges.push(((d - 1) * d + col, boundary_vertex, weight / 2)); // Bottom edge
        }
        for row in 0..d {
            edges.push((row * d, boundary_vertex, weight / 2)); // Left edge
            edges.push((row * d + (d - 1), boundary_vertex, weight / 2)); // Right edge
        }
        // Convert to fusion-blossom format
        let fb_edges: Vec<(VertexIndex, VertexIndex, Weight)> = edges
            .iter()
            .map(|(v1, v2, w)| (*v1 as VertexIndex, *v2 as VertexIndex, *w as Weight))
            .collect();
        // Create initializer
        let initializer = SolverInitializer::new(vertex_count as VertexNum, fb_edges);
        let solver = SolverSerial::new(&initializer);
        // Simple 1:1 detector mapping for now
        let detector_to_vertex: Vec<usize> = (0..num_detectors).collect();
        Self {
            config,
            solver,
            vertex_count,
            edges,
            detector_to_vertex,
        }
    }
    /// Decode a syndrome bitmap and return correction suggestions
    ///
    /// Feeds the fired detectors to the MWPM solver as the defect set and
    /// converts the resulting matching into X-correction suggestions.
    pub fn decode(&mut self, syndrome: &DetectorBitmap) -> Correction {
        use fusion_blossom::mwpm_solver::PrimalDualSolver;
        use std::time::Instant;
        let start = Instant::now();
        // Clear previous syndrome
        self.solver.clear();
        // Collect defects (fired detectors) for the solver.
        let mut defect_vertices = Vec::new();
        for detector_idx in syndrome.iter_fired() {
            if detector_idx < self.detector_to_vertex.len() {
                let vertex = self.detector_to_vertex[detector_idx];
                defect_vertices.push(vertex as fusion_blossom::util::VertexIndex);
            }
        }
        // Must have even number of defects for perfect matching.
        // If odd, pad with the virtual boundary vertex.
        if defect_vertices.len() % 2 == 1 {
            defect_vertices.push((self.vertex_count - 1) as fusion_blossom::util::VertexIndex);
        }
        // BUGFIX: the defect set was previously computed but never handed
        // to the solver (the old `solve_visualizer(None)` call received no
        // syndrome), so the input syndrome was effectively ignored. Pass
        // the defects as a SyndromePattern per the PrimalDualSolver API.
        let syndrome_pattern = fusion_blossom::util::SyndromePattern::new_vertices(defect_vertices);
        self.solver.solve(&syndrome_pattern);
        // Extract matching
        let matching = self.solver.perfect_matching();
        // Convert matching to corrections: each matched pair indicates an
        // error chain between the two detectors.
        let mut x_corrections = Vec::new();
        let d = self.config.distance;
        for (v1, v2) in matching.iter() {
            let v1 = *v1 as usize;
            let v2 = *v2 as usize;
            // Only real-detector pairs contribute; boundary matches are
            // dropped here.
            if v1 < d * d && v2 < d * d {
                let row1 = v1 / d;
                let col1 = v1 % d;
                let row2 = v2 / d;
                let col2 = v2 % d;
                // Simple approximation: correct all data qubits in the
                // bounding box between the pair.
                let min_row = row1.min(row2);
                let max_row = row1.max(row2);
                let min_col = col1.min(col2);
                let max_col = col1.max(col2);
                for r in min_row..=max_row {
                    for c in min_col..=max_col {
                        x_corrections.push(r * d + c);
                    }
                }
            }
        }
        // Deduplicate corrections (XOR logic - double correction = no correction)
        x_corrections.sort_unstable();
        let mut deduped = Vec::new();
        let mut i = 0;
        while i < x_corrections.len() {
            let mut count = 1;
            while i + count < x_corrections.len() && x_corrections[i] == x_corrections[i + count] {
                count += 1;
            }
            if count % 2 == 1 {
                deduped.push(x_corrections[i]);
            }
            i += count;
        }
        let elapsed = start.elapsed();
        Correction {
            x_corrections: deduped,
            z_corrections: Vec::new(), // Z corrections from separate decoder pass
            confidence: if syndrome.fired_count() == 0 {
                1.0
            } else {
                0.9
            },
            decode_time_ns: elapsed.as_nanos() as u64,
        }
    }
    /// Get decoder statistics
    pub fn config(&self) -> &DecoderConfig {
        &self.config
    }
}
/// Heuristic decoder fallback (when fusion-blossom is not available)
///
/// Same public surface as the feature-gated MWPM decoder, backed by a
/// greedy nearest-neighbor pairing instead of true matching.
#[cfg(not(feature = "decoder"))]
pub struct MWPMDecoder {
    // Decoder parameters (distance, error rate, window).
    config: DecoderConfig,
}
#[cfg(not(feature = "decoder"))]
impl MWPMDecoder {
    /// Create a new heuristic decoder
    pub fn new(config: DecoderConfig) -> Self {
        Self { config }
    }
    /// Decode using simple nearest-neighbor heuristic
    ///
    /// Greedily pairs each fired detector with its closest (Manhattan
    /// distance) unmatched successor and emits an L-shaped correction
    /// path between the pair. Confidence is fixed at 0.7 for non-empty
    /// syndromes to reflect the heuristic nature of the pairing.
    pub fn decode(&mut self, syndrome: &DetectorBitmap) -> Correction {
        let started = std::time::Instant::now();
        let fired: Vec<usize> = syndrome.iter_fired().collect();
        let d = self.config.distance;
        let mut x_corrections = Vec::new();
        let mut matched = vec![false; fired.len()];
        for (i, &det1) in fired.iter().enumerate() {
            if matched[i] {
                continue;
            }
            let (row1, col1) = (det1 / d, det1 % d);
            // Closest unmatched detector after i (ties go to the lowest
            // index, matching a strict `<` scan).
            let partner = fired
                .iter()
                .enumerate()
                .skip(i + 1)
                .filter(|&(j, _)| !matched[j])
                .min_by_key(|&(_, &det2)| row1.abs_diff(det2 / d) + col1.abs_diff(det2 % d))
                .map(|(j, _)| j);
            if let Some(j) = partner {
                matched[i] = true;
                matched[j] = true;
                let det2 = fired[j];
                let (row2, col2) = (det2 / d, det2 % d);
                let (lo_r, hi_r) = (row1.min(row2), row1.max(row2));
                let (lo_c, hi_c) = (col1.min(col2), col1.max(col2));
                // Horizontal leg along the top row of the bounding box...
                for c in lo_c..hi_c {
                    x_corrections.push(lo_r * d + c);
                }
                // ...then the vertical leg along its rightmost column.
                for r in lo_r..hi_r {
                    x_corrections.push(r * d + hi_c);
                }
            }
        }
        Correction {
            x_corrections,
            z_corrections: Vec::new(),
            // Lower confidence than the true MWPM path.
            confidence: if fired.is_empty() { 1.0 } else { 0.7 },
            decode_time_ns: started.elapsed().as_nanos() as u64,
        }
    }
    /// Get decoder configuration
    pub fn config(&self) -> &DecoderConfig {
        &self.config
    }
}
/// Streaming decoder for real-time syndrome processing
///
/// Wraps an `MWPMDecoder` and retains a bounded window of recent
/// corrections for temporal statistics.
pub struct StreamingDecoder {
    // Underlying single-shot decoder.
    inner: MWPMDecoder,
    // Recent corrections, oldest first, bounded by `history_size`.
    correction_history: Vec<Correction>,
    // Maximum number of retained corrections (at least 10).
    history_size: usize,
}
impl StreamingDecoder {
    /// Create a new streaming decoder
    pub fn new(config: DecoderConfig) -> Self {
        let history_size = config.window_size.max(10);
        StreamingDecoder {
            inner: MWPMDecoder::new(config),
            correction_history: Vec::with_capacity(history_size),
            history_size,
        }
    }
    /// Process a syndrome round and return corrections
    pub fn process(&mut self, syndrome: &DetectorBitmap) -> Correction {
        let correction = self.inner.decode(syndrome);
        // Evict the oldest entry once the window is full.
        while self.correction_history.len() >= self.history_size {
            self.correction_history.remove(0);
        }
        self.correction_history.push(correction.clone());
        correction
    }
    /// Get average decode time over recent history
    pub fn average_decode_time_ns(&self) -> u64 {
        match self.correction_history.len() {
            0 => 0,
            n => {
                let total: u64 = self
                    .correction_history
                    .iter()
                    .map(|c| c.decode_time_ns)
                    .sum();
                total / n as u64
            }
        }
    }
    /// Get decoder configuration
    pub fn config(&self) -> &DecoderConfig {
        self.inner.config()
    }
    /// Clear correction history
    pub fn clear_history(&mut self) {
        self.correction_history.clear();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Default config must match the documented values.
    #[test]
    fn test_decoder_config_default() {
        let config = DecoderConfig::default();
        assert_eq!(config.distance, 7);
        assert!((config.physical_error_rate - 0.001).abs() < 1e-10);
    }
    // An empty syndrome yields no corrections at full confidence.
    #[test]
    fn test_decoder_empty_syndrome() {
        let config = DecoderConfig::default();
        let mut decoder = MWPMDecoder::new(config);
        let syndrome = DetectorBitmap::new(49); // d=7, 7*7=49 detectors
        let correction = decoder.decode(&syndrome);
        assert!(correction.x_corrections.is_empty());
        assert_eq!(correction.confidence, 1.0);
    }
    // Two adjacent fired detectors should produce at least one correction.
    #[test]
    fn test_decoder_single_pair() {
        let config = DecoderConfig {
            distance: 5,
            physical_error_rate: 0.01,
            window_size: 1,
            parallel: false,
        };
        let mut decoder = MWPMDecoder::new(config);
        // Two adjacent fired detectors
        let mut syndrome = DetectorBitmap::new(25); // d=5, 5*5=25 detectors
        syndrome.set(0, true); // (0,0)
        syndrome.set(1, true); // (0,1)
        let correction = decoder.decode(&syndrome);
        // Should suggest correction between them
        assert!(!correction.x_corrections.is_empty());
        assert!(correction.decode_time_ns > 0);
    }
    // The streaming wrapper should accumulate a non-zero average latency.
    #[test]
    fn test_streaming_decoder() {
        let config = DecoderConfig::default();
        let mut decoder = StreamingDecoder::new(config);
        // Process several rounds
        for i in 0..5 {
            let mut syndrome = DetectorBitmap::new(49);
            if i % 2 == 0 {
                syndrome.set(0, true);
                syndrome.set(6, true);
            }
            let _ = decoder.process(&syndrome);
        }
        assert!(decoder.average_decode_time_ns() > 0);
    }
    // Default Correction is the identity with full confidence.
    #[test]
    fn test_correction_default() {
        let correction = Correction::default();
        assert!(correction.x_corrections.is_empty());
        assert!(correction.z_corrections.is_empty());
        assert_eq!(correction.confidence, 1.0);
    }
}

348
vendor/ruvector/crates/ruQu/src/error.rs vendored Normal file
View File

@@ -0,0 +1,348 @@
//! Error types for the ruQu coherence gate system
//!
//! This module defines all error types that can occur during coherence
//! assessment, syndrome processing, and gate decision-making.
use thiserror::Error;
/// Result type alias for ruQu operations
pub type Result<T> = std::result::Result<T, RuQuError>;
/// Main error type for ruQu operations
#[derive(Error, Debug)]
pub enum RuQuError {
// ═══════════════════════════════════════════════════════════════════════
// Gate Decision Errors
// ═══════════════════════════════════════════════════════════════════════
/// Filter evaluation failed
#[error("Filter evaluation failed: {filter} - {reason}")]
FilterEvaluationFailed {
/// Which filter failed
filter: String,
/// Reason for failure
reason: String,
},
/// Gate decision timeout exceeded
#[error("Gate decision timeout: {elapsed_ns}ns exceeded {budget_ns}ns budget")]
DecisionTimeout {
/// Time elapsed in nanoseconds
elapsed_ns: u64,
/// Budget in nanoseconds
budget_ns: u64,
},
/// Invalid threshold configuration
#[error("Invalid threshold: {name} = {value} (expected {constraint})")]
InvalidThreshold {
/// Threshold name
name: String,
/// Actual value
value: f64,
/// Expected constraint description
constraint: String,
},
// ═══════════════════════════════════════════════════════════════════════
// Tile Errors
// ═══════════════════════════════════════════════════════════════════════
/// Invalid tile identifier
#[error("Invalid tile ID: {0} (valid range: 0-255)")]
InvalidTileId(u16),
/// Tile not found
#[error("Tile {0} not found in fabric")]
TileNotFound(u8),
/// Tile communication failure
#[error("Tile communication failed: tile {tile_id} - {reason}")]
TileCommunicationFailed {
/// Tile that failed
tile_id: u8,
/// Reason for failure
reason: String,
},
/// Tile memory exceeded
#[error("Tile {tile_id} memory exceeded: {used} bytes > {limit} bytes")]
TileMemoryExceeded {
/// Tile ID
tile_id: u8,
/// Memory used
used: usize,
/// Memory limit
limit: usize,
},
// ═══════════════════════════════════════════════════════════════════════
// Syndrome Errors
// ═══════════════════════════════════════════════════════════════════════
/// Syndrome buffer overflow
#[error("Syndrome buffer overflow: capacity {capacity}, attempted write at {position}")]
SyndromeBufferOverflow {
/// Buffer capacity
capacity: usize,
/// Attempted write position
position: usize,
},
/// Invalid syndrome round
#[error("Invalid syndrome round: {0}")]
InvalidSyndromeRound(String),
/// Syndrome gap detected (missing rounds)
#[error("Syndrome gap: expected round {expected}, got {actual}")]
SyndromeGap {
/// Expected round ID
expected: u64,
/// Actual round ID received
actual: u64,
},
/// Detector map mismatch
#[error("Detector count mismatch: expected {expected}, got {actual}")]
DetectorCountMismatch {
/// Expected detector count
expected: usize,
/// Actual detector count
actual: usize,
},
// ═══════════════════════════════════════════════════════════════════════
// Graph Errors
// ═══════════════════════════════════════════════════════════════════════
/// Graph vertex not found
#[error("Vertex {0} not found in operational graph")]
VertexNotFound(u64),
/// Graph edge not found
#[error("Edge {0} not found in operational graph")]
EdgeNotFound(u64),
/// Invalid graph update
#[error("Invalid graph update: {0}")]
InvalidGraphUpdate(String),
/// Graph version conflict
#[error("Graph version conflict: expected {expected}, current {current}")]
GraphVersionConflict {
/// Expected version
expected: u64,
/// Current version
current: u64,
},
// ═══════════════════════════════════════════════════════════════════════
// Permit/Token Errors
// ═══════════════════════════════════════════════════════════════════════
/// Permit token expired
#[error("Permit token expired: expired at {expired_at}, current time {current_time}")]
PermitExpired {
/// Expiration timestamp
expired_at: u64,
/// Current timestamp
current_time: u64,
},
/// Permit signature invalid
#[error("Permit signature verification failed")]
PermitSignatureInvalid,
/// Permit witness hash mismatch
#[error("Permit witness hash mismatch")]
PermitWitnessMismatch,
/// Action not authorized by permit
#[error("Action {action_id} not authorized by permit for regions {region_mask:?}")]
ActionNotAuthorized {
/// Action ID
action_id: String,
/// Region mask from permit
region_mask: [u64; 4],
},
// ═══════════════════════════════════════════════════════════════════════
// Witness/Receipt Errors
// ═══════════════════════════════════════════════════════════════════════
/// Witness chain broken
#[error("Witness chain broken at sequence {sequence}")]
WitnessChainBroken {
/// Sequence where chain broke
sequence: u64,
},
/// Receipt not found
#[error("Receipt not found for sequence {0}")]
ReceiptNotFound(u64),
/// Receipt verification failed
#[error("Receipt verification failed at sequence {sequence}: {reason}")]
ReceiptVerificationFailed {
/// Sequence number
sequence: u64,
/// Failure reason
reason: String,
},
// ═══════════════════════════════════════════════════════════════════════
// Fabric Errors
// ═══════════════════════════════════════════════════════════════════════
/// Fabric not initialized
#[error("Quantum fabric not initialized")]
FabricNotInitialized,
/// Fabric configuration invalid
#[error("Invalid fabric configuration: {0}")]
InvalidFabricConfig(String),
/// Fabric synchronization failed
#[error("Fabric synchronization failed: {0}")]
FabricSyncFailed(String),
// ═══════════════════════════════════════════════════════════════════════
// Integration Errors
// ═══════════════════════════════════════════════════════════════════════
/// MinCut integration error
#[error("MinCut error: {0}")]
MinCutError(String),
/// TileZero integration error
#[error("TileZero error: {0}")]
TileZeroError(String),
// ═══════════════════════════════════════════════════════════════════════
// General Errors
// ═══════════════════════════════════════════════════════════════════════
/// Internal error
#[error("Internal error: {0}")]
Internal(String),
/// Serialization error
#[error("Serialization error: {0}")]
Serialization(String),
/// IO error
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
}
impl RuQuError {
/// Check if error is recoverable (can retry operation)
pub fn is_recoverable(&self) -> bool {
matches!(
self,
RuQuError::DecisionTimeout { .. }
| RuQuError::TileCommunicationFailed { .. }
| RuQuError::SyndromeGap { .. }
| RuQuError::FabricSyncFailed(_)
)
}
/// Check if error indicates data corruption
pub fn is_corruption(&self) -> bool {
matches!(
self,
RuQuError::WitnessChainBroken { .. }
| RuQuError::ReceiptVerificationFailed { .. }
| RuQuError::PermitSignatureInvalid
| RuQuError::PermitWitnessMismatch
)
}
/// Check if error is a configuration problem
pub fn is_configuration(&self) -> bool {
matches!(
self,
RuQuError::InvalidThreshold { .. }
| RuQuError::InvalidFabricConfig(_)
| RuQuError::DetectorCountMismatch { .. }
)
}
/// Check if error is resource-related
pub fn is_resource(&self) -> bool {
matches!(
self,
RuQuError::TileMemoryExceeded { .. } | RuQuError::SyndromeBufferOverflow { .. }
)
}
}
impl From<serde_json::Error> for RuQuError {
    /// Flatten a serde_json failure into the generic serialization variant,
    /// keeping only its rendered message.
    fn from(err: serde_json::Error) -> Self {
        Self::Serialization(err.to_string())
    }
}
impl From<String> for RuQuError {
fn from(msg: String) -> Self {
RuQuError::Internal(msg)
}
}
impl From<&str> for RuQuError {
fn from(msg: &str) -> Self {
RuQuError::Internal(msg.to_string())
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_error_display() {
        // Tuple variant renders its payload inline.
        assert_eq!(
            RuQuError::InvalidTileId(300).to_string(),
            "Invalid tile ID: 300 (valid range: 0-255)"
        );
        // Struct variant interpolates both fields.
        let msg = RuQuError::DecisionTimeout {
            elapsed_ns: 5000,
            budget_ns: 4000,
        }
        .to_string();
        assert!(msg.contains("5000ns"));
        assert!(msg.contains("4000ns"));
    }

    #[test]
    fn test_is_recoverable() {
        let timeout = RuQuError::DecisionTimeout {
            elapsed_ns: 5000,
            budget_ns: 4000,
        };
        let comms = RuQuError::TileCommunicationFailed {
            tile_id: 1,
            reason: "timeout".into(),
        };
        assert!(timeout.is_recoverable());
        assert!(comms.is_recoverable());
        // Signature failures are never retryable.
        assert!(!RuQuError::PermitSignatureInvalid.is_recoverable());
    }

    #[test]
    fn test_is_corruption() {
        assert!(RuQuError::WitnessChainBroken { sequence: 42 }.is_corruption());
        assert!(RuQuError::PermitSignatureInvalid.is_corruption());
        assert!(!RuQuError::InvalidTileId(300).is_corruption());
    }

    #[test]
    fn test_is_configuration() {
        let bad_threshold = RuQuError::InvalidThreshold {
            name: "tau_deny".into(),
            value: -1.0,
            constraint: "> 0".into(),
        };
        assert!(bad_threshold.is_configuration());
        assert!(!RuQuError::Internal("oops".into()).is_configuration());
    }

    #[test]
    fn test_from_string() {
        let err = RuQuError::from("test error");
        assert!(matches!(err, RuQuError::Internal(_)));
        assert_eq!(err.to_string(), "Internal error: test error");
    }
}

1279
vendor/ruvector/crates/ruQu/src/fabric.rs vendored Normal file

File diff suppressed because it is too large Load Diff

1356
vendor/ruvector/crates/ruQu/src/filters.rs vendored Normal file

File diff suppressed because it is too large Load Diff

208
vendor/ruvector/crates/ruQu/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,208 @@
//! # ruQu - Classical Nervous System for Quantum Machines
//!
//! Real-time syndrome processing and coherence assessment for quantum systems.
//!
//! This crate provides high-throughput, low-latency data pipelines for ingesting,
//! buffering, and transforming quantum error syndromes into coherence-relevant signals.
//!
//! ## Architecture
//!
//! ruQu is organized into several bounded contexts following Domain-Driven Design:
//!
//! - **Syndrome Processing** (Supporting Domain): High-throughput data acquisition
//! - **Coherence Gate** (Core Domain): Real-time structural assessment
//! - **Tile Architecture**: 256-tile WASM fabric for parallel processing
//!
//! The system uses a two-layer classical control approach:
//! 1. **RuVector Memory Layer**: Pattern recognition and historical mitigation retrieval
//! 2. **Dynamic Min-Cut Gate**: Real El-Hayek/Henzinger/Li O(n^{o(1)}) algorithm
//!
//! ## Quick Start
//!
//! ```rust
//! use ruqu::syndrome::{DetectorBitmap, SyndromeRound, SyndromeBuffer};
//!
//! // Create a detector bitmap for 64 detectors
//! let mut bitmap = DetectorBitmap::new(64);
//! bitmap.set(0, true);
//! bitmap.set(5, true);
//! bitmap.set(63, true);
//!
//! assert_eq!(bitmap.fired_count(), 3);
//!
//! // Create a syndrome round
//! let round = SyndromeRound {
//! round_id: 1,
//! cycle: 1000,
//! timestamp: 1705500000000,
//! detectors: bitmap,
//! source_tile: 0,
//! };
//!
//! // Buffer rounds for analysis
//! let mut buffer = SyndromeBuffer::new(1024);
//! buffer.push(round);
//! ```
//!
//! ## Three-Filter Decision Logic
//!
//! The coherence gate uses three stacked filters:
//! 1. **Structural Filter**: Min-cut based stability assessment
//! 2. **Shift Filter**: Drift detection from baseline patterns
//! 3. **Evidence Filter**: Anytime-valid e-value accumulation
//!
//! All three must pass for PERMIT. Any one can trigger DENY or DEFER.
//!
//! ## Performance Targets
//!
//! - Gate decision latency: < 4 microseconds p99
//! - Syndrome ingestion: 1M rounds/second
//! - Memory per tile: 64KB
//! - Total latency budget: ~2,350ns
//!
//! ## Feature Flags
//!
//! - `structural` - Enable min-cut based structural filter (requires ruvector-mincut)
//! - `tilezero` - Enable TileZero arbiter integration (requires cognitum-gate-tilezero)
//! - `simd` - Enable SIMD acceleration for bitmap operations
//! - `wasm` - WASM-compatible mode (disables native SIMD)
//! - `full` - Enable all features
#![deny(missing_docs)]
#![warn(clippy::all)]
#![warn(clippy::pedantic)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::missing_errors_doc)]
#![allow(clippy::missing_panics_doc)]
#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_sign_loss)]
// Core modules
pub mod attention;
pub mod decoder;
pub mod error;
pub mod fabric;
pub mod filters;
pub mod mincut;
pub mod syndrome;
pub mod tile;
pub mod types;
// Advanced features
pub mod adaptive;
pub mod metrics;
pub mod parallel;
pub mod stim;
// Production interfaces
pub mod schema;
pub mod traits;
// Re-exports for convenient access
pub use adaptive::{
AdaptiveStats, AdaptiveThresholds, DriftConfig, DriftDetector, DriftDirection, DriftProfile,
LearningConfig,
};
pub use attention::{AttentionConfig, AttentionStats, CoherenceAttention, GatePacketBridge};
pub use decoder::{Correction, DecoderConfig, MWPMDecoder, StreamingDecoder};
pub use error::{Result, RuQuError};
pub use fabric::{
linear_patch_map, surface_code, surface_code_d7, CoherenceGate, DecisionStats, FabricBuilder,
FabricConfig, FabricState, FilterSummary, PatchMap, QuantumFabric, TileAssignment,
WitnessReceipt,
};
pub use filters::{
EdgeId as FilterEdgeId, EvidenceAccumulator, EvidenceFilter, EvidenceResult, FilterConfig,
FilterPipeline, FilterResults, RegionMask, ShiftFilter, ShiftResult, StructuralFilter,
StructuralResult, SystemState, Verdict,
};
pub use metrics::{Counter, Gauge, Histogram, MetricsCollector, MetricsConfig, MetricsSnapshot};
pub use mincut::{DynamicMinCutEngine, MinCutResult};
pub use parallel::{parallel_aggregate, ParallelConfig, ParallelFabric, ParallelStats};
pub use stim::{ErrorPatternGenerator, StimSyndromeSource, SurfaceCodeConfig, SyndromeStats};
pub use syndrome::{
BufferStatistics, DetectorBitmap, SyndromeBuffer, SyndromeDelta, SyndromeRound,
};
pub use tile::{
GateDecision, GateThresholds, LocalCutState, PatchGraph, PermitToken, ReceiptLog, TileReport,
TileZero, WorkerTile,
};
pub use types::{
ActionId, CycleId, GateDecision as DomainGateDecision, RegionMask as DomainRegionMask, RoundId,
SequenceId, TileId as DomainTileId,
};
/// Crate version (from Cargo.toml at build time)
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Crate name (from Cargo.toml at build time)
pub const NAME: &str = env!("CARGO_PKG_NAME");
/// Maximum number of detectors supported (1024 = 16 * 64 bits, a whole
/// number of 64-bit bitmap words)
pub const MAX_DETECTORS: usize = 1024;
/// Default buffer capacity in rounds
pub const DEFAULT_BUFFER_CAPACITY: usize = 1024;
/// Total number of tiles in the fabric (TileZero + workers)
pub const TILE_COUNT: usize = 256;
/// Number of worker tiles (TILE_COUNT excluding TileZero)
pub const WORKER_TILE_COUNT: usize = 255;
/// Memory budget per tile in bytes (64KB)
pub const TILE_MEMORY_BUDGET: usize = 65536;
/// Prelude module for convenient imports
pub mod prelude {
//! Commonly used types for syndrome processing, filters, and tile architecture.
pub use crate::adaptive::{
AdaptiveStats, AdaptiveThresholds, DriftConfig, DriftDetector, DriftProfile, LearningConfig,
};
pub use crate::error::{Result, RuQuError};
pub use crate::fabric::{
linear_patch_map, surface_code, surface_code_d7, CoherenceGate, DecisionStats,
FabricBuilder, FabricConfig, FabricState, PatchMap, QuantumFabric, TileAssignment,
WitnessReceipt,
};
pub use crate::filters::{
EvidenceAccumulator, EvidenceFilter, EvidenceResult, FilterConfig, FilterPipeline,
FilterResults, RegionMask, ShiftFilter, ShiftResult, StructuralFilter, StructuralResult,
SystemState, Verdict,
};
pub use crate::metrics::{MetricsCollector, MetricsConfig, MetricsSnapshot};
pub use crate::parallel::{ParallelConfig, ParallelFabric, ParallelStats};
pub use crate::stim::{StimSyndromeSource, SurfaceCodeConfig, SyndromeStats};
pub use crate::syndrome::{
BufferStatistics, DetectorBitmap, SyndromeBuffer, SyndromeDelta, SyndromeRound,
};
pub use crate::tile::{
GateDecision, GateThresholds, LocalCutState, PatchGraph, PermitToken, ReceiptLog,
TileReport, TileZero, WorkerTile,
};
pub use crate::types::{
ActionId, CycleId, GateDecision as DomainGateDecision, RegionMask as DomainRegionMask,
RoundId, SequenceId,
};
pub use crate::{
DEFAULT_BUFFER_CAPACITY, MAX_DETECTORS, TILE_COUNT, TILE_MEMORY_BUDGET, WORKER_TILE_COUNT,
};
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_version_constant() {
        // Both come from Cargo metadata and must be non-empty.
        assert!(!VERSION.is_empty());
        assert!(!NAME.is_empty());
        assert_eq!(NAME, "ruqu");
    }

    #[test]
    fn test_constants() {
        assert_eq!(MAX_DETECTORS, 1024);
        assert_eq!(DEFAULT_BUFFER_CAPACITY, 1024);
        // Previously untested fabric invariants: one TileZero plus the workers,
        // and the documented 64KB per-tile budget.
        assert_eq!(TILE_COUNT, 256);
        assert_eq!(WORKER_TILE_COUNT, TILE_COUNT - 1);
        assert_eq!(TILE_MEMORY_BUDGET, 64 * 1024);
        // Detector bitmaps are built from 64-bit words.
        assert_eq!(MAX_DETECTORS % 64, 0);
    }
}

View File

@@ -0,0 +1,587 @@
//! Observability and Metrics
//!
//! This module provides tracing, metrics, and observability features for
//! production deployments. Integrates with standard observability stacks.
//!
//! ## Features
//!
//! - **Tracing**: Structured spans for request tracing
//! - **Metrics**: Counters, gauges, histograms for monitoring
//! - **Health Checks**: Liveness and readiness probes
//!
//! ## Usage
//!
//! ```rust,ignore
//! use ruqu::metrics::{MetricsCollector, MetricsConfig};
//!
//! let config = MetricsConfig::default();
//! let mut metrics = MetricsCollector::new(config);
//!
//! // Record gate decision
//! metrics.record_decision(GateDecision::Permit, latency_ns);
//!
//! // Export metrics
//! let snapshot = metrics.snapshot();
//! ```
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
use crate::tile::GateDecision;
/// Configuration for metrics collection.
///
/// NOTE(review): `enable_histograms` and `per_tile_metrics` are not consulted
/// by the visible `MetricsCollector` implementation — they appear reserved
/// for future use; confirm before relying on them.
#[derive(Clone, Debug)]
pub struct MetricsConfig {
    /// Enable detailed histograms (more memory)
    pub enable_histograms: bool,
    /// Histogram bucket boundaries (nanoseconds, ascending, inclusive upper bounds)
    pub histogram_buckets: Vec<u64>,
    /// Enable per-tile metrics
    pub per_tile_metrics: bool,
    /// Metrics export interval
    pub export_interval: Duration,
}
impl Default for MetricsConfig {
    fn default() -> Self {
        Self {
            enable_histograms: true,
            // 100ns .. 100µs, roughly logarithmic spacing.
            histogram_buckets: vec![
                100, 250, 500, 1_000, 2_500, 5_000, 10_000, 25_000, 50_000, 100_000,
            ],
            per_tile_metrics: false,
            export_interval: Duration::from_secs(10),
        }
    }
}
/// Monotonically increasing counter metric.
///
/// Backed by an `AtomicU64` with relaxed ordering, so increments are cheap
/// and safe from any thread; readers may observe slightly stale values.
#[derive(Debug, Default)]
pub struct Counter {
    value: AtomicU64,
}
impl Counter {
    /// Create a counter starting at zero.
    pub fn new() -> Self {
        Self::default()
    }
    /// Increment the counter by one.
    pub fn inc(&self) {
        self.add(1);
    }
    /// Increase the counter by an arbitrary amount.
    pub fn add(&self, val: u64) {
        self.value.fetch_add(val, Ordering::Relaxed);
    }
    /// Read the current count.
    pub fn get(&self) -> u64 {
        self.value.load(Ordering::Relaxed)
    }
    /// Reset the counter back to zero.
    pub fn reset(&self) {
        self.value.store(0, Ordering::Relaxed);
    }
}
/// Gauge metric whose value can move in either direction.
///
/// Floats are stored as fixed-point micro-units (value * 1e6) inside the
/// underlying `AtomicU64`, so `set_f64`/`get_f64` keep six decimal digits.
#[derive(Debug, Default)]
pub struct Gauge {
    value: AtomicU64,
}
impl Gauge {
    /// Create a gauge initialized to zero.
    pub fn new() -> Self {
        Self::default()
    }
    /// Store an integer value.
    pub fn set(&self, val: u64) {
        self.value.store(val, Ordering::Relaxed);
    }
    /// Store a float, encoded as fixed-point micro-units.
    pub fn set_f64(&self, val: f64) {
        let fixed = (val * 1_000_000.0) as u64;
        self.value.store(fixed, Ordering::Relaxed);
    }
    /// Read the raw integer value.
    pub fn get(&self) -> u64 {
        self.value.load(Ordering::Relaxed)
    }
    /// Read the value, decoding the fixed-point micro-unit representation.
    pub fn get_f64(&self) -> f64 {
        self.get() as f64 / 1_000_000.0
    }
}
/// Histogram for latency distribution.
///
/// `buckets` are ascending inclusive upper bounds; one extra overflow slot
/// collects values above the last boundary. All updates use relaxed atomics.
#[derive(Debug)]
pub struct Histogram {
    buckets: Vec<u64>,
    counts: Vec<AtomicU64>,
    sum: AtomicU64,
    count: AtomicU64,
}
impl Histogram {
    /// Create a new histogram with the given bucket boundaries.
    pub fn new(buckets: Vec<u64>) -> Self {
        // One counter per bucket, plus one for overflow.
        let counts = (0..=buckets.len()).map(|_| AtomicU64::new(0)).collect();
        Self {
            buckets,
            counts,
            sum: AtomicU64::new(0),
            count: AtomicU64::new(0),
        }
    }
    /// Record a value.
    pub fn observe(&self, value: u64) {
        self.sum.fetch_add(value, Ordering::Relaxed);
        self.count.fetch_add(1, Ordering::Relaxed);
        // First bucket whose boundary covers the value; overflow otherwise.
        let idx = self
            .buckets
            .iter()
            .position(|&b| value <= b)
            .unwrap_or(self.buckets.len());
        self.counts[idx].fetch_add(1, Ordering::Relaxed);
    }
    /// Get bucket counts (one entry per bucket, plus the overflow slot).
    pub fn bucket_counts(&self) -> Vec<u64> {
        self.counts
            .iter()
            .map(|c| c.load(Ordering::Relaxed))
            .collect()
    }
    /// Get total number of observations.
    pub fn get_count(&self) -> u64 {
        self.count.load(Ordering::Relaxed)
    }
    /// Get sum of all observed values.
    pub fn get_sum(&self) -> u64 {
        self.sum.load(Ordering::Relaxed)
    }
    /// Get mean of observed values (0.0 when empty).
    pub fn mean(&self) -> f64 {
        let count = self.get_count();
        if count == 0 {
            return 0.0;
        }
        self.get_sum() as f64 / count as f64
    }
    /// Estimate a percentile (bucket-boundary resolution).
    ///
    /// Returns 0 for an empty histogram. Values in the overflow bucket are
    /// reported as twice the last boundary, a rough upper bound.
    pub fn percentile(&self, p: f64) -> u64 {
        let total = self.get_count();
        if total == 0 {
            return 0;
        }
        // Rank of the requested percentile, rounded UP and clamped to >= 1.
        // The previous truncating cast produced rank 0 for small totals, so
        // e.g. p99 of a single 2000ns sample reported the FIRST bucket.
        let target = ((total as f64 * p).ceil() as u64).max(1);
        let mut cumulative = 0u64;
        for (i, count) in self.counts.iter().enumerate() {
            cumulative += count.load(Ordering::Relaxed);
            if cumulative >= target {
                return if i < self.buckets.len() {
                    self.buckets[i]
                } else {
                    self.buckets.last().copied().unwrap_or(0) * 2
                };
            }
        }
        self.buckets.last().copied().unwrap_or(0) * 2
    }
}
/// Main metrics collector aggregating decision counters, latency histograms,
/// throughput/coherence gauges, and error counts for the coherence gate.
///
/// All metric primitives are atomic, so recording methods take `&self`.
pub struct MetricsCollector {
    // Retained configuration; only `histogram_buckets` is read at construction.
    config: MetricsConfig,
    // Decision counters (one per GateDecision variant)
    permits: Counter,
    defers: Counter,
    denies: Counter,
    // Latency histograms (nanosecond buckets from config)
    tick_latency: Histogram,
    merge_latency: Histogram,
    total_latency: Histogram,
    // Throughput gauges
    throughput: Gauge,
    active_tiles: Gauge,
    // Error metrics
    errors: Counter,
    // Min-cut metrics
    min_cut_value: Gauge,
    min_cut_queries: Counter,
    // Coherence metrics
    coherence_level: Gauge,
    shift_pressure: Gauge,
    // Timing anchors for uptime and export cadence
    start_time: Instant,
    last_export: Instant,
}
impl MetricsCollector {
    /// Create a new metrics collector; the three latency histograms share the
    /// bucket boundaries from `config.histogram_buckets`.
    pub fn new(config: MetricsConfig) -> Self {
        let buckets = config.histogram_buckets.clone();
        Self {
            config,
            permits: Counter::new(),
            defers: Counter::new(),
            denies: Counter::new(),
            tick_latency: Histogram::new(buckets.clone()),
            merge_latency: Histogram::new(buckets.clone()),
            total_latency: Histogram::new(buckets),
            throughput: Gauge::new(),
            active_tiles: Gauge::new(),
            errors: Counter::new(),
            min_cut_value: Gauge::new(),
            min_cut_queries: Counter::new(),
            coherence_level: Gauge::new(),
            shift_pressure: Gauge::new(),
            start_time: Instant::now(),
            last_export: Instant::now(),
        }
    }
    /// Record a gate decision and its end-to-end latency.
    pub fn record_decision(&self, decision: GateDecision, latency_ns: u64) {
        match decision {
            GateDecision::Permit => self.permits.inc(),
            GateDecision::Defer => self.defers.inc(),
            GateDecision::Deny => self.denies.inc(),
        }
        self.total_latency.observe(latency_ns);
    }
    /// Record per-tick latency.
    pub fn record_tick_latency(&self, latency_ns: u64) {
        self.tick_latency.observe(latency_ns);
    }
    /// Record aggregation/merge latency.
    pub fn record_merge_latency(&self, latency_ns: u64) {
        self.merge_latency.observe(latency_ns);
    }
    /// Record a min-cut query result.
    ///
    /// NOTE(review): `latency_ns` is currently ignored — only the value gauge
    /// and the query counter are updated.
    pub fn record_min_cut(&self, value: f64, latency_ns: u64) {
        self.min_cut_value.set_f64(value);
        self.min_cut_queries.inc();
        // Could add a separate histogram for min-cut latency
    }
    /// Record current coherence signals (min-cut level and shift pressure).
    pub fn record_coherence(&self, min_cut: f64, shift: f64) {
        self.coherence_level.set_f64(min_cut);
        self.shift_pressure.set_f64(shift);
    }
    /// Record an error occurrence.
    pub fn record_error(&self) {
        self.errors.inc();
    }
    /// Update the throughput gauge (syndromes per second).
    pub fn update_throughput(&self, syndromes_per_sec: f64) {
        self.throughput.set_f64(syndromes_per_sec);
    }
    /// Set the number of currently active tiles.
    pub fn set_active_tiles(&self, count: u64) {
        self.active_tiles.set(count);
    }
    /// Take a point-in-time snapshot of every metric for export.
    pub fn snapshot(&self) -> MetricsSnapshot {
        let elapsed = self.start_time.elapsed();
        MetricsSnapshot {
            uptime_secs: elapsed.as_secs(),
            // Decisions
            permits: self.permits.get(),
            defers: self.defers.get(),
            denies: self.denies.get(),
            // Latency
            tick_latency_mean_ns: self.tick_latency.mean() as u64,
            tick_latency_p50_ns: self.tick_latency.percentile(0.5),
            tick_latency_p99_ns: self.tick_latency.percentile(0.99),
            merge_latency_mean_ns: self.merge_latency.mean() as u64,
            merge_latency_p99_ns: self.merge_latency.percentile(0.99),
            total_latency_mean_ns: self.total_latency.mean() as u64,
            total_latency_p99_ns: self.total_latency.percentile(0.99),
            // Throughput
            throughput: self.throughput.get_f64(),
            total_decisions: self.permits.get() + self.defers.get() + self.denies.get(),
            // Health
            errors: self.errors.get(),
            active_tiles: self.active_tiles.get(),
            // Coherence
            min_cut_value: self.min_cut_value.get_f64(),
            shift_pressure: self.shift_pressure.get_f64(),
        }
    }
    /// Export all metrics in the Prometheus text exposition format.
    pub fn prometheus_export(&self) -> String {
        let snap = self.snapshot();
        let mut out = String::new();
        // Help and type declarations
        out.push_str("# HELP ruqu_decisions_total Total gate decisions by type\n");
        out.push_str("# TYPE ruqu_decisions_total counter\n");
        out.push_str(&format!(
            "ruqu_decisions_total{{type=\"permit\"}} {}\n",
            snap.permits
        ));
        out.push_str(&format!(
            "ruqu_decisions_total{{type=\"defer\"}} {}\n",
            snap.defers
        ));
        out.push_str(&format!(
            "ruqu_decisions_total{{type=\"deny\"}} {}\n",
            snap.denies
        ));
        out.push_str("\n# HELP ruqu_latency_nanoseconds Latency in nanoseconds\n");
        out.push_str("# TYPE ruqu_latency_nanoseconds summary\n");
        out.push_str(&format!(
            "ruqu_latency_nanoseconds{{quantile=\"0.5\"}} {}\n",
            snap.tick_latency_p50_ns
        ));
        out.push_str(&format!(
            "ruqu_latency_nanoseconds{{quantile=\"0.99\"}} {}\n",
            snap.tick_latency_p99_ns
        ));
        out.push_str("\n# HELP ruqu_throughput_syndromes_per_second Current throughput\n");
        out.push_str("# TYPE ruqu_throughput_syndromes_per_second gauge\n");
        out.push_str(&format!(
            "ruqu_throughput_syndromes_per_second {}\n",
            snap.throughput
        ));
        out.push_str("\n# HELP ruqu_coherence_min_cut Current min-cut value\n");
        out.push_str("# TYPE ruqu_coherence_min_cut gauge\n");
        out.push_str(&format!("ruqu_coherence_min_cut {}\n", snap.min_cut_value));
        out.push_str("\n# HELP ruqu_errors_total Total errors\n");
        out.push_str("# TYPE ruqu_errors_total counter\n");
        out.push_str(&format!("ruqu_errors_total {}\n", snap.errors));
        out
    }
    /// Check if healthy (for liveness probes): error rate under 1%,
    /// or no traffic yet.
    pub fn is_healthy(&self) -> bool {
        // Healthy if we've processed something and error rate is low
        let total = self.permits.get() + self.defers.get() + self.denies.get();
        let errors = self.errors.get();
        if total == 0 {
            return true; // Not started yet
        }
        // Error rate < 1%
        (errors as f64 / total as f64) < 0.01
    }
    /// Check if ready (for readiness probes): total p99 latency under 4ms.
    pub fn is_ready(&self) -> bool {
        // Ready if we're processing within latency targets
        let p99 = self.total_latency.percentile(0.99);
        p99 < 4_000_000 // 4ms target
    }
}
/// Point-in-time metrics snapshot for export; all fields are plain values
/// copied out of the collector's atomics, safe to serialize or ship elsewhere.
#[derive(Clone, Debug, Default)]
pub struct MetricsSnapshot {
    /// Uptime in seconds
    pub uptime_secs: u64,
    /// Total permit decisions
    pub permits: u64,
    /// Total defer decisions
    pub defers: u64,
    /// Total deny decisions
    pub denies: u64,
    /// Mean tick latency (ns)
    pub tick_latency_mean_ns: u64,
    /// P50 tick latency (ns)
    pub tick_latency_p50_ns: u64,
    /// P99 tick latency (ns)
    pub tick_latency_p99_ns: u64,
    /// Mean merge latency (ns)
    pub merge_latency_mean_ns: u64,
    /// P99 merge latency (ns)
    pub merge_latency_p99_ns: u64,
    /// Mean total latency (ns)
    pub total_latency_mean_ns: u64,
    /// P99 total latency (ns)
    pub total_latency_p99_ns: u64,
    /// Current throughput (syndromes per second)
    pub throughput: f64,
    /// Total decisions made (permits + defers + denies)
    pub total_decisions: u64,
    /// Total errors
    pub errors: u64,
    /// Active tiles
    pub active_tiles: u64,
    /// Current min-cut value
    pub min_cut_value: f64,
    /// Current shift pressure
    pub shift_pressure: f64,
}
impl MetricsSnapshot {
    /// Fraction of all decisions that were permits (0.0 when no decisions yet).
    pub fn permit_rate(&self) -> f64 {
        match self.total_decisions {
            0 => 0.0,
            total => self.permits as f64 / total as f64,
        }
    }
    /// Fraction of all decisions that were denies (0.0 when no decisions yet).
    pub fn deny_rate(&self) -> f64 {
        match self.total_decisions {
            0 => 0.0,
            total => self.denies as f64 / total as f64,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_counter() {
        let c = Counter::new();
        assert_eq!(c.get(), 0);
        c.inc();
        assert_eq!(c.get(), 1);
        c.add(10);
        assert_eq!(c.get(), 11);
    }

    #[test]
    fn test_gauge() {
        let g = Gauge::new();
        g.set(100);
        assert_eq!(g.get(), 100);
        // Fixed-point round-trip keeps ~6 decimal digits.
        g.set_f64(3.14159);
        assert!((g.get_f64() - 3.14159).abs() < 0.001);
    }

    #[test]
    fn test_histogram() {
        let hist = Histogram::new(vec![100, 500, 1000]);
        for value in [50, 200, 800, 2000] {
            hist.observe(value);
        }
        assert_eq!(hist.get_count(), 4);
        // One observation per bucket, including the overflow slot.
        assert_eq!(hist.bucket_counts(), vec![1, 1, 1, 1]);
    }

    #[test]
    fn test_metrics_collector() {
        let metrics = MetricsCollector::new(MetricsConfig::default());
        metrics.record_decision(GateDecision::Permit, 500);
        metrics.record_decision(GateDecision::Permit, 600);
        metrics.record_decision(GateDecision::Deny, 1000);
        let snap = metrics.snapshot();
        assert_eq!(snap.permits, 2);
        assert_eq!(snap.denies, 1);
        assert_eq!(snap.total_decisions, 3);
    }

    #[test]
    fn test_prometheus_export() {
        let metrics = MetricsCollector::new(MetricsConfig::default());
        metrics.record_decision(GateDecision::Permit, 500);
        let exported = metrics.prometheus_export();
        assert!(exported.contains("ruqu_decisions_total"));
        assert!(exported.contains("permit"));
    }

    #[test]
    fn test_health_checks() {
        let metrics = MetricsCollector::new(MetricsConfig::default());
        // Fresh collector reports healthy and ready.
        assert!(metrics.is_healthy());
        assert!(metrics.is_ready());
        // A burst of fast permits keeps it healthy.
        (0..100).for_each(|_| metrics.record_decision(GateDecision::Permit, 500));
        assert!(metrics.is_healthy());
    }
}

View File

@@ -0,0 +1,332 @@
//! Real Dynamic Min-Cut Integration
//!
//! This module provides integration with the `ruvector-mincut` crate's
//! SubpolynomialMinCut algorithm - the El-Hayek/Henzinger/Li December 2025
//! breakthrough achieving O(n^{o(1)}) amortized update time.
//!
//! When the `structural` feature is enabled, this provides real subpolynomial
//! min-cut computation. Otherwise, falls back to a degree-based heuristic.
#[cfg(not(feature = "structural"))]
use std::collections::HashMap;
/// Vertex identifier for min-cut graphs (widened to u64 at the FFI boundary
/// with the structural backend)
pub type VertexId = u32;
/// Edge weight type
pub type Weight = f64;
/// Result of a min-cut query.
///
/// The fallback (non-`structural`) engine always returns `is_exact = false`
/// with no cut edges or witness.
#[derive(Debug, Clone)]
pub struct MinCutResult {
    /// The minimum cut value
    pub value: f64,
    /// Whether this is an exact result
    pub is_exact: bool,
    /// The cut edges (if computed)
    pub cut_edges: Option<Vec<(VertexId, VertexId)>>,
    /// Witness certificate (BLAKE3 hash over the result's properties)
    pub witness_hash: Option<[u8; 32]>,
}
/// Dynamic min-cut engine using the real El-Hayek/Henzinger/Li algorithm
/// (enabled via the `structural` feature; wraps `ruvector-mincut`).
#[cfg(feature = "structural")]
pub struct DynamicMinCutEngine {
    /// The real subpolynomial min-cut structure
    inner: ruvector_mincut::subpolynomial::SubpolynomialMinCut,
    /// Cached cut value; cleared on every edge mutation
    cached_cut: Option<f64>,
    /// Generation counter, bumped on every mutation, for external cache
    /// invalidation
    generation: u64,
}
#[cfg(feature = "structural")]
impl DynamicMinCutEngine {
    /// Create a new dynamic min-cut engine with a fixed backend configuration
    /// (cut certification on, recourse tracking and parallelism off).
    pub fn new() -> Self {
        use ruvector_mincut::subpolynomial::{SubpolyConfig, SubpolynomialMinCut};
        let config = SubpolyConfig {
            phi: 0.01,
            lambda_max: 1000,
            epsilon: 0.1,
            target_levels: 4,
            track_recourse: false,
            certify_cuts: true,
            parallel: false,
            ..Default::default()
        };
        Self {
            inner: SubpolynomialMinCut::new(config),
            cached_cut: None,
            generation: 0,
        }
    }
    /// Insert an edge; invalidates the cached cut and bumps the generation.
    /// Backend errors are deliberately ignored.
    #[inline]
    pub fn insert_edge(&mut self, u: VertexId, v: VertexId, weight: Weight) {
        let _ = self.inner.insert_edge(u as u64, v as u64, weight);
        self.cached_cut = None;
        self.generation += 1;
    }
    /// Delete an edge; invalidates the cached cut and bumps the generation.
    #[inline]
    pub fn delete_edge(&mut self, u: VertexId, v: VertexId) {
        let _ = self.inner.delete_edge(u as u64, v as u64);
        self.cached_cut = None;
        self.generation += 1;
    }
    /// Update edge weight.
    ///
    /// NOTE(review): implemented as delete-then-insert, so if the edge was
    /// absent this inserts it — confirm against ruvector-mincut semantics.
    #[inline]
    pub fn update_weight(&mut self, u: VertexId, v: VertexId, new_weight: Weight) {
        // Delete and re-insert with new weight
        let _ = self.inner.delete_edge(u as u64, v as u64);
        let _ = self.inner.insert_edge(u as u64, v as u64, new_weight);
        self.cached_cut = None;
        self.generation += 1;
    }
    /// Query the minimum cut value (memoized until the next mutation).
    #[inline]
    pub fn min_cut_value(&mut self) -> f64 {
        if let Some(cached) = self.cached_cut {
            return cached;
        }
        let value = self.inner.min_cut_value();
        self.cached_cut = Some(value);
        value
    }
    /// Query the full min-cut result with a BLAKE3 witness certificate
    /// derived from the result's value/exactness/verification flags.
    pub fn min_cut(&mut self) -> MinCutResult {
        let result = self.inner.min_cut();
        // Compute witness hash from result properties
        let mut hasher = blake3::Hasher::new();
        hasher.update(&result.value.to_le_bytes());
        hasher.update(if result.is_exact { &[1u8] } else { &[0u8] });
        hasher.update(if result.complexity_verified {
            &[1u8]
        } else {
            &[0u8]
        });
        let witness_hash = Some(*hasher.finalize().as_bytes());
        MinCutResult {
            value: result.value,
            is_exact: result.is_exact,
            cut_edges: result.cut_edges.map(|edges| {
                edges
                    .into_iter()
                    .map(|(u, v)| (u as VertexId, v as VertexId))
                    .collect()
            }),
            witness_hash,
        }
    }
    /// Get current generation (for cache coordination)
    pub fn generation(&self) -> u64 {
        self.generation
    }
    /// Check if the graph is connected (by checking min-cut > 0).
    /// NOTE(review): queries the backend directly, bypassing `cached_cut`.
    pub fn is_connected(&self) -> bool {
        // A graph is connected if its min-cut is positive
        self.inner.min_cut_value() > 0.0
    }
    /// Get number of vertices
    pub fn num_vertices(&self) -> usize {
        self.inner.num_vertices()
    }
    /// Get number of edges
    pub fn num_edges(&self) -> usize {
        self.inner.num_edges()
    }
}
#[cfg(feature = "structural")]
impl Default for DynamicMinCutEngine {
    /// Equivalent to [`DynamicMinCutEngine::new`].
    fn default() -> Self {
        DynamicMinCutEngine::new()
    }
}
/// Fallback min-cut engine used when the `structural` feature (and thus
/// ruvector-mincut) is not available; approximates the cut with a
/// min-degree × average-weight heuristic.
#[cfg(not(feature = "structural"))]
pub struct DynamicMinCutEngine {
    /// Undirected edge list keyed by ordered (min, max) vertex pair
    edges: HashMap<(VertexId, VertexId), Weight>,
    /// Per-vertex degree counts
    degrees: HashMap<VertexId, u32>,
    /// Running sum of all edge weights
    total_weight: f64,
    /// Generation counter, bumped on every mutation
    generation: u64,
}
#[cfg(not(feature = "structural"))]
impl DynamicMinCutEngine {
    /// Create a new, empty fallback engine.
    pub fn new() -> Self {
        Self {
            edges: HashMap::new(),
            degrees: HashMap::new(),
            total_weight: 0.0,
            generation: 0,
        }
    }

    /// Insert an undirected edge (stored with endpoints in sorted order).
    ///
    /// Re-inserting an existing edge replaces its weight.
    /// Fix vs. original: `total_weight` is now adjusted on replacement —
    /// previously the map took the new weight while the running total
    /// silently kept the old edge's contribution.
    pub fn insert_edge(&mut self, u: VertexId, v: VertexId, weight: Weight) {
        let key = if u < v { (u, v) } else { (v, u) };
        match self.edges.insert(key, weight) {
            None => {
                // New edge: update degrees and the weight total.
                *self.degrees.entry(u).or_insert(0) += 1;
                *self.degrees.entry(v).or_insert(0) += 1;
                self.total_weight += weight;
            }
            Some(old_weight) => {
                // Existing edge: degrees unchanged, swap the weight term.
                self.total_weight += weight - old_weight;
            }
        }
        self.generation += 1;
    }

    /// Delete an edge (no-op on absent edges, aside from the generation bump).
    ///
    /// NOTE(review): endpoints whose degree reaches zero stay in `degrees`
    /// and keep counting toward `num_vertices()`; confirm that is intended.
    pub fn delete_edge(&mut self, u: VertexId, v: VertexId) {
        let key = if u < v { (u, v) } else { (v, u) };
        if let Some(weight) = self.edges.remove(&key) {
            if let Some(deg) = self.degrees.get_mut(&u) {
                *deg = deg.saturating_sub(1);
            }
            if let Some(deg) = self.degrees.get_mut(&v) {
                *deg = deg.saturating_sub(1);
            }
            self.total_weight -= weight;
        }
        self.generation += 1;
    }

    /// Update the weight of an existing edge (no-op if the edge is absent,
    /// unlike the structural engine's delete-and-reinsert upsert).
    pub fn update_weight(&mut self, u: VertexId, v: VertexId, new_weight: Weight) {
        let key = if u < v { (u, v) } else { (v, u) };
        if let Some(old_weight) = self.edges.get_mut(&key) {
            self.total_weight -= *old_weight;
            *old_weight = new_weight;
            self.total_weight += new_weight;
        }
        self.generation += 1;
    }

    /// Query the minimum cut value (heuristic: min degree * avg weight).
    ///
    /// Takes `&mut self` purely for signature parity with the caching
    /// structural engine.
    pub fn min_cut_value(&mut self) -> f64 {
        if self.degrees.is_empty() || self.edges.is_empty() {
            return 0.0;
        }
        let min_degree = self.degrees.values().copied().min().unwrap_or(0) as f64;
        let avg_weight = self.total_weight / self.edges.len() as f64;
        min_degree * avg_weight
    }

    /// Query full min-cut result (heuristic: not exact, no cut edges, no
    /// witness certificate).
    pub fn min_cut(&mut self) -> MinCutResult {
        MinCutResult {
            value: self.min_cut_value(),
            is_exact: false,
            cut_edges: None,
            witness_hash: None,
        }
    }

    /// Get current generation (bumped on every mutation).
    pub fn generation(&self) -> u64 {
        self.generation
    }

    /// Check if the graph is connected (simplified check).
    ///
    /// NOTE(review): merely tests that at least one edge exists; a graph
    /// with two disjoint components still reports connected.
    pub fn is_connected(&self) -> bool {
        !self.edges.is_empty()
    }

    /// Get number of vertices ever touched by an insert.
    pub fn num_vertices(&self) -> usize {
        self.degrees.len()
    }

    /// Get number of edges currently stored.
    pub fn num_edges(&self) -> usize {
        self.edges.len()
    }
}
#[cfg(not(feature = "structural"))]
impl Default for DynamicMinCutEngine {
    // An empty graph backed by the heuristic fallback engine.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // These tests run against whichever engine the feature flags select,
    // so assertions stay loose enough to hold for both the exact and the
    // heuristic implementation.
    #[test]
    fn test_engine_basic() {
        let mut engine = DynamicMinCutEngine::new();
        // Build a simple triangle
        engine.insert_edge(0, 1, 1.0);
        engine.insert_edge(1, 2, 1.0);
        engine.insert_edge(2, 0, 1.0);
        let cut = engine.min_cut_value();
        assert!(cut > 0.0, "Triangle should have positive min-cut");
        // Add another vertex with single connection
        engine.insert_edge(2, 3, 1.0);
        let cut2 = engine.min_cut_value();
        // Min-cut should be 1 (the single edge to vertex 3)
        // With heuristic, it will be approximately this
        // NOTE(review): this disjunction is satisfied by almost any value;
        // tighten it if the heuristic's bounds are ever pinned down.
        assert!(cut2 <= cut + 0.1 || cut2 >= 0.9);
    }
    #[test]
    fn test_engine_delete() {
        let mut engine = DynamicMinCutEngine::new();
        engine.insert_edge(0, 1, 1.0);
        engine.insert_edge(1, 2, 1.0);
        engine.insert_edge(2, 0, 1.0);
        let gen1 = engine.generation();
        engine.delete_edge(2, 0);
        let gen2 = engine.generation();
        assert!(gen2 > gen1, "Generation should increase on delete");
        assert_eq!(engine.num_edges(), 2);
    }
    #[test]
    fn test_min_cut_result() {
        let mut engine = DynamicMinCutEngine::new();
        engine.insert_edge(0, 1, 2.0);
        engine.insert_edge(1, 2, 3.0);
        let result = engine.min_cut();
        assert!(result.value >= 0.0);
        // Exactness of the certificate depends on which engine is compiled in.
        #[cfg(feature = "structural")]
        assert!(result.is_exact, "With structural feature, should be exact");
        #[cfg(not(feature = "structural"))]
        assert!(!result.is_exact, "Without structural feature, is heuristic");
    }
}

View File

@@ -0,0 +1,349 @@
//! Parallel Processing for 256-Tile Fabric
//!
//! This module provides rayon-based parallel processing for the tile fabric,
//! achieving 4-8× throughput improvement on multi-core systems.
//!
//! ## Architecture
//!
//! ```text
//! Syndromes ──┬──► Tile 0-63 ──┐
//! ├──► Tile 64-127 ──┼──► TileZero Merge ──► Decision
//! ├──► Tile 128-191 ─┤
//! └──► Tile 192-255 ─┘
//! (parallel) (parallel reduce)
//! ```
//!
//! ## Usage
//!
//! ```rust,ignore
//! use ruqu::parallel::{ParallelFabric, ParallelConfig};
//!
//! let config = ParallelConfig::default(); // Auto-detect cores
//! let mut fabric = ParallelFabric::new(config)?;
//!
//! // Process syndromes in parallel
//! let decision = fabric.process_parallel(&syndrome_data)?;
//! ```
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use crate::error::{Result, RuQuError};
use crate::tile::{GateDecision, GateThresholds, SyndromeDelta, TileReport, TileZero, WorkerTile};
/// Configuration for parallel processing
#[derive(Clone, Debug)]
pub struct ParallelConfig {
    /// Number of worker threads (0 = auto-detect via rayon)
    pub num_threads: usize,
    /// Chunk size for parallel iteration (minimum tiles handed to one
    /// thread; see `with_min_len` in the processing path)
    pub chunk_size: usize,
    /// Enable work-stealing scheduler
    /// NOTE(review): currently advisory — nothing in this module reads it;
    /// confirm how it is meant to be wired into the scheduler.
    pub work_stealing: bool,
    /// Tile thresholds
    pub thresholds: GateThresholds,
}
impl Default for ParallelConfig {
fn default() -> Self {
Self {
num_threads: 0, // Auto-detect
chunk_size: 16, // Process 16 tiles per chunk
work_stealing: true,
thresholds: GateThresholds::default(),
}
}
}
impl ParallelConfig {
/// Create config optimized for low latency
pub fn low_latency() -> Self {
Self {
num_threads: 4,
chunk_size: 64, // Larger chunks = less overhead
work_stealing: false, // Predictable scheduling
thresholds: GateThresholds::default(),
}
}
/// Create config optimized for high throughput
pub fn high_throughput() -> Self {
Self {
num_threads: 0, // Use all cores
chunk_size: 8, // Smaller chunks = better load balancing
work_stealing: true,
thresholds: GateThresholds::default(),
}
}
}
/// Parallel fabric for multi-threaded syndrome processing
///
/// Owns 255 worker tiles plus the TileZero coordinator: each syndrome is
/// fanned out to every worker, and the coordinator merges the resulting
/// reports into a single gate decision.
pub struct ParallelFabric {
    /// Worker tiles (256 total, indices 1-255 are workers)
    workers: Vec<WorkerTile>,
    /// TileZero coordinator (tile 0) that merges worker reports
    coordinator: TileZero,
    /// Configuration
    config: ParallelConfig,
    /// Statistics
    stats: ParallelStats,
}
/// Statistics for parallel processing
///
/// All counters are cumulative since construction or the last
/// `reset_stats` call.
#[derive(Clone, Copy, Debug, Default)]
pub struct ParallelStats {
    /// Total syndromes processed (255 per batch, one per worker tile)
    pub total_processed: u64,
    /// Total parallel batches
    pub batches: u64,
    /// Average batch time (nanoseconds; running integer mean)
    pub avg_batch_time_ns: u64,
    /// Peak throughput (syndromes/sec)
    pub peak_throughput: f64,
}
impl ParallelFabric {
    /// Create a new parallel fabric with 255 worker tiles and a TileZero
    /// coordinator.
    ///
    /// With the `parallel` feature, a global rayon pool of
    /// `config.num_threads` threads is requested when the count is
    /// non-zero; failure to install it (e.g. a pool already exists) is
    /// deliberately ignored.
    pub fn new(config: ParallelConfig) -> Result<Self> {
        #[cfg(feature = "parallel")]
        {
            if config.num_threads > 0 {
                rayon::ThreadPoolBuilder::new()
                    .num_threads(config.num_threads)
                    .build_global()
                    .ok(); // already-initialized pool is not an error
            }
        }
        // Tiles 1-255 are workers; tile 0 is the coordinator.
        let workers: Vec<WorkerTile> = (1..=255u8).map(WorkerTile::new).collect();
        let coordinator = TileZero::with_random_key(config.thresholds.clone());
        Ok(Self {
            workers,
            coordinator,
            config,
            stats: ParallelStats::default(),
        })
    }

    /// Process a syndrome batch across all worker tiles in parallel.
    #[cfg(feature = "parallel")]
    pub fn process_parallel(&mut self, syndrome: &SyndromeDelta) -> Result<GateDecision> {
        use std::time::Instant;
        let start = Instant::now();
        // Fan the syndrome out to every worker in parallel.
        let reports: Vec<TileReport> = self
            .workers
            .par_iter_mut()
            .with_min_len(self.config.chunk_size)
            .map(|worker| worker.tick(syndrome))
            .collect();
        // Merge is single-threaded at the coordinator.
        let decision = self.coordinator.merge_reports(reports);
        self.record_batch(start.elapsed().as_nanos() as u64);
        Ok(decision)
    }

    /// Process a syndrome batch sequentially (no `parallel` feature).
    #[cfg(not(feature = "parallel"))]
    pub fn process_parallel(&mut self, syndrome: &SyndromeDelta) -> Result<GateDecision> {
        use std::time::Instant;
        let start = Instant::now();
        let reports: Vec<TileReport> = self
            .workers
            .iter_mut()
            .map(|worker| worker.tick(syndrome))
            .collect();
        let decision = self.coordinator.merge_reports(reports);
        self.record_batch(start.elapsed().as_nanos() as u64);
        Ok(decision)
    }

    /// Update running statistics after one 255-tile batch.
    ///
    /// Fix vs. original: this bookkeeping was duplicated in both
    /// `process_parallel` variants, the sequential variant silently skipped
    /// peak-throughput tracking, and a zero elapsed time produced an
    /// infinite throughput.
    fn record_batch(&mut self, elapsed_ns: u64) {
        self.stats.total_processed += 255;
        self.stats.batches += 1;
        // Incremental integer mean over all batches so far.
        self.stats.avg_batch_time_ns = (self.stats.avg_batch_time_ns
            * (self.stats.batches - 1)
            + elapsed_ns)
            / self.stats.batches;
        if elapsed_ns > 0 {
            let throughput = 255.0 / (elapsed_ns as f64 / 1_000_000_000.0);
            if throughput > self.stats.peak_throughput {
                self.stats.peak_throughput = throughput;
            }
        }
    }

    /// Process multiple syndromes, producing one decision per input.
    ///
    /// The body is identical under both builds (the per-syndrome call is
    /// already feature-gated), so the former duplicated cfg variants were
    /// merged into one. A failed round degrades to `GateDecision::Defer`
    /// rather than aborting the whole batch.
    pub fn process_batch(&mut self, syndromes: &[SyndromeDelta]) -> Result<Vec<GateDecision>> {
        let decisions: Vec<GateDecision> = syndromes
            .iter()
            .map(|s| self.process_parallel(s).unwrap_or(GateDecision::Defer))
            .collect();
        Ok(decisions)
    }

    /// Get processing statistics.
    pub fn stats(&self) -> &ParallelStats {
        &self.stats
    }

    /// Reset statistics to zero.
    pub fn reset_stats(&mut self) {
        self.stats = ParallelStats::default();
    }

    /// Shared access to the TileZero coordinator.
    pub fn coordinator(&self) -> &TileZero {
        &self.coordinator
    }

    /// Mutable access to the TileZero coordinator.
    pub fn coordinator_mut(&mut self) -> &mut TileZero {
        &mut self.coordinator
    }
}
/// Parallel reduce for aggregating tile reports
///
/// Returns `(min_cut, max_shift, e_aggregate)` where `min_cut` is the
/// smallest strictly-positive local cut (or `f64::MAX` if none),
/// `max_shift` the largest shift score, and `e_aggregate` the geometric
/// mean of the e-values (floored at 1e-10 to keep the log finite).
///
/// Fix vs. original: dropped the redundant function-local
/// `use rayon::prelude::*;` — the module already imports it under the
/// same `parallel` feature gate.
#[cfg(feature = "parallel")]
pub fn parallel_aggregate(reports: &[TileReport]) -> (f64, f64, f64) {
    if reports.is_empty() {
        // Neutral aggregates: no cut observed, no shift, unit e-value.
        return (f64::MAX, 0.0, 1.0);
    }
    // Parallel reduction for min_cut (minimum over positive cuts only).
    let min_cut = reports
        .par_iter()
        .map(|r| {
            if r.local_cut > 0.0 {
                r.local_cut
            } else {
                f64::MAX
            }
        })
        .reduce(|| f64::MAX, |a, b| a.min(b));
    // Parallel reduction for shift (maximum).
    let max_shift = reports
        .par_iter()
        .map(|r| r.shift_score)
        .reduce(|| 0.0, |a, b| a.max(b));
    // Geometric mean of e-values via a log2 sum.
    let log_sum: f64 = reports
        .par_iter()
        .map(|r| f64::log2(r.e_value.max(1e-10)))
        .sum();
    let e_aggregate = f64::exp2(log_sum / reports.len() as f64);
    (min_cut, max_shift, e_aggregate)
}
/// Sequential aggregate (fallback)
///
/// Same contract as the parallel version: `(min_cut, max_shift,
/// e_aggregate)` with the minimum taken over positive cuts only and the
/// e-aggregate being the geometric mean of floored e-values.
#[cfg(not(feature = "parallel"))]
pub fn parallel_aggregate(reports: &[TileReport]) -> (f64, f64, f64) {
    if reports.is_empty() {
        // Neutral aggregates: no cut observed, no shift, unit e-value.
        return (f64::MAX, 0.0, 1.0);
    }
    // Single pass: smallest positive cut, largest shift, log2-sum of
    // e-values for the geometric mean.
    let (min_cut, max_shift, log_sum) = reports.iter().fold(
        (f64::MAX, 0.0_f64, 0.0_f64),
        |(cut, shift, logs), r| {
            let cut = if r.local_cut > 0.0 && r.local_cut < cut {
                r.local_cut
            } else {
                cut
            };
            let shift = if r.shift_score > shift {
                r.shift_score
            } else {
                shift
            };
            (cut, shift, logs + f64::log2(r.e_value.max(1e-10)))
        },
    );
    (min_cut, max_shift, f64::exp2(log_sum / reports.len() as f64))
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_parallel_config_default() {
        // Defaults: auto-detected thread count, work stealing enabled.
        let config = ParallelConfig::default();
        assert_eq!(config.num_threads, 0);
        assert!(config.work_stealing);
    }
    #[test]
    fn test_parallel_fabric_creation() {
        // Tile 0 is the coordinator, so exactly 255 workers are created.
        let config = ParallelConfig::default();
        let fabric = ParallelFabric::new(config);
        assert!(fabric.is_ok());
        let fabric = fabric.unwrap();
        assert_eq!(fabric.workers.len(), 255);
    }
    #[test]
    fn test_parallel_process() {
        // Smoke test: one syndrome through the full fan-out/merge path.
        let config = ParallelConfig::default();
        let mut fabric = ParallelFabric::new(config).unwrap();
        let syndrome = SyndromeDelta::new(1, 2, 100);
        let decision = fabric.process_parallel(&syndrome);
        assert!(decision.is_ok());
    }
    #[test]
    fn test_parallel_aggregate() {
        // Reports i = 1..=10: cuts 2..20, shifts 0.05..0.5, e-values 100.
        let reports: Vec<TileReport> = (1..=10)
            .map(|i| {
                let mut r = TileReport::new(i);
                r.local_cut = i as f64 * 2.0;
                r.shift_score = i as f64 * 0.05;
                r.e_value = 100.0;
                r
            })
            .collect();
        let (min_cut, max_shift, e_agg) = parallel_aggregate(&reports);
        assert_eq!(min_cut, 2.0); // First report has 2.0
        assert!((max_shift - 0.5).abs() < 0.001); // Last report has 0.5
        assert!((e_agg - 100.0).abs() < 0.001); // Geometric mean of all-100s
    }
}

View File

@@ -0,0 +1,555 @@
//! Data Model and Schema for ruQu
//!
//! Defines the core data types and a versioned binary log format.
//!
//! ## Binary Format
//!
//! The log format is designed for speed and compactness:
//! - 4-byte magic header: "RUQU"
//! - 1-byte version
//! - Sequence of variable-length records
//!
//! Each record:
//! - 1-byte record type
//! - 4-byte length (little-endian)
//! - Payload bytes
//! - 4-byte CRC32 checksum
use serde::{Deserialize, Serialize};
use std::io::{Read, Write};
/// Current schema version
///
/// Bump whenever the record payload layout changes; readers expose it via
/// `LogReader::version` for forward-compatibility checks.
pub const SCHEMA_VERSION: u8 = 1;
/// Magic header for binary logs (first four bytes of every log file)
pub const LOG_MAGIC: &[u8; 4] = b"RUQU";
// ============================================================================
// CORE DATA TYPES
// ============================================================================
/// A single syndrome measurement round
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SyndromeRound {
    /// Round number (monotonically increasing)
    pub round_id: u64,
    /// Timestamp in nanoseconds since epoch (0 if the clock is unavailable)
    pub timestamp_ns: u64,
    /// Code distance
    pub code_distance: u8,
    /// Detector events in this round
    pub events: Vec<DetectorEvent>,
    /// Optional metadata (defaults when absent in the serialized form)
    #[serde(default)]
    pub metadata: RoundMetadata,
}
impl SyndromeRound {
    /// Create an empty round stamped with the current wall-clock time.
    pub fn new(round_id: u64, code_distance: u8) -> Self {
        // A pre-epoch clock yields 0 rather than a panic.
        let timestamp_ns = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_nanos() as u64)
            .unwrap_or(0);
        Self {
            round_id,
            timestamp_ns,
            code_distance,
            events: Vec::new(),
            metadata: RoundMetadata::default(),
        }
    }

    /// Append a detector event to this round.
    pub fn add_event(&mut self, event: DetectorEvent) {
        self.events.push(event);
    }

    /// Count how many detectors fired in this round.
    pub fn fired_count(&self) -> usize {
        self.events.iter().map(|e| e.fired as usize).sum()
    }
}
/// A detector event within a syndrome round
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DetectorEvent {
    /// Detector index
    pub detector_id: u32,
    /// Whether the detector fired (syndrome bit = 1)
    pub fired: bool,
    /// Measurement confidence (0.0 to 1.0; defaults to 1.0 when the field
    /// is absent in serialized data)
    #[serde(default = "default_confidence")]
    pub confidence: f32,
    /// Spatial coordinates (if known)
    #[serde(default)]
    pub coords: Option<DetectorCoords>,
}
// serde default: a missing `confidence` field means full confidence.
fn default_confidence() -> f32 {
    1.0
}
/// Spatial coordinates of a detector
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub struct DetectorCoords {
    /// X coordinate (column)
    pub x: i16,
    /// Y coordinate (row)
    pub y: i16,
    /// Time slice (for 3D codes; 0 for purely spatial layouts)
    pub t: i16,
}
/// Round metadata
///
/// All fields are serde-defaulted, so records written without metadata
/// deserialize cleanly.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
pub struct RoundMetadata {
    /// Source identifier (free-form; empty when unknown)
    #[serde(default)]
    pub source: String,
    /// Error rate at this round (if known)
    #[serde(default)]
    pub error_rate: Option<f64>,
    /// Whether this round is from a hardware run
    #[serde(default)]
    pub is_hardware: bool,
    /// Injected fault (if any; used for fault-injection experiments)
    #[serde(default)]
    pub injected_fault: Option<String>,
}
/// Boundary identifier for surface codes
///
/// `Virtual` is a matching-only boundary; `Custom` carries a caller-chosen
/// identifier for layouts not covered by the four standard edges.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum BoundaryId {
    /// Left boundary (X logical)
    Left,
    /// Right boundary (X logical)
    Right,
    /// Top boundary (Z logical)
    Top,
    /// Bottom boundary (Z logical)
    Bottom,
    /// Virtual boundary (for matching)
    Virtual,
    /// Custom boundary with ID
    Custom(u32),
}
/// A permit token issued by the gate
///
/// Valid for `ttl_ns` nanoseconds starting at `issued_at_ns`; see
/// `is_valid` / `remaining_ttl_ns`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PermitToken {
    /// Unique token ID
    pub token_id: u64,
    /// Round at which permit was issued
    pub issued_at_round: u64,
    /// Timestamp when issued (ns since epoch)
    pub issued_at_ns: u64,
    /// Time-to-live in nanoseconds
    pub ttl_ns: u64,
    /// Permitted regions (bitmask, one bit per region)
    pub region_mask: u64,
    /// Confidence level
    pub confidence: f32,
    /// Min-cut value at issuance
    pub min_cut_value: f32,
}
impl PermitToken {
    /// True while the token's TTL window is still open at
    /// `current_time_ns` (exclusive of the expiry instant itself).
    pub fn is_valid(&self, current_time_ns: u64) -> bool {
        self.remaining_ttl_ns(current_time_ns) > 0
    }

    /// Nanoseconds of validity left; 0 once expired. All arithmetic
    /// saturates, so neither the expiry sum nor the subtraction can wrap.
    pub fn remaining_ttl_ns(&self, current_time_ns: u64) -> u64 {
        let expiry = self.issued_at_ns.saturating_add(self.ttl_ns);
        expiry.saturating_sub(current_time_ns)
    }
}
/// A gate decision record
///
/// NOTE(review): this shadows the name of `crate::tile::GateDecision`
/// used elsewhere in the crate — distinct types; keep imports explicit.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GateDecision {
    /// Round ID when decision was made
    pub round_id: u64,
    /// Timestamp of decision (ns since epoch)
    pub timestamp_ns: u64,
    /// Decision type
    pub decision: DecisionType,
    /// Processing latency in nanoseconds
    pub latency_ns: u64,
    /// Input metrics
    pub metrics: GateMetrics,
}
/// Decision type
///
/// The three gate outcomes: permit (carrying its token), defer (retry
/// after a wait), or deny (with risk attribution).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum DecisionType {
    /// Operation permitted with token
    Permit(PermitToken),
    /// Operation deferred
    Defer {
        /// Wait time in nanoseconds
        wait_ns: u64,
        /// Uncertainty level
        uncertainty: f32,
    },
    /// Operation denied
    Deny {
        /// Risk level (0-1)
        risk_level: f32,
        /// Affected region bitmask
        affected_regions: u64,
    },
}
/// Metrics used for gate decision
///
/// Snapshot of the inputs that drove a `DecisionType`; stored alongside
/// the decision for audit and replay.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
pub struct GateMetrics {
    /// Min-cut value
    pub min_cut: f32,
    /// Cut value standard deviation
    pub cut_std: f32,
    /// Shift from baseline
    pub shift: f32,
    /// Evidence accumulation
    pub evidence: f32,
    /// Number of fired detectors
    pub fired_count: u32,
    /// Clustering score
    pub clustering: f32,
}
/// A mitigation action taken
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct MitigationAction {
    /// Action ID
    pub action_id: u64,
    /// Timestamp when action was initiated (ns since epoch)
    pub timestamp_ns: u64,
    /// Action type
    pub action_type: ActionTypeSchema,
    /// Target regions
    pub target_regions: Vec<u32>,
    /// Duration in nanoseconds
    pub duration_ns: u64,
    /// Result of the action (may be `Pending` while still executing)
    pub result: ActionResult,
}
/// Action types in schema format
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ActionTypeSchema {
    /// Quarantine a region to prevent error propagation
    QuarantineRegion,
    /// Increase syndrome measurement rounds for higher fidelity
    IncreaseSyndromeRounds,
    /// Switch decoder mode (e.g., from fast to accurate)
    SwitchDecodeMode,
    /// Trigger re-weighting of decoder graph
    TriggerReweight,
    /// Pause learning/write operations during instability
    PauseLearningWrites,
    /// Log event for audit trail
    LogEvent,
    /// Alert human operator
    AlertOperator,
}
/// Result of a mitigation action
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ActionResult {
    /// Action completed successfully
    Success,
    /// Action partially completed
    Partial {
        /// Fraction completed (0.0 to 1.0)
        completed: f32,
    },
    /// Action failed
    Failed {
        /// Reason for failure (human-readable, free-form)
        reason: String,
    },
    /// Action is pending execution
    Pending,
}
// ============================================================================
// BINARY LOG FORMAT
// ============================================================================
/// Record types for binary log
///
/// The discriminants are the on-disk tag bytes; `TryFrom<u8>` is the
/// inverse mapping and rejects every unassigned value.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum RecordType {
    /// Syndrome round record
    SyndromeRound = 1,
    /// Gate decision record
    GateDecision = 2,
    /// Mitigation action record
    MitigationAction = 3,
    /// Checkpoint for log recovery
    Checkpoint = 4,
    /// Configuration snapshot
    Config = 5,
    /// Metrics snapshot
    Metrics = 6,
}

impl TryFrom<u8> for RecordType {
    type Error = ();

    /// Decode a tag byte; unknown tags yield `Err(())`.
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Ok(match value {
            1 => Self::SyndromeRound,
            2 => Self::GateDecision,
            3 => Self::MitigationAction,
            4 => Self::Checkpoint,
            5 => Self::Config,
            6 => Self::Metrics,
            _ => return Err(()),
        })
    }
}
/// Binary log writer
///
/// Emits the "RUQU" magic + schema version on construction, then
/// length-prefixed, CRC32-checked records. Call `finish()` to flush.
pub struct LogWriter<W: Write> {
    // Underlying sink; records are appended sequentially.
    writer: W,
    // Number of records written so far (returned by `finish`).
    record_count: u64,
}
impl<W: Write> LogWriter<W> {
/// Create a new log writer
pub fn new(mut writer: W) -> std::io::Result<Self> {
// Write header
writer.write_all(LOG_MAGIC)?;
writer.write_all(&[SCHEMA_VERSION])?;
Ok(Self {
writer,
record_count: 0,
})
}
/// Write a syndrome round
pub fn write_syndrome(&mut self, round: &SyndromeRound) -> std::io::Result<()> {
let payload = serde_json::to_vec(round)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
self.write_record(RecordType::SyndromeRound, &payload)
}
/// Write a gate decision
pub fn write_decision(&mut self, decision: &GateDecision) -> std::io::Result<()> {
let payload = serde_json::to_vec(decision)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
self.write_record(RecordType::GateDecision, &payload)
}
/// Write a mitigation action
pub fn write_action(&mut self, action: &MitigationAction) -> std::io::Result<()> {
let payload = serde_json::to_vec(action)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
self.write_record(RecordType::MitigationAction, &payload)
}
fn write_record(&mut self, record_type: RecordType, payload: &[u8]) -> std::io::Result<()> {
// Record type (1 byte)
self.writer.write_all(&[record_type as u8])?;
// Length (4 bytes, little-endian)
let len = payload.len() as u32;
self.writer.write_all(&len.to_le_bytes())?;
// Payload
self.writer.write_all(payload)?;
// CRC32 checksum
let crc = crc32fast::hash(payload);
self.writer.write_all(&crc.to_le_bytes())?;
self.record_count += 1;
Ok(())
}
/// Flush and get record count
pub fn finish(mut self) -> std::io::Result<u64> {
self.writer.flush()?;
Ok(self.record_count)
}
}
/// Binary log reader
///
/// Counterpart to `LogWriter`: validates the header on open and streams
/// records until end-of-file.
pub struct LogReader<R: Read> {
    // Underlying source; records are consumed sequentially.
    reader: R,
    // Schema version captured from the file header.
    version: u8,
}
impl<R: Read> LogReader<R> {
    /// Open a log for reading
    ///
    /// Validates the 4-byte "RUQU" magic and captures the schema version;
    /// fails with `InvalidData` on a foreign header, or with the
    /// underlying I/O error on a truncated one.
    pub fn new(mut reader: R) -> std::io::Result<Self> {
        // Read and verify header
        let mut magic = [0u8; 4];
        reader.read_exact(&mut magic)?;
        if &magic != LOG_MAGIC {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "Invalid magic header",
            ));
        }
        let mut version = [0u8; 1];
        reader.read_exact(&mut version)?;
        Ok(Self {
            reader,
            version: version[0],
        })
    }
    /// Get schema version
    pub fn version(&self) -> u8 {
        self.version
    }
    /// Read next record
    ///
    /// Returns `Ok(None)` at a clean end-of-stream (EOF exactly on a
    /// record boundary), `Ok(Some(_))` for each record, and `InvalidData`
    /// on an unknown tag byte or a CRC mismatch.
    pub fn read_record(&mut self) -> std::io::Result<Option<LogRecord>> {
        // Read record type
        let mut type_byte = [0u8; 1];
        match self.reader.read_exact(&mut type_byte) {
            Ok(()) => (),
            // EOF before any byte of a record is a normal end of log.
            Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
            Err(e) => return Err(e),
        }
        let record_type = RecordType::try_from(type_byte[0]).map_err(|_| {
            std::io::Error::new(std::io::ErrorKind::InvalidData, "Unknown record type")
        })?;
        // Read length
        let mut len_bytes = [0u8; 4];
        self.reader.read_exact(&mut len_bytes)?;
        let len = u32::from_le_bytes(len_bytes) as usize;
        // Read payload
        // NOTE(review): `len` comes straight from the stream, so a corrupt
        // length can trigger a multi-GiB allocation before the CRC check
        // ever runs; consider a sanity cap.
        let mut payload = vec![0u8; len];
        self.reader.read_exact(&mut payload)?;
        // Read and verify checksum (CRC covers the payload only)
        let mut crc_bytes = [0u8; 4];
        self.reader.read_exact(&mut crc_bytes)?;
        let stored_crc = u32::from_le_bytes(crc_bytes);
        let computed_crc = crc32fast::hash(&payload);
        if stored_crc != computed_crc {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "CRC mismatch",
            ));
        }
        // Parse payload (JSON-encoded by LogWriter); types without a typed
        // decoder fall through as raw bytes for forward compatibility.
        let record = match record_type {
            RecordType::SyndromeRound => {
                let round: SyndromeRound = serde_json::from_slice(&payload)
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
                LogRecord::Syndrome(round)
            }
            RecordType::GateDecision => {
                let decision: GateDecision = serde_json::from_slice(&payload)
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
                LogRecord::Decision(decision)
            }
            RecordType::MitigationAction => {
                let action: MitigationAction = serde_json::from_slice(&payload)
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
                LogRecord::Action(action)
            }
            _ => LogRecord::Unknown(payload),
        };
        Ok(Some(record))
    }
}
/// A record from the log
///
/// `Unknown` carries the raw payload of a recognized-but-undecoded record
/// type (e.g. `Checkpoint`), preserving bytes for forward compatibility.
#[derive(Debug, Clone)]
pub enum LogRecord {
    /// Syndrome round record
    Syndrome(SyndromeRound),
    /// Gate decision record
    Decision(GateDecision),
    /// Mitigation action record
    Action(MitigationAction),
    /// Unknown record type (for forward compatibility)
    Unknown(Vec<u8>),
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    #[test]
    fn test_syndrome_round() {
        // Two events, only one fired -> fired_count() reports exactly 1.
        let mut round = SyndromeRound::new(1, 5);
        round.add_event(DetectorEvent {
            detector_id: 0,
            fired: true,
            confidence: 0.99,
            coords: Some(DetectorCoords { x: 0, y: 0, t: 0 }),
        });
        round.add_event(DetectorEvent {
            detector_id: 1,
            fired: false,
            confidence: 1.0,
            coords: None,
        });
        assert_eq!(round.fired_count(), 1);
    }
    #[test]
    fn test_permit_token_validity() {
        // Validity window: [1_000_000, 1_100_000) ns.
        let token = PermitToken {
            token_id: 1,
            issued_at_round: 100,
            issued_at_ns: 1000000,
            ttl_ns: 100000,
            region_mask: 0xFF,
            confidence: 0.95,
            min_cut_value: 5.5,
        };
        assert!(token.is_valid(1050000));
        assert!(!token.is_valid(1200000));
        assert_eq!(token.remaining_ttl_ns(1050000), 50000);
    }
    #[test]
    fn test_log_roundtrip() {
        // Write one record, then read it back and verify header + payload.
        let mut buffer = Vec::new();
        // Write
        {
            let mut writer = LogWriter::new(&mut buffer).unwrap();
            let round = SyndromeRound::new(1, 5);
            writer.write_syndrome(&round).unwrap();
            writer.finish().unwrap();
        }
        // Read
        {
            let mut reader = LogReader::new(Cursor::new(&buffer)).unwrap();
            assert_eq!(reader.version(), SCHEMA_VERSION);
            let record = reader.read_record().unwrap().unwrap();
            match record {
                LogRecord::Syndrome(round) => {
                    assert_eq!(round.round_id, 1);
                    assert_eq!(round.code_distance, 5);
                }
                _ => panic!("Expected syndrome record"),
            }
        }
    }
}

462
vendor/ruvector/crates/ruQu/src/stim.rs vendored Normal file
View File

@@ -0,0 +1,462 @@
//! Stim Integration for Real QEC Simulation
//!
//! This module provides integration with the Stim quantum error correction
//! simulator, enabling realistic syndrome generation and testing.
//!
//! ## What is Stim?
//!
//! [Stim](https://github.com/quantumlib/Stim) is Google's high-performance
//! stabilizer circuit simulator for quantum error correction. It can generate
//! realistic syndrome data at rates exceeding 1 billion measurements per second.
//!
//! ## Usage
//!
//! ```rust,ignore
//! use ruqu::stim::{StimSyndromeSource, SurfaceCodeConfig};
//!
//! // Create a surface code syndrome source
//! let config = SurfaceCodeConfig::new(7, 0.001); // distance 7, 0.1% error rate
//! let mut source = StimSyndromeSource::new(config)?;
//!
//! // Generate syndromes
//! for round in 0..1000 {
//! let detectors = source.sample()?;
//! fabric.process(&detectors)?;
//! }
//! ```
//!
//! ## Supported Codes
//!
//! - Surface code (rotated and unrotated)
//! - Repetition code
//! - Color code (planned)
use crate::error::{Result, RuQuError};
use crate::syndrome::DetectorBitmap;
/// Configuration for surface code simulation
#[derive(Clone, Debug)]
pub struct SurfaceCodeConfig {
    /// Code distance (odd integer, typically 3-21)
    pub distance: usize,
    /// Physical error rate (0.0-1.0)
    pub error_rate: f64,
    /// Number of syndrome rounds per measurement
    pub rounds: usize,
    /// Use rotated surface code layout
    pub rotated: bool,
    /// Include measurement errors
    pub measure_errors: bool,
    /// Random seed (None = use system entropy)
    pub seed: Option<u64>,
}

impl SurfaceCodeConfig {
    /// New config: rotated layout, measurement errors on, `rounds = distance`.
    pub fn new(distance: usize, error_rate: f64) -> Self {
        Self {
            distance,
            error_rate,
            rounds: distance,
            rotated: true,
            measure_errors: true,
            seed: None,
        }
    }

    /// Number of data qubits (a d × d grid).
    pub fn data_qubits(&self) -> usize {
        self.distance.pow(2)
    }

    /// Number of syndrome (ancilla) qubits: (d-1)² X-type plus (d-1)²
    /// Z-type in this simplified model.
    /// NOTE(review): a true rotated surface code has d²-1 stabilizers;
    /// this model deliberately uses 2(d-1)² — confirm consumers agree.
    pub fn syndrome_qubits(&self) -> usize {
        let side = self.distance - 1;
        2 * side * side
    }

    /// Detectors fired per round — one per syndrome qubit.
    pub fn detectors_per_round(&self) -> usize {
        self.syndrome_qubits()
    }

    /// Total detectors across all configured rounds.
    pub fn total_detectors(&self) -> usize {
        self.rounds * self.detectors_per_round()
    }

    /// Builder: override the physical error rate.
    pub fn with_error_rate(mut self, rate: f64) -> Self {
        self.error_rate = rate;
        self
    }

    /// Builder: enable measurement errors.
    ///
    /// NOTE(review): the rate argument is currently ignored — measurement
    /// noise is derived from `error_rate` instead; confirm before relying
    /// on the parameter.
    pub fn with_measurement_error_rate(mut self, _rate: f64) -> Self {
        self.measure_errors = true;
        self
    }

    /// Builder: fix the RNG seed for reproducible runs.
    pub fn with_seed(mut self, seed: u64) -> Self {
        self.seed = Some(seed);
        self
    }

    /// Builder: override the number of syndrome rounds.
    pub fn with_rounds(mut self, rounds: usize) -> Self {
        self.rounds = rounds;
        self
    }
}

impl Default for SurfaceCodeConfig {
    /// Distance-5 code at 0.1% physical error rate.
    fn default() -> Self {
        Self::new(5, 0.001)
    }
}
/// Simple pseudo-random number generator (xorshift64)
///
/// Deterministic, seedable and dependency-free; adequate for noise
/// simulation, not for cryptography.
struct Xorshift64 {
    state: u64,
}

impl Xorshift64 {
    /// Seed the generator; a zero seed is remapped because xorshift is a
    /// fixed point at state 0.
    fn new(seed: u64) -> Self {
        let state = if seed == 0 { 0xDEADBEEF } else { seed };
        Self { state }
    }

    /// Advance the state and return the next 64-bit value
    /// (Marsaglia's 13/7/17 shift triple).
    fn next(&mut self) -> u64 {
        self.state ^= self.state << 13;
        self.state ^= self.state >> 7;
        self.state ^= self.state << 17;
        self.state
    }

    /// Next value mapped into [0, 1].
    fn next_f64(&mut self) -> f64 {
        self.next() as f64 / u64::MAX as f64
    }
}
/// Syndrome source using stim-like simulation
///
/// When the `stim` feature is enabled, this uses the actual stim-rs bindings.
/// Otherwise, it provides a compatible fallback implementation.
pub struct StimSyndromeSource {
    // Code/noise parameters; `seed` may be None until construction.
    config: SurfaceCodeConfig,
    // Deterministic PRNG driving all sampling.
    rng: Xorshift64,
    // Rounds sampled since construction or the last reset.
    round: u64,
    /// Cached detector positions for correlation modeling
    /// NOTE(review): populated in `new()` but not read by the visible
    /// sampling paths — confirm it is consumed elsewhere.
    detector_coords: Vec<(usize, usize)>,
}
impl StimSyndromeSource {
    /// Create a new syndrome source.
    ///
    /// When `config.seed` is `None`, a seed is drawn from the system clock
    /// and recorded back into the config so that [`Self::reset`] replays
    /// the identical stream. Fix vs. original: `reset()` fell back to a
    /// fixed seed (12345) and silently diverged from the first run
    /// whenever no explicit seed had been supplied.
    pub fn new(mut config: SurfaceCodeConfig) -> Result<Self> {
        let seed = config.seed.unwrap_or_else(|| {
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map(|d| d.as_nanos() as u64)
                .unwrap_or(12345)
        });
        // Record the seed actually used so reset() is deterministic.
        config.seed = Some(seed);
        // Pre-compute detector coordinates for correlation modeling.
        // NOTE(review): assumes distance >= 1; distance == 0 underflows.
        let mut detector_coords = Vec::new();
        let d = config.distance;
        for r in 0..d - 1 {
            for c in 0..d - 1 {
                // X stabilizers on the base grid.
                detector_coords.push((r, c));
                // Z stabilizers on an offset grid.
                detector_coords.push((r, c + d));
            }
        }
        Ok(Self {
            config,
            rng: Xorshift64::new(seed),
            round: 0,
            detector_coords,
        })
    }

    /// Sample a single syndrome round.
    ///
    /// Each detector fires independently with a probability derived from
    /// the configured error rate; rare correlated "stripe" events are then
    /// overlaid.
    pub fn sample(&mut self) -> Result<DetectorBitmap> {
        let num_detectors = self.config.detectors_per_round();
        let mut bitmap = DetectorBitmap::new(num_detectors);
        // Simulate a depolarizing noise channel with independent detectors.
        for i in 0..num_detectors {
            // In a real surface code the firing probability depends on the
            // neighborhood; this model treats detectors independently.
            let p = self.effective_detection_probability(i);
            if self.rng.next_f64() < p {
                bitmap.set(i, true);
            }
        }
        // Overlay correlated errors (simulates real error patterns).
        self.add_correlated_errors(&mut bitmap);
        self.round += 1;
        Ok(bitmap)
    }

    /// Sample multiple rounds back to back.
    pub fn sample_batch(&mut self, count: usize) -> Result<Vec<DetectorBitmap>> {
        (0..count).map(|_| self.sample()).collect()
    }

    /// Get the number of rounds sampled so far.
    pub fn current_round(&self) -> u64 {
        self.round
    }

    /// Reset to the initial state, replaying the same stream as the first
    /// run (the seed recorded by `new()` is reused).
    pub fn reset(&mut self) {
        self.round = 0;
        // config.seed is always Some after new(); 12345 is a defensive
        // fallback that should never be reached.
        self.rng = Xorshift64::new(self.config.seed.unwrap_or(12345));
    }

    // Private helpers

    /// Per-detector firing probability for the independent-noise model.
    /// The index is currently unused but kept for a future
    /// position-dependent noise profile.
    fn effective_detection_probability(&self, _detector_idx: usize) -> f64 {
        // A detector fires when the syndrome changes: P ≈ 2p(1-p) for a
        // single-qubit error at physical error rate p.
        let p = self.config.error_rate;
        let base_prob = 2.0 * p * (1.0 - p);
        // Measurement errors contribute a smaller extra term when enabled.
        let measure_prob = if self.config.measure_errors {
            p * 0.5
        } else {
            0.0
        };
        (base_prob + measure_prob).min(1.0)
    }

    /// Overlay rare correlated events (cosmic rays, TLS defects, ...) that
    /// flip a whole row or column of detectors.
    fn add_correlated_errors(&mut self, bitmap: &mut DetectorBitmap) {
        // Probability of a correlated event per round.
        let cosmic_ray_prob = 0.001 * self.config.error_rate;
        if self.rng.next_f64() < cosmic_ray_prob {
            // Pick a random row or column on the (d-1)-wide stabilizer grid.
            let is_row = self.rng.next_f64() < 0.5;
            let d = self.config.distance;
            let idx = (self.rng.next() as usize) % (d - 1);
            for i in 0..d - 1 {
                let detector = if is_row {
                    idx * (d - 1) + i
                } else {
                    i * (d - 1) + idx
                };
                if detector < bitmap.detector_count() {
                    // Flip (rather than set) so overlapping noise cancels.
                    let current = bitmap.get(detector);
                    bitmap.set(detector, !current);
                }
            }
        }
    }
}
/// Generate syndrome data matching a specific error pattern
///
/// Deterministic companion to the random syndrome source: instead of
/// sampling noise it constructs the exact detector signature of a chosen
/// physical error, which is useful for decoder unit tests.
pub struct ErrorPatternGenerator {
    // Surface-code layout (distance, detector counts) used to map
    // error positions to detector indices.
    config: SurfaceCodeConfig,
}
impl ErrorPatternGenerator {
    /// Create a new pattern generator for the given code layout.
    pub fn new(config: SurfaceCodeConfig) -> Self {
        Self { config }
    }
    /// Generate syndrome for a single X error at position (row, col)
    ///
    /// An X error lights up its neighboring Z stabilizers; Z-stabilizer
    /// detectors are laid out after the (d-1)^2 X-stabilizer detectors.
    pub fn single_x_error(&self, row: usize, col: usize) -> DetectorBitmap {
        let d = self.config.distance;
        let mut pattern = DetectorBitmap::new(self.config.detectors_per_round());
        // Z stabilizers occupy indices starting after the X block.
        let z_offset = (d - 1) * (d - 1);
        let col_in_range = col < d - 1;
        if col_in_range && row > 0 {
            pattern.set(z_offset + (row - 1) * (d - 1) + col, true);
        }
        if col_in_range && row < d - 1 {
            pattern.set(z_offset + row * (d - 1) + col, true);
        }
        pattern
    }
    /// Generate syndrome for a single Z error at position (row, col)
    ///
    /// A Z error lights up its neighboring X stabilizers, which occupy
    /// the first (d-1)^2 detector indices.
    pub fn single_z_error(&self, row: usize, col: usize) -> DetectorBitmap {
        let d = self.config.distance;
        let mut pattern = DetectorBitmap::new(self.config.detectors_per_round());
        if row < d - 1 {
            if col > 0 {
                pattern.set(row * (d - 1) + (col - 1), true);
            }
            if col < d - 1 {
                pattern.set(row * (d - 1) + col, true);
            }
        }
        pattern
    }
    /// Generate syndrome for a logical X error (horizontal string)
    ///
    /// A logical X is a chain of X errors spanning the code; only the
    /// Z stabilizers along the top boundary detect it.
    pub fn logical_x_error(&self) -> DetectorBitmap {
        let d = self.config.distance;
        let mut pattern = DetectorBitmap::new(self.config.detectors_per_round());
        let z_offset = (d - 1) * (d - 1);
        for col in 0..d - 1 {
            pattern.set(z_offset + col, true);
        }
        pattern
    }
}
/// Statistics about generated syndromes
///
/// Accumulated incrementally via `update`; all counters start at zero
/// through `Default`.
#[derive(Clone, Debug, Default)]
pub struct SyndromeStats {
    /// Total syndromes generated
    pub total_syndromes: u64,
    /// Total detectors fired
    pub total_detections: u64,
    /// Average detection rate
    // Recomputed on every update as total_detections / (rounds * detectors).
    pub avg_detection_rate: f64,
    /// Maximum detections in a single syndrome
    pub max_detections: usize,
    /// Estimated logical error rate
    // NOTE(review): never written by `update` in this file — presumably
    // filled in by an external estimator; confirm against callers.
    pub estimated_logical_error_rate: f64,
}
impl SyndromeStats {
    /// Fold one syndrome round into the running statistics.
    ///
    /// Assumes every bitmap has the same detector count — the average
    /// detection rate is normalized by the most recent bitmap's size.
    pub fn update(&mut self, bitmap: &DetectorBitmap) {
        let fired = bitmap.fired_count();
        self.total_syndromes += 1;
        self.total_detections += fired as u64;
        self.max_detections = self.max_detections.max(fired);
        let total_slots = self.total_syndromes as f64 * bitmap.detector_count() as f64;
        self.avg_detection_rate = self.total_detections as f64 / total_slots;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Distance-7 code: 49 data qubits and a nonzero syndrome count.
    #[test]
    fn test_surface_code_config() {
        let config = SurfaceCodeConfig::new(7, 0.001);
        assert_eq!(config.distance, 7);
        assert_eq!(config.data_qubits(), 49);
        assert!(config.syndrome_qubits() > 0);
    }
    #[test]
    fn test_syndrome_source_creation() {
        let config = SurfaceCodeConfig::new(5, 0.01);
        let source = StimSyndromeSource::new(config);
        assert!(source.is_ok());
    }
    // A sampled round must expose exactly the configured detector count.
    #[test]
    fn test_syndrome_sampling() {
        let config = SurfaceCodeConfig::new(5, 0.1); // High error rate for testing
        let mut source = StimSyndromeSource::new(config).unwrap();
        let bitmap = source.sample().unwrap();
        // Should have correct number of detectors
        assert_eq!(bitmap.detector_count(), source.config.detectors_per_round());
    }
    #[test]
    fn test_syndrome_batch() {
        let config = SurfaceCodeConfig::new(5, 0.01);
        let mut source = StimSyndromeSource::new(config).unwrap();
        let batch = source.sample_batch(100).unwrap();
        assert_eq!(batch.len(), 100);
    }
    // Deterministic patterns: a single error touches at most 4 stabilizers;
    // a logical operator must touch at least one boundary stabilizer.
    #[test]
    fn test_error_pattern_generator() {
        let config = SurfaceCodeConfig::new(5, 0.01);
        let gen = ErrorPatternGenerator::new(config);
        let x_error = gen.single_x_error(2, 2);
        assert!(x_error.fired_count() <= 4); // At most 4 neighboring stabilizers
        let logical = gen.logical_x_error();
        assert!(logical.fired_count() > 0);
    }
    // At 10% error rate over 100 rounds, some detections are essentially
    // guaranteed, so the average rate must be positive.
    #[test]
    fn test_syndrome_stats() {
        let mut stats = SyndromeStats::default();
        let config = SurfaceCodeConfig::new(5, 0.1);
        let mut source = StimSyndromeSource::new(config).unwrap();
        for _ in 0..100 {
            let bitmap = source.sample().unwrap();
            stats.update(&bitmap);
        }
        assert_eq!(stats.total_syndromes, 100);
        assert!(stats.avg_detection_rate > 0.0);
    }
    // Sanity checks on the xorshift generator's stream and float range.
    #[test]
    fn test_xorshift_rng() {
        let mut rng = Xorshift64::new(12345);
        // Should produce different values
        let a = rng.next();
        let b = rng.next();
        assert_ne!(a, b);
        // f64 should be in [0, 1)
        for _ in 0..100 {
            let f = rng.next_f64();
            assert!(f >= 0.0 && f < 1.0);
        }
    }
}

File diff suppressed because it is too large Load Diff

2124
vendor/ruvector/crates/ruQu/src/tile.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,468 @@
//! Standard Interface Traits for ruQu
//!
//! These traits define the pluggable interfaces for ruQu, allowing:
//! - Different syndrome sources (simulators, hardware)
//! - Different gate engines (min-cut, heuristic, ML)
//! - Different action sinks (logging, hardware control)
//!
//! This keeps the core logic stable while data sources and backends change.
use crate::syndrome::DetectorBitmap;
use std::time::Duration;
/// Error type for trait implementations
///
/// Shared failure vocabulary for all pluggable backends (syndrome
/// sources, gate engines, action sinks).
#[derive(Debug, Clone, thiserror::Error)]
pub enum TraitError {
    /// Source has no more data
    #[error("Source exhausted")]
    SourceExhausted,
    /// Hardware communication error
    #[error("Hardware error: {0}")]
    HardwareError(String),
    /// Configuration error
    #[error("Configuration error: {0}")]
    ConfigError(String),
    /// Operation timed out
    #[error("Timeout after {0:?}")]
    Timeout(Duration),
}
/// Result type for trait operations
pub type TraitResult<T> = Result<T, TraitError>;
// ============================================================================
// SYNDROME SOURCE TRAIT
// ============================================================================
/// A source of syndrome data (detector events)
///
/// Implementations can be:
/// - Stim-based simulator
/// - File replay
/// - Hardware interface
/// - Network stream
pub trait SyndromeSource: Send {
    /// Sample the next syndrome round
    fn sample(&mut self) -> TraitResult<DetectorBitmap>;
    /// Get the number of detectors per round
    fn num_detectors(&self) -> usize;
    /// Get the code distance (if known)
    ///
    /// Defaults to `None` for sources that do not know their code layout.
    fn code_distance(&self) -> Option<usize> {
        None
    }
    /// Check if the source is exhausted (for finite sources)
    ///
    /// Infinite sources (simulators, hardware) keep the default `false`.
    fn is_exhausted(&self) -> bool {
        false
    }
    /// Reset the source to the beginning (if supported)
    ///
    /// The default implementation reports reset as unsupported.
    fn reset(&mut self) -> TraitResult<()> {
        Err(TraitError::ConfigError("Reset not supported".into()))
    }
    /// Get source metadata
    fn metadata(&self) -> SourceMetadata {
        SourceMetadata::default()
    }
}
/// Metadata about a syndrome source
///
/// Purely descriptive; unknown fields default to empty/`None` via
/// `Default`.
#[derive(Debug, Clone, Default)]
pub struct SourceMetadata {
    /// Human-readable name
    pub name: String,
    /// Code distance
    pub code_distance: Option<usize>,
    /// Error rate (if known)
    pub error_rate: Option<f64>,
    /// Number of rounds (if finite)
    pub total_rounds: Option<u64>,
    /// Source version/format
    pub version: String,
}
// ============================================================================
// TELEMETRY SOURCE TRAIT
// ============================================================================
/// A source of telemetry data (temperature, timing, etc.)
pub trait TelemetrySource: Send {
    /// Get current telemetry snapshot
    fn snapshot(&self) -> TelemetrySnapshot;
    /// Check if telemetry indicates a problem
    ///
    /// The default implementation never raises an alert.
    fn has_alert(&self) -> bool {
        false
    }
}
/// Telemetry data snapshot
///
/// Readings are optional or may be empty when the hardware does not
/// report them.
#[derive(Debug, Clone, Default)]
pub struct TelemetrySnapshot {
    /// Timestamp in nanoseconds since epoch
    pub timestamp_ns: u64,
    /// Fridge temperature in Kelvin (if available)
    pub fridge_temp_k: Option<f64>,
    /// Qubit temperatures (per qubit, if available)
    pub qubit_temps: Vec<f64>,
    /// Readout fidelity estimates
    pub readout_fidelity: Vec<f64>,
    /// Gate error estimates
    pub gate_errors: Vec<f64>,
    /// Custom key-value pairs
    pub custom: Vec<(String, f64)>,
}
// ============================================================================
// GATE ENGINE TRAIT
// ============================================================================
/// A gate decision engine
///
/// Takes syndrome data and produces permit/defer/deny decisions.
pub trait GateEngine: Send {
    /// Process a syndrome round and return a decision
    fn process(&mut self, syndrome: &DetectorBitmap) -> GateDecision;
    /// Get the current risk assessment
    fn risk_assessment(&self) -> RiskAssessment;
    /// Update thresholds or parameters
    ///
    /// May fail (e.g. `TraitError::ConfigError`) if the engine rejects
    /// the new configuration.
    fn update_config(&mut self, config: GateConfig) -> TraitResult<()>;
    /// Get engine statistics
    fn statistics(&self) -> EngineStatistics;
    /// Reset engine state
    fn reset(&mut self);
}
/// Gate decision output
///
/// Three-valued outcome of a coherence assessment: permit the action,
/// defer for more data, or deny outright.
#[derive(Debug, Clone, PartialEq)]
pub enum GateDecision {
    /// Permit the operation - low risk
    Permit {
        /// Confidence level (0.0 to 1.0)
        confidence: f64,
        /// Time-to-live in nanoseconds
        ttl_ns: u64,
        /// Optional explanation
        reason: Option<String>,
    },
    /// Defer - uncertain, need more data
    Defer {
        /// Suggested wait time in nanoseconds
        wait_ns: u64,
        /// Uncertainty level
        uncertainty: f64,
    },
    /// Deny - high risk detected
    Deny {
        /// Risk level (0.0 to 1.0)
        risk_level: f64,
        /// Recommended action
        recommended_action: String,
        /// Affected regions (bitmask or list)
        affected_regions: Vec<u32>,
    },
}
impl Default for GateDecision {
    /// Conservative default: defer for 1µs with maximal uncertainty.
    fn default() -> Self {
        Self::Defer {
            uncertainty: 1.0,
            wait_ns: 1000,
        }
    }
}
/// Risk assessment from the gate engine
///
/// All scalar risk values are expressed on a 0.0 (safe) to 1.0
/// (critical) scale.
#[derive(Debug, Clone, Default)]
pub struct RiskAssessment {
    /// Overall risk level (0.0 = safe, 1.0 = critical)
    pub overall_risk: f64,
    /// Structural risk (from min-cut)
    pub structural_risk: f64,
    /// Temporal risk (from recent history)
    pub temporal_risk: f64,
    /// Spatial risk (from region clustering)
    pub spatial_risk: f64,
    /// Risk per region
    // Pairs of (region id, risk level).
    pub region_risks: Vec<(u32, f64)>,
    /// Confidence in assessment
    pub confidence: f64,
}
/// Gate engine configuration
///
/// Tunable thresholds consumed by a [`GateEngine`] via `update_config`.
#[derive(Debug, Clone)]
pub struct GateConfig {
    /// Minimum cut threshold for permit
    pub min_cut_threshold: f64,
    /// Maximum shift for permit
    pub max_shift: f64,
    /// Permit tau threshold
    pub tau_permit: f64,
    /// Deny tau threshold
    pub tau_deny: f64,
    /// Permit time-to-live in ns
    pub permit_ttl_ns: u64,
}
impl Default for GateConfig {
    /// Moderate defaults suitable for simulation runs.
    fn default() -> Self {
        GateConfig {
            permit_ttl_ns: 100_000,
            tau_deny: 0.7,
            tau_permit: 0.3,
            max_shift: 0.2,
            min_cut_threshold: 5.0,
        }
    }
}
/// Statistics from the gate engine
///
/// For engines that emit exactly one decision per round,
/// `permits + defers + denies` should equal `total_rounds`.
#[derive(Debug, Clone, Default)]
pub struct EngineStatistics {
    /// Total rounds processed
    pub total_rounds: u64,
    /// Permits issued
    pub permits: u64,
    /// Defers issued
    pub defers: u64,
    /// Denies issued
    pub denies: u64,
    /// Average processing time in nanoseconds
    pub avg_process_ns: f64,
    /// P99 processing time in nanoseconds
    pub p99_process_ns: u64,
    /// P999 processing time in nanoseconds
    pub p999_process_ns: u64,
    /// Max processing time in nanoseconds
    pub max_process_ns: u64,
}
// ============================================================================
// ACTION SINK TRAIT
// ============================================================================
/// A sink for mitigation actions
///
/// Receives actions from the gate engine and executes them.
pub trait ActionSink: Send {
    /// Execute an action
    fn execute(&mut self, action: &MitigationAction) -> TraitResult<ActionResult>;
    /// Check if an action is supported
    fn supports(&self, action_type: ActionType) -> bool;
    /// Get sink capabilities
    fn capabilities(&self) -> ActionCapabilities;
}
/// Types of mitigation actions
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ActionType {
    /// Quarantine a region
    QuarantineRegion,
    /// Increase syndrome measurement rounds
    IncreaseSyndromeRounds,
    /// Switch decoder mode
    SwitchDecodeMode,
    /// Trigger re-weighting
    TriggerReweight,
    /// Pause learning/writes
    PauseLearningWrites,
    /// Log event
    LogEvent,
    /// Alert operator
    AlertOperator,
    /// Inject test error
    InjectTestError,
}
/// A mitigation action to execute
#[derive(Debug, Clone)]
pub struct MitigationAction {
    /// Action type
    pub action_type: ActionType,
    /// Target region(s)
    pub target_regions: Vec<u32>,
    /// Parameters (action-specific)
    pub parameters: ActionParameters,
    /// Priority (higher = more urgent)
    pub priority: u8,
    /// Preconditions that must be true
    ///
    /// An empty list means the action is unconditional.
    pub preconditions: Vec<Precondition>,
    /// Estimated cost
    pub estimated_cost: ActionCost,
    /// Expected effect
    pub expected_effect: String,
}
/// Action parameters
#[derive(Debug, Clone, Default)]
pub struct ActionParameters {
    /// Duration in nanoseconds (if applicable)
    pub duration_ns: Option<u64>,
    /// Intensity level (0.0 to 1.0)
    pub intensity: Option<f64>,
    /// Custom key-value pairs
    pub custom: Vec<(String, String)>,
}
/// Precondition for an action
#[derive(Debug, Clone)]
pub enum Precondition {
    /// Risk level must be above threshold
    RiskAbove(f64),
    /// Risk level must be below threshold
    RiskBelow(f64),
    /// Region must be in specified state
    RegionState(u32, String),
    /// Time since last action of this type
    TimeSinceLastAction(ActionType, Duration),
    /// Custom condition
    Custom(String),
}
/// Cost estimate for an action
///
/// `Default` gives an all-zero (free) cost.
#[derive(Debug, Clone, Default)]
pub struct ActionCost {
    /// Time cost in nanoseconds
    pub time_ns: u64,
    /// Qubit overhead (extra qubits needed)
    pub qubit_overhead: u32,
    /// Fidelity impact (0.0 = no impact, 1.0 = total loss)
    pub fidelity_impact: f64,
    /// Throughput impact (0.0 = no impact, 1.0 = total stop)
    pub throughput_impact: f64,
}
/// Result of executing an action
#[derive(Debug, Clone)]
pub struct ActionResult {
    /// Whether the action succeeded
    pub success: bool,
    /// Actual cost incurred
    pub actual_cost: ActionCost,
    /// Any warnings or notes
    pub notes: Vec<String>,
}
/// Capabilities of an action sink
#[derive(Debug, Clone, Default)]
pub struct ActionCapabilities {
    /// Supported action types
    pub supported_actions: Vec<ActionType>,
    /// Maximum concurrent actions
    pub max_concurrent: u32,
    /// Minimum action interval in nanoseconds
    ///
    /// 0 means the sink imposes no rate limit.
    pub min_interval_ns: u64,
}
// ============================================================================
// CONVENIENCE IMPLEMENTATIONS
// ============================================================================
/// Null syndrome source for testing
///
/// Always yields an all-zero bitmap of the configured size; it never
/// fails and never exhausts.
pub struct NullSyndromeSource {
    // Fixed width of every emitted bitmap.
    num_detectors: usize,
}
impl NullSyndromeSource {
    /// Create a new null syndrome source
    pub fn new(num_detectors: usize) -> Self {
        Self { num_detectors }
    }
}
impl SyndromeSource for NullSyndromeSource {
    /// Returns an empty (no detectors fired) bitmap every round.
    fn sample(&mut self) -> TraitResult<DetectorBitmap> {
        Ok(DetectorBitmap::new(self.num_detectors))
    }
    fn num_detectors(&self) -> usize {
        self.num_detectors
    }
}
/// Logging action sink
///
/// Executes every action by printing it to stdout; useful for tests and
/// development. Every action type is "supported" in the sense that it is
/// logged rather than actually carried out.
pub struct LoggingActionSink {
    // Prefix prepended to every log line.
    log_prefix: String,
}
impl LoggingActionSink {
    /// Create a new logging action sink with given prefix
    pub fn new(prefix: &str) -> Self {
        Self {
            log_prefix: prefix.to_string(),
        }
    }
}
impl ActionSink for LoggingActionSink {
    /// Log the action and report success at zero cost.
    fn execute(&mut self, action: &MitigationAction) -> TraitResult<ActionResult> {
        println!(
            "{}: {:?} on regions {:?}",
            self.log_prefix, action.action_type, action.target_regions
        );
        Ok(ActionResult {
            success: true,
            actual_cost: ActionCost::default(),
            notes: vec![],
        })
    }
    fn supports(&self, _action_type: ActionType) -> bool {
        // Every action type is accepted (and merely logged).
        true
    }
    fn capabilities(&self) -> ActionCapabilities {
        ActionCapabilities {
            // BUGFIX: previously advertised only LogEvent/AlertOperator even
            // though `supports` accepts every action type; callers routing by
            // capabilities would wrongly skip this sink. Keep the two views
            // consistent by listing all variants.
            supported_actions: vec![
                ActionType::QuarantineRegion,
                ActionType::IncreaseSyndromeRounds,
                ActionType::SwitchDecodeMode,
                ActionType::TriggerReweight,
                ActionType::PauseLearningWrites,
                ActionType::LogEvent,
                ActionType::AlertOperator,
                ActionType::InjectTestError,
            ],
            max_concurrent: 100,
            min_interval_ns: 0,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // The null source must report its size and produce empty syndromes.
    #[test]
    fn test_null_syndrome_source() {
        let mut source = NullSyndromeSource::new(100);
        let syndrome = source.sample().unwrap();
        assert_eq!(syndrome.fired_count(), 0);
        assert_eq!(source.num_detectors(), 100);
    }
    // The conservative default decision is Defer.
    #[test]
    fn test_gate_decision_default() {
        let decision = GateDecision::default();
        match decision {
            GateDecision::Defer { .. } => (),
            _ => panic!("Default should be Defer"),
        }
    }
    // The logging sink accepts and "executes" any action successfully.
    #[test]
    fn test_logging_action_sink() {
        let mut sink = LoggingActionSink::new("[TEST]");
        let action = MitigationAction {
            action_type: ActionType::LogEvent,
            target_regions: vec![1, 2, 3],
            parameters: ActionParameters::default(),
            priority: 5,
            preconditions: vec![],
            estimated_cost: ActionCost::default(),
            expected_effect: "Log the event".into(),
        };
        let result = sink.execute(&action).unwrap();
        assert!(result.success);
    }
}

854
vendor/ruvector/crates/ruQu/src/types.rs vendored Normal file
View File

@@ -0,0 +1,854 @@
//! Core domain types for the ruQu coherence gate system
//!
//! This module defines the fundamental types used throughout the coherence
//! gate, including decisions, masks, identifiers, and result structures.
use serde::{Deserialize, Serialize};
// ═══════════════════════════════════════════════════════════════════════════
// Identifier Types
// ═══════════════════════════════════════════════════════════════════════════
/// Cycle identifier - monotonically increasing per measurement cycle
pub type CycleId = u64;
/// Round identifier - syndrome measurement round within a cycle
pub type RoundId = u64;
/// Tile identifier (0 = TileZero coordinator, 1-255 = worker tiles)
// u8 deliberately bounds the tile space to 256, matching RegionMask.
pub type TileId = u8;
/// Vertex identifier in the operational graph
pub type VertexId = u64;
/// Edge identifier in the operational graph
pub type EdgeId = u64;
/// Action identifier for permit tokens
// String-typed so callers can use any stable naming scheme.
pub type ActionId = String;
/// Decision sequence number
pub type SequenceId = u64;
// ═══════════════════════════════════════════════════════════════════════════
// Gate Decision Types
// ═══════════════════════════════════════════════════════════════════════════
/// The three-valued gate decision outcome
///
/// This is the primary output of the coherence assessment:
/// - `Safe`: System is coherent, action is authorized
/// - `Cautious`: Uncertainty detected, elevated monitoring recommended
/// - `Unsafe`: Incoherence detected, region should be quarantined
// Serialized in lowercase ("safe"/"cautious"/"unsafe") for wire formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum GateDecision {
    /// System is coherent enough to trust action
    ///
    /// All three filters passed with sufficient margin.
    /// A permit token will be issued.
    Safe,
    /// Uncertainty detected, proceed with caution
    ///
    /// One or more filters are in warning range but not failing.
    /// Elevated monitoring and conservative decoder recommended.
    Cautious,
    /// Incoherence detected, quarantine region
    ///
    /// At least one filter has hard-failed.
    /// The affected region should be isolated from action.
    Unsafe,
}
impl GateDecision {
    /// True only for [`GateDecision::Safe`]; any other outcome blocks action.
    #[inline]
    pub fn permits_action(&self) -> bool {
        *self == GateDecision::Safe
    }
    /// True for [`GateDecision::Cautious`] and [`GateDecision::Unsafe`]:
    /// anything that is not outright safe must be escalated.
    #[inline]
    pub fn requires_escalation(&self) -> bool {
        match self {
            GateDecision::Safe => false,
            GateDecision::Cautious | GateDecision::Unsafe => true,
        }
    }
    /// True only for [`GateDecision::Unsafe`], which isolates the region.
    #[inline]
    pub fn requires_quarantine(&self) -> bool {
        *self == GateDecision::Unsafe
    }
    /// Convert to cognitum-gate-tilezero compatible decision
    #[cfg(feature = "tilezero")]
    pub fn to_tilezero(&self) -> cognitum_gate_tilezero::GateDecision {
        use cognitum_gate_tilezero::GateDecision as Tz;
        match self {
            GateDecision::Safe => Tz::Permit,
            GateDecision::Cautious => Tz::Defer,
            GateDecision::Unsafe => Tz::Deny,
        }
    }
}
impl std::fmt::Display for GateDecision {
    // Uppercase labels for logs and operator-facing output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            GateDecision::Safe => "SAFE",
            GateDecision::Cautious => "CAUTIOUS",
            GateDecision::Unsafe => "UNSAFE",
        };
        f.write_str(label)
    }
}
impl Default for GateDecision {
    /// Conservative default: treat unknown state as [`GateDecision::Cautious`].
    fn default() -> Self {
        GateDecision::Cautious
    }
}
// ═══════════════════════════════════════════════════════════════════════════
// Verdict (Simplified Decision)
// ═══════════════════════════════════════════════════════════════════════════
/// Simplified verdict for filter outcomes
///
/// Used internally by individual filters before combining into GateDecision.
// Unlike GateDecision, serde keeps the variant names as-is (no rename_all).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum Verdict {
    /// Filter passed, action permitted
    Permit,
    /// Filter inconclusive, defer to human/stronger model
    Defer,
    /// Filter failed, action denied
    Deny,
}
impl Verdict {
    /// Map a per-filter verdict onto the combined gate-decision scale.
    pub fn to_gate_decision(&self) -> GateDecision {
        match self {
            Verdict::Permit => GateDecision::Safe,
            Verdict::Defer => GateDecision::Cautious,
            Verdict::Deny => GateDecision::Unsafe,
        }
    }
}
impl std::fmt::Display for Verdict {
    // Lowercase labels, mirroring the serialized form of decisions.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let text = match self {
            Verdict::Permit => "permit",
            Verdict::Defer => "defer",
            Verdict::Deny => "deny",
        };
        f.write_str(text)
    }
}
// ═══════════════════════════════════════════════════════════════════════════
// Region Mask (256-bit)
// ═══════════════════════════════════════════════════════════════════════════
/// 256-bit mask identifying affected tiles/regions
///
/// Each bit corresponds to a tile ID (0-255). Used to indicate which
/// regions are affected by a decision, which need quarantine, etc.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct RegionMask {
    /// Four 64-bit words covering 256 bits
    // Word i holds tiles [64*i, 64*i + 63]; bit j of a word is tile 64*i + j.
    bits: [u64; 4],
}
impl RegionMask {
    /// Split a tile id into its 64-bit word index and a single-bit mask.
    #[inline]
    fn word_and_mask(tile: TileId) -> (usize, u64) {
        ((tile >> 6) as usize, 1u64 << (tile & 63))
    }
    /// Create a mask with all bits clear (no regions)
    #[inline]
    pub const fn none() -> Self {
        Self { bits: [0; 4] }
    }
    /// Create a mask with all bits set (all regions)
    #[inline]
    pub const fn all() -> Self {
        Self {
            bits: [u64::MAX; 4],
        }
    }
    /// Create a mask from a slice of tile IDs
    pub fn from_tiles(tiles: &[TileId]) -> Self {
        tiles.iter().fold(Self::none(), |mut mask, &tile| {
            mask.set(tile);
            mask
        })
    }
    /// Create a mask from raw bits
    #[inline]
    pub const fn from_bits(bits: [u64; 4]) -> Self {
        Self { bits }
    }
    /// Get the raw bits
    #[inline]
    pub const fn bits(&self) -> [u64; 4] {
        self.bits
    }
    /// Set a specific tile bit
    #[inline]
    pub fn set(&mut self, tile: TileId) {
        let (word, mask) = Self::word_and_mask(tile);
        self.bits[word] |= mask;
    }
    /// Clear a specific tile bit
    #[inline]
    pub fn clear(&mut self, tile: TileId) {
        let (word, mask) = Self::word_and_mask(tile);
        self.bits[word] &= !mask;
    }
    /// Check if a specific tile is set
    #[inline]
    pub fn is_set(&self, tile: TileId) -> bool {
        let (word, mask) = Self::word_and_mask(tile);
        self.bits[word] & mask != 0
    }
    /// Count the number of set bits
    #[inline]
    pub fn count(&self) -> u32 {
        self.bits.iter().map(|word| word.count_ones()).sum()
    }
    /// Check if mask is empty (no tiles set)
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.bits == [0u64; 4]
    }
    /// Check if mask is full (all tiles set)
    #[inline]
    pub fn is_full(&self) -> bool {
        self.bits == [u64::MAX; 4]
    }
    /// Compute union (OR) with another mask
    #[inline]
    pub fn union(&self, other: &RegionMask) -> RegionMask {
        let mut bits = self.bits;
        for (dst, src) in bits.iter_mut().zip(other.bits.iter()) {
            *dst |= src;
        }
        RegionMask { bits }
    }
    /// Compute intersection (AND) with another mask
    #[inline]
    pub fn intersection(&self, other: &RegionMask) -> RegionMask {
        let mut bits = self.bits;
        for (dst, src) in bits.iter_mut().zip(other.bits.iter()) {
            *dst &= src;
        }
        RegionMask { bits }
    }
    /// Check if this mask intersects with another
    #[inline]
    pub fn intersects(&self, other: &RegionMask) -> bool {
        !self.intersection(other).is_empty()
    }
    /// Compute complement (NOT) of this mask
    #[inline]
    pub fn complement(&self) -> RegionMask {
        let mut bits = self.bits;
        for word in bits.iter_mut() {
            *word = !*word;
        }
        RegionMask { bits }
    }
    /// Iterate over set tile IDs in ascending order
    pub fn iter_set(&self) -> impl Iterator<Item = TileId> + '_ {
        (0u8..=255).filter(|&tile| self.is_set(tile))
    }
}
impl Default for RegionMask {
    /// The empty mask (no tiles set).
    fn default() -> Self {
        Self::none()
    }
}
impl std::fmt::Display for RegionMask {
    // Compact display: only the popcount, not the individual tile ids.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "RegionMask({} tiles)", self.count())
    }
}
impl std::ops::BitOr for RegionMask {
    type Output = Self;
    /// `a | b` is the union of the two masks.
    fn bitor(self, rhs: Self) -> Self::Output {
        self.union(&rhs)
    }
}
impl std::ops::BitAnd for RegionMask {
    type Output = Self;
    /// `a & b` is the intersection of the two masks.
    fn bitand(self, rhs: Self) -> Self::Output {
        self.intersection(&rhs)
    }
}
impl std::ops::Not for RegionMask {
    type Output = Self;
    /// `!a` is the complement of the mask.
    fn not(self) -> Self::Output {
        self.complement()
    }
}
// ═══════════════════════════════════════════════════════════════════════════
// Permit Token
// ═══════════════════════════════════════════════════════════════════════════
/// A signed permit token authorizing action on coherent regions
///
/// Tokens are issued by TileZero when the coherence gate decides SAFE.
/// They include cryptographic proof of the decision for audit purposes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PermitToken {
    /// Decision that led to this permit
    pub decision: GateDecision,
    /// Action being permitted
    pub action_id: ActionId,
    /// Regions covered by this permit
    pub region_mask: RegionMask,
    /// Timestamp of issuance (nanoseconds since epoch)
    pub issued_at: u64,
    /// Expiration timestamp (nanoseconds since epoch)
    pub expires_at: u64,
    /// Sequence number for ordering
    pub sequence: SequenceId,
    /// Blake3 hash of the witness data
    // Serialized as a 64-char lowercase hex string (see `hex_array`).
    #[serde(with = "hex_array")]
    pub witness_hash: [u8; 32],
    /// Ed25519 signature (64 bytes)
    // Serialized as a 128-char lowercase hex string (see `hex_array`).
    #[serde(with = "hex_array")]
    pub signature: [u8; 64],
}
impl PermitToken {
    /// Check if token is currently valid (not expired)
    ///
    /// Valid on the half-open interval `[issued_at, expires_at)`.
    pub fn is_valid(&self, now_ns: u64) -> bool {
        (self.issued_at..self.expires_at).contains(&now_ns)
    }
    /// Total lifetime of the token in nanoseconds (issuance to expiry),
    /// saturating at 0 if `expires_at` precedes `issued_at`.
    pub fn ttl_ns(&self) -> u64 {
        self.expires_at.saturating_sub(self.issued_at)
    }
    /// Check if token covers a specific tile
    pub fn covers_tile(&self, tile: TileId) -> bool {
        self.region_mask.is_set(tile)
    }
}
// ═══════════════════════════════════════════════════════════════════════════
// Filter Results
// ═══════════════════════════════════════════════════════════════════════════
/// Combined results from all three filters
///
/// See `FilterResults::verdict` for how the three are combined.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FilterResults {
    /// Structural filter (min-cut) result
    pub structural: StructuralResult,
    /// Shift filter (drift detection) result
    pub shift: ShiftResult,
    /// Evidence filter (e-value) result
    pub evidence: EvidenceResult,
}
impl FilterResults {
    /// Combine the three filter verdicts: Deny dominates, then Defer;
    /// Permit only when all three filters permit.
    pub fn verdict(&self) -> Verdict {
        let verdicts = [
            self.structural.verdict,
            self.shift.verdict,
            self.evidence.verdict,
        ];
        if verdicts.contains(&Verdict::Deny) {
            Verdict::Deny
        } else if verdicts.contains(&Verdict::Defer) {
            Verdict::Defer
        } else {
            Verdict::Permit
        }
    }
    /// Unweighted mean of the three per-filter confidences (0.0 - 1.0).
    pub fn confidence(&self) -> f64 {
        (self.structural.confidence + self.shift.confidence + self.evidence.confidence) / 3.0
    }
}
/// Result from structural (min-cut) filter
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StructuralResult {
    /// Filter verdict
    pub verdict: Verdict,
    /// Confidence score (0.0 - 1.0)
    pub confidence: f64,
    /// Computed min-cut value
    pub cut_value: f64,
    /// Threshold used for comparison
    pub threshold: f64,
    /// Edges in the min-cut (boundary)
    pub boundary_edges: Vec<EdgeId>,
}
/// Result from shift (drift detection) filter
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ShiftResult {
    /// Filter verdict
    pub verdict: Verdict,
    /// Confidence score (0.0 - 1.0)
    pub confidence: f64,
    /// Computed shift pressure
    pub shift_pressure: f64,
    /// Threshold used for comparison
    pub threshold: f64,
    /// Regions with elevated shift
    pub affected_regions: RegionMask,
}
/// Result from evidence (e-value) filter
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvidenceResult {
    /// Filter verdict
    pub verdict: Verdict,
    /// Confidence score (0.0 - 1.0)
    pub confidence: f64,
    /// Computed e-value
    pub e_value: f64,
    /// Deny threshold (τ_deny)
    pub tau_deny: f64,
    /// Permit threshold (τ_permit)
    // Both thresholds are echoed back so decisions are auditable.
    pub tau_permit: f64,
}
// ═══════════════════════════════════════════════════════════════════════════
// Thresholds Configuration
// ═══════════════════════════════════════════════════════════════════════════
/// Threshold configuration for the coherence gate
///
/// Call `validate()` after construction/deserialization to enforce the
/// documented ranges.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GateThresholds {
    // Structural filter thresholds
    /// Minimum cut value for structural stability
    pub min_cut: f64,
    // Shift filter thresholds
    /// Maximum shift pressure before deferral
    pub max_shift: f64,
    // Evidence filter thresholds
    /// E-value threshold for denial (below this = deny)
    pub tau_deny: f64,
    /// E-value threshold for permit (above this = permit)
    pub tau_permit: f64,
    // Timing configuration
    /// Permit token TTL in nanoseconds
    pub permit_ttl_ns: u64,
    /// Decision budget in nanoseconds
    pub decision_budget_ns: u64,
}
impl Default for GateThresholds {
    /// Conservative defaults; these values satisfy `validate()`.
    fn default() -> Self {
        Self {
            min_cut: 5.0,
            max_shift: 0.5,
            tau_deny: 0.01,
            tau_permit: 100.0,
            permit_ttl_ns: 60_000_000_000, // 60 seconds
            decision_budget_ns: 4_000, // 4 microseconds
        }
    }
}
/// Minimum permit TTL in nanoseconds (1 millisecond)
const MIN_PERMIT_TTL_NS: u64 = 1_000_000;
/// Maximum permit TTL in nanoseconds (1 hour)
const MAX_PERMIT_TTL_NS: u64 = 3_600_000_000_000;
/// Minimum decision budget in nanoseconds (100 nanoseconds)
const MIN_DECISION_BUDGET_NS: u64 = 100;
/// Maximum decision budget in nanoseconds (1 second)
const MAX_DECISION_BUDGET_NS: u64 = 1_000_000_000;
impl GateThresholds {
/// Validate thresholds
///
/// Checks that all threshold values are within acceptable bounds.
pub fn validate(&self) -> crate::error::Result<()> {
if self.min_cut <= 0.0 {
return Err(crate::error::RuQuError::InvalidThreshold {
name: "min_cut".to_string(),
value: self.min_cut,
constraint: "> 0".to_string(),
});
}
if self.max_shift <= 0.0 || self.max_shift > 1.0 {
return Err(crate::error::RuQuError::InvalidThreshold {
name: "max_shift".to_string(),
value: self.max_shift,
constraint: "in (0, 1]".to_string(),
});
}
if self.tau_deny <= 0.0 {
return Err(crate::error::RuQuError::InvalidThreshold {
name: "tau_deny".to_string(),
value: self.tau_deny,
constraint: "> 0".to_string(),
});
}
if self.tau_permit <= self.tau_deny {
return Err(crate::error::RuQuError::InvalidThreshold {
name: "tau_permit".to_string(),
value: self.tau_permit,
constraint: format!("> tau_deny ({})", self.tau_deny),
});
}
// SECURITY: Validate timing parameters to prevent DoS or overflow
if self.permit_ttl_ns < MIN_PERMIT_TTL_NS || self.permit_ttl_ns > MAX_PERMIT_TTL_NS {
return Err(crate::error::RuQuError::InvalidThreshold {
name: "permit_ttl_ns".to_string(),
value: self.permit_ttl_ns as f64,
constraint: format!("in [{}, {}]", MIN_PERMIT_TTL_NS, MAX_PERMIT_TTL_NS),
});
}
if self.decision_budget_ns < MIN_DECISION_BUDGET_NS
|| self.decision_budget_ns > MAX_DECISION_BUDGET_NS
{
return Err(crate::error::RuQuError::InvalidThreshold {
name: "decision_budget_ns".to_string(),
value: self.decision_budget_ns as f64,
constraint: format!(
"in [{}, {}]",
MIN_DECISION_BUDGET_NS, MAX_DECISION_BUDGET_NS
),
});
}
Ok(())
}
}
// ═══════════════════════════════════════════════════════════════════════════
// Helper modules for serde
// ═══════════════════════════════════════════════════════════════════════════
mod hex_array {
use serde::{Deserialize, Deserializer, Serializer};
pub fn serialize<S, const N: usize>(bytes: &[u8; N], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let hex_string: String = bytes.iter().map(|b| format!("{:02x}", b)).collect();
serializer.serialize_str(&hex_string)
}
pub fn deserialize<'de, D, const N: usize>(deserializer: D) -> Result<[u8; N], D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
// SECURITY: Validate hex string length to prevent panic on odd-length strings
if s.len() % 2 != 0 {
return Err(serde::de::Error::custom(format!(
"hex string must have even length, got {}",
s.len()
)));
}
let bytes: Vec<u8> = (0..s.len())
.step_by(2)
.map(|i| u8::from_str_radix(&s[i..i + 2], 16))
.collect::<Result<Vec<_>, _>>()
.map_err(serde::de::Error::custom)?;
bytes.try_into().map_err(|_| {
serde::de::Error::custom(format!("expected {} bytes, got {}", N, s.len() / 2))
})
}
}
// ═══════════════════════════════════════════════════════════════════════════
// Structural Signal with Dynamics
// ═══════════════════════════════════════════════════════════════════════════
/// Structural signal with cut dynamics (velocity and curvature)
///
/// This captures not just the absolute min-cut value, but also its rate of change.
/// Most early warnings come from **consistent decline** (negative velocity),
/// not just low absolute value. Tracking dynamics improves lead time without
/// increasing false alarms.
///
/// `Default` yields an all-zero signal: no cut, no motion, and an empty
/// (zero-mean, zero-spread) baseline.
///
/// # Example
///
/// ```rust
/// use ruqu::types::StructuralSignal;
///
/// let signal = StructuralSignal {
///     cut: 4.5,
///     velocity: -0.3,  // Declining
///     curvature: -0.1, // Accelerating decline
///     baseline_mean: 6.0,
///     baseline_std: 0.5,
/// };
///
/// // Warning triggers on trend, not threshold alone
/// let is_declining = signal.velocity < 0.0;
/// let is_below_baseline = signal.cut < signal.baseline_mean - 2.0 * signal.baseline_std;
/// ```
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct StructuralSignal {
    /// Current min-cut value (λ)
    pub cut: f64,
    /// Rate of change (Δλ) - positive = improving, negative = degrading
    /// NOTE(review): assumed to be λ-delta per update cycle — confirm the
    /// sampling cadence with the producer of this signal.
    pub velocity: f64,
    /// Acceleration of change (Δ²λ) - second derivative
    pub curvature: f64,
    /// Baseline mean from warmup period
    pub baseline_mean: f64,
    /// Baseline standard deviation
    pub baseline_std: f64,
}
impl StructuralSignal {
    /// Returns `true` when the cut value is trending downward
    /// (strictly negative velocity).
    #[inline]
    pub fn is_degrading(&self) -> bool {
        self.velocity < 0.0
    }

    /// Returns `true` when the current cut sits below the adaptive
    /// threshold `baseline_mean - k * baseline_std`.
    #[inline]
    pub fn is_below_threshold(&self, k: f64) -> bool {
        let adaptive = self.baseline_mean - k * self.baseline_std;
        self.cut < adaptive
    }

    /// Number of baseline standard deviations the current cut sits away
    /// from the baseline mean; 0.0 when the baseline has zero spread
    /// (avoids division by zero).
    #[inline]
    pub fn z_score(&self) -> f64 {
        match self.baseline_std {
            spread if spread == 0.0 => 0.0,
            spread => (self.cut - self.baseline_mean) / spread,
        }
    }

    /// Estimate cycles until the cut crosses `threshold`, by linear
    /// extrapolation of the current velocity.
    ///
    /// Returns `None` when the signal is not declining or the cut has
    /// already reached the threshold.
    pub fn time_to_threshold(&self, threshold: f64) -> Option<f64> {
        if self.velocity >= 0.0 || self.cut <= threshold {
            return None;
        }
        // Remaining distance over (negative) decline rate; both signs flip
        // relative to the naive form, so the result stays positive.
        Some((threshold - self.cut) / self.velocity)
    }
}
impl std::fmt::Display for StructuralSignal {
    /// Formats the signal as `λ=<cut><trend> (v=<velocity>, z=<z_score>σ)`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Trend marker: rising (> +0.1), falling (< -0.1), or roughly flat.
        // NOTE(review): the three literals below appear identical/empty in
        // this view — they are likely directional glyphs (e.g. ↑ ↓ →) lost
        // to an encoding issue; confirm against version control before
        // touching this match.
        let trend = if self.velocity > 0.1 {
            ""
        } else if self.velocity < -0.1 {
            ""
        } else {
            ""
        };
        write!(
            f,
            "λ={:.2}{} (v={:+.2}, z={:+.1}σ)",
            self.cut,
            trend,
            self.velocity,
            self.z_score()
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gate_decision() {
        // Safe: the only decision that lets an action proceed.
        assert!(GateDecision::Safe.permits_action());
        assert!(!GateDecision::Safe.requires_escalation());
        assert!(!GateDecision::Safe.requires_quarantine());
        // Cautious: blocked and escalated, but not quarantined.
        assert!(!GateDecision::Cautious.permits_action());
        assert!(GateDecision::Cautious.requires_escalation());
        assert!(!GateDecision::Cautious.requires_quarantine());
        // Unsafe: blocked, escalated, and quarantined.
        assert!(!GateDecision::Unsafe.permits_action());
        assert!(GateDecision::Unsafe.requires_escalation());
        assert!(GateDecision::Unsafe.requires_quarantine());
    }

    #[test]
    fn test_region_mask_basic() {
        let mut m = RegionMask::none();
        assert_eq!(m.count(), 0);
        assert!(m.is_empty());
        // Set the first, a middle, and the last tile bit.
        for tile in [0, 127, 255] {
            m.set(tile);
        }
        assert_eq!(m.count(), 3);
        assert!(m.is_set(0));
        assert!(m.is_set(127));
        assert!(m.is_set(255));
        assert!(!m.is_set(1));
        // Clearing one bit leaves the others untouched.
        m.clear(127);
        assert!(!m.is_set(127));
        assert_eq!(m.count(), 2);
    }

    #[test]
    fn test_region_mask_from_tiles() {
        let mask = RegionMask::from_tiles(&[1, 5, 10, 200]);
        assert_eq!(mask.count(), 4);
        // Every listed tile is set…
        for tile in [1, 5, 10, 200] {
            assert!(mask.is_set(tile));
        }
        // …and an unlisted one is not.
        assert!(!mask.is_set(0));
    }

    #[test]
    fn test_region_mask_operations() {
        let left = RegionMask::from_tiles(&[1, 2, 3]);
        let right = RegionMask::from_tiles(&[2, 3, 4]);
        // Union covers every tile from either side.
        let both = left | right;
        assert_eq!(both.count(), 4);
        assert!(both.is_set(1));
        assert!(both.is_set(4));
        // Intersection keeps only the shared tiles.
        let shared = left & right;
        assert_eq!(shared.count(), 2);
        assert!(shared.is_set(2));
        assert!(shared.is_set(3));
        assert!(!shared.is_set(1));
        assert!(left.intersects(&right));
    }

    #[test]
    fn test_region_mask_all_none() {
        let full = RegionMask::all();
        assert_eq!(full.count(), 256);
        assert!(full.is_full());
        assert!(full.is_set(0));
        assert!(full.is_set(255));
        let empty = RegionMask::none();
        assert_eq!(empty.count(), 0);
        assert!(empty.is_empty());
        // Complementing the empty mask yields the full mask.
        assert!((!empty).is_full());
    }

    #[test]
    fn test_gate_thresholds_default() {
        // The shipped defaults must satisfy their own validation rules.
        assert!(GateThresholds::default().validate().is_ok());
    }

    #[test]
    fn test_gate_thresholds_invalid() {
        // A negative min_cut violates the "> 0" constraint.
        let mut t = GateThresholds::default();
        t.min_cut = -1.0;
        assert!(t.validate().is_err());
        // tau_permit must stay strictly above tau_deny.
        let mut t = GateThresholds::default();
        t.tau_permit = 0.001;
        assert!(t.validate().is_err());
    }

    #[test]
    fn test_filter_results_verdict() {
        // All three filters vote Permit, so the combined verdict is Permit.
        let structural = StructuralResult {
            verdict: Verdict::Permit,
            confidence: 1.0,
            cut_value: 10.0,
            threshold: 5.0,
            boundary_edges: vec![],
        };
        let shift = ShiftResult {
            verdict: Verdict::Permit,
            confidence: 0.9,
            shift_pressure: 0.1,
            threshold: 0.5,
            affected_regions: RegionMask::none(),
        };
        let evidence = EvidenceResult {
            verdict: Verdict::Permit,
            confidence: 0.95,
            e_value: 150.0,
            tau_deny: 0.01,
            tau_permit: 100.0,
        };
        let results = FilterResults {
            structural,
            shift,
            evidence,
        };
        assert_eq!(results.verdict(), Verdict::Permit);
        assert!(results.confidence() > 0.9);
    }

    #[test]
    fn test_permit_token_validity() {
        let token = PermitToken {
            decision: GateDecision::Safe,
            action_id: "test-action".to_string(),
            region_mask: RegionMask::all(),
            issued_at: 1000,
            expires_at: 2000,
            sequence: 0,
            witness_hash: [0u8; 32],
            signature: [0u8; 64],
        };
        // TTL spans the issue/expiry window.
        assert_eq!(token.ttl_ns(), 1000);
        // Valid strictly within the window, invalid before and after it.
        assert!(token.is_valid(1500));
        assert!(!token.is_valid(500));
        assert!(!token.is_valid(2500));
    }
}