Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,538 @@
//! Gate decision types, thresholds, and three-filter decision logic
//!
//! This module implements the three-filter decision process:
//! 1. Structural filter - based on min-cut analysis
//! 2. Shift filter - drift detection from expected patterns
//! 3. Evidence filter - confidence score threshold
//!
//! ## Performance Optimizations
//!
//! - VecDeque for O(1) history rotation (instead of Vec::remove(0))
//! - Inline score calculation functions
//! - Pre-computed threshold reciprocals for division optimization
//! - Early-exit evaluation order (most likely failures first)
use std::collections::VecDeque;
use serde::{Deserialize, Serialize};
use crate::supergraph::ReducedGraph;
/// Gate decision: Permit, Defer, or Deny
///
/// Serialized in lowercase (`"permit"` / `"defer"` / `"deny"`), matching the
/// `Display` impl below, so logs and wire format agree.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum GateDecision {
    /// Action is permitted - stable enough to proceed
    Permit,
    /// Action is deferred - uncertain, escalate to human/stronger model
    Defer,
    /// Action is denied - unstable or policy-violating
    Deny,
}
impl std::fmt::Display for GateDecision {
    /// Renders the decision in lowercase, mirroring the serde
    /// `rename_all = "lowercase"` wire format.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            GateDecision::Permit => "permit",
            GateDecision::Defer => "defer",
            GateDecision::Deny => "deny",
        };
        f.write_str(label)
    }
}
/// Evidence filter decision
///
/// Intermediate verdict of the e-process evidence filter; not serialized
/// (no serde derives), it is mapped onto a `GateDecision` by the caller.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EvidenceDecision {
    /// Sufficient evidence of coherence
    Accept,
    /// Insufficient evidence either way
    Continue,
    /// Strong evidence of incoherence
    Reject,
}
/// Filter type in the decision process
///
/// Identifies which of the three stacked filters rejected a request;
/// recorded in `DecisionOutcome::rejected_by`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum DecisionFilter {
    /// Min-cut based structural analysis
    Structural,
    /// Drift detection from patterns
    Shift,
    /// Confidence/evidence threshold
    Evidence,
}
impl std::fmt::Display for DecisionFilter {
    /// Renders the filter name in UpperCamelCase, matching the variant names
    /// (note: unlike `GateDecision`, this is not lowercased).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            DecisionFilter::Structural => "Structural",
            DecisionFilter::Shift => "Shift",
            DecisionFilter::Evidence => "Evidence",
        };
        f.write_str(name)
    }
}
/// Outcome of the three-filter decision process
///
/// Bundles the final decision with per-filter scores so callers can audit
/// why a request was permitted, deferred, or denied.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DecisionOutcome {
    /// The gate decision
    pub decision: GateDecision,
    /// Overall confidence score (0.0 - 1.0)
    pub confidence: f64,
    /// Which filter rejected (if any); `None` for permits
    pub rejected_by: Option<DecisionFilter>,
    /// Reason for rejection (if rejected); human-readable
    pub rejection_reason: Option<String>,
    /// Structural filter score
    pub structural_score: f64,
    /// Shift filter score
    pub shift_score: f64,
    /// Evidence filter score
    pub evidence_score: f64,
    /// Min-cut value from structural analysis
    pub mincut_value: f64,
}
impl DecisionOutcome {
    /// Mean of the three filter scores — the reported confidence for
    /// deferred and denied outcomes.
    ///
    /// Extracted so `defer` and `deny` cannot drift apart (the formula was
    /// previously duplicated in both constructors).
    ///
    /// OPTIMIZATION: Multiply by reciprocal instead of divide
    #[inline]
    fn mean_confidence(structural: f64, shift: f64, evidence: f64) -> f64 {
        (structural + shift + evidence) * (1.0 / 3.0)
    }
    /// Create a permit outcome
    ///
    /// `confidence` is supplied by the caller rather than derived here;
    /// the evaluate path passes the mean of the three scores.
    #[inline]
    pub fn permit(
        confidence: f64,
        structural: f64,
        shift: f64,
        evidence: f64,
        mincut: f64,
    ) -> Self {
        Self {
            decision: GateDecision::Permit,
            confidence,
            rejected_by: None,
            rejection_reason: None,
            structural_score: structural,
            shift_score: shift,
            evidence_score: evidence,
            mincut_value: mincut,
        }
    }
    /// Create a deferred outcome, rejected by `filter` for `reason`.
    #[inline]
    pub fn defer(
        filter: DecisionFilter,
        reason: String,
        structural: f64,
        shift: f64,
        evidence: f64,
        mincut: f64,
    ) -> Self {
        Self {
            decision: GateDecision::Defer,
            confidence: Self::mean_confidence(structural, shift, evidence),
            rejected_by: Some(filter),
            rejection_reason: Some(reason),
            structural_score: structural,
            shift_score: shift,
            evidence_score: evidence,
            mincut_value: mincut,
        }
    }
    /// Create a denied outcome, rejected by `filter` for `reason`.
    #[inline]
    pub fn deny(
        filter: DecisionFilter,
        reason: String,
        structural: f64,
        shift: f64,
        evidence: f64,
        mincut: f64,
    ) -> Self {
        Self {
            decision: GateDecision::Deny,
            confidence: Self::mean_confidence(structural, shift, evidence),
            rejected_by: Some(filter),
            rejection_reason: Some(reason),
            structural_score: structural,
            shift_score: shift,
            evidence_score: evidence,
            mincut_value: mincut,
        }
    }
}
/// Threshold configuration for the gate
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GateThresholds {
    /// E-process level indicating incoherence (default: 0.01)
    pub tau_deny: f64,
    /// E-process level indicating coherence (default: 100.0)
    pub tau_permit: f64,
    /// Minimum cut value for structural stability
    pub min_cut: f64,
    /// Maximum shift pressure before deferral
    pub max_shift: f64,
    /// Permit token TTL in nanoseconds
    pub permit_ttl_ns: u64,
    /// Conformal set size requiring deferral
    /// NOTE(review): not referenced by `ThreeFilterDecision` in this file —
    /// presumably consumed by conformal logic elsewhere; confirm.
    pub theta_uncertainty: f64,
    /// Conformal set size for confident permit
    /// NOTE(review): also unused in this file — confirm against callers.
    pub theta_confidence: f64,
}
impl Default for GateThresholds {
fn default() -> Self {
Self {
tau_deny: 0.01,
tau_permit: 100.0,
min_cut: 5.0,
max_shift: 0.5,
permit_ttl_ns: 60_000_000_000, // 60 seconds
theta_uncertainty: 20.0,
theta_confidence: 5.0,
}
}
}
/// Three-filter decision evaluator
///
/// Implements the core decision logic for the coherence gate:
/// 1. Structural filter - checks min-cut stability
/// 2. Shift filter - detects drift from baseline
/// 3. Evidence filter - validates confidence threshold
///
/// OPTIMIZATION: Uses VecDeque for O(1) history rotation instead of Vec::remove(0)
pub struct ThreeFilterDecision {
    /// Gate thresholds
    thresholds: GateThresholds,
    /// Pre-computed reciprocals for fast division
    /// OPTIMIZATION: Avoid division in hot path
    /// (must be kept in sync with `thresholds` — see `update_thresholds`)
    inv_min_cut: f64,
    inv_max_shift: f64,
    inv_tau_range: f64,
    /// Historical baseline for shift detection; `None` until first set
    /// or first observation
    baseline_mincut: Option<f64>,
    /// Window of recent mincut values for drift detection
    /// OPTIMIZATION: VecDeque for O(1) push_back and pop_front
    mincut_history: VecDeque<f64>,
    /// Maximum history size (window length for the mincut history)
    history_size: usize,
}
impl ThreeFilterDecision {
    /// Compute the cached reciprocals
    /// `(1/min_cut, 1/max_shift, 1/(tau_permit - tau_deny))`.
    ///
    /// Shared by `new` and `update_thresholds` so the two code paths can
    /// never drift apart (the computation was previously duplicated).
    ///
    /// NOTE(review): degenerate thresholds (min_cut == 0.0, max_shift == 0.0,
    /// or tau_permit == tau_deny) yield infinite reciprocals and hence
    /// NaN/inf scores downstream — confirm thresholds are validated upstream.
    #[inline]
    fn reciprocals(thresholds: &GateThresholds) -> (f64, f64, f64) {
        (
            1.0 / thresholds.min_cut,
            1.0 / thresholds.max_shift,
            1.0 / (thresholds.tau_permit - thresholds.tau_deny),
        )
    }
    /// Create a new three-filter decision evaluator
    pub fn new(thresholds: GateThresholds) -> Self {
        // OPTIMIZATION: Pre-compute reciprocals for fast division
        let (inv_min_cut, inv_max_shift, inv_tau_range) = Self::reciprocals(&thresholds);
        Self {
            thresholds,
            inv_min_cut,
            inv_max_shift,
            inv_tau_range,
            baseline_mincut: None,
            // OPTIMIZATION: Use VecDeque for O(1) rotation
            mincut_history: VecDeque::with_capacity(100),
            history_size: 100,
        }
    }
    /// Set baseline min-cut for shift detection
    #[inline]
    pub fn set_baseline(&mut self, baseline: f64) {
        self.baseline_mincut = Some(baseline);
    }
    /// Update history with a new min-cut observation
    ///
    /// OPTIMIZATION: Uses VecDeque for O(1) push/pop instead of Vec::remove(0) which is O(n)
    #[inline]
    pub fn observe_mincut(&mut self, mincut: f64) {
        // OPTIMIZATION: VecDeque::push_back + pop_front is O(1)
        if self.mincut_history.len() >= self.history_size {
            self.mincut_history.pop_front();
        }
        self.mincut_history.push_back(mincut);
        // Update baseline if not set yet. Note this fires on the very first
        // observation, so the initial baseline is just that single value.
        if self.baseline_mincut.is_none() && !self.mincut_history.is_empty() {
            self.baseline_mincut = Some(self.compute_baseline());
        }
    }
    /// Compute baseline (arithmetic mean) from history
    ///
    /// OPTIMIZATION: Uses iterator sum for cache-friendly access
    #[inline]
    fn compute_baseline(&self) -> f64 {
        let len = self.mincut_history.len();
        if len == 0 {
            return 0.0;
        }
        let sum: f64 = self.mincut_history.iter().sum();
        sum / len as f64
    }
    /// Evaluate a request against the three filters
    ///
    /// Filters are applied in order — structural, shift, evidence — and the
    /// first failing filter determines the outcome (deny / defer / deny-or-defer
    /// respectively). Scores for filters not yet evaluated are reported as 0.0.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocals for division,
    /// inline score calculations, early-exit on failures
    #[inline]
    pub fn evaluate(&self, graph: &ReducedGraph) -> DecisionOutcome {
        let mincut_value = graph.global_cut();
        let shift_pressure = graph.aggregate_shift_pressure();
        let e_value = graph.aggregate_evidence();
        // 1. Structural Filter - Min-cut analysis
        // OPTIMIZATION: Use pre-computed reciprocal
        let structural_score = self.compute_structural_score(mincut_value);
        if mincut_value < self.thresholds.min_cut {
            return DecisionOutcome::deny(
                DecisionFilter::Structural,
                format!(
                    "Min-cut {:.3} below threshold {:.3}",
                    mincut_value, self.thresholds.min_cut
                ),
                structural_score,
                0.0,
                0.0,
                mincut_value,
            );
        }
        // 2. Shift Filter - Drift detection
        // OPTIMIZATION: Use pre-computed reciprocal
        let shift_score = self.compute_shift_score(shift_pressure);
        if shift_pressure >= self.thresholds.max_shift {
            return DecisionOutcome::defer(
                DecisionFilter::Shift,
                format!(
                    "Shift pressure {:.3} exceeds threshold {:.3}",
                    shift_pressure, self.thresholds.max_shift
                ),
                structural_score,
                shift_score,
                0.0,
                mincut_value,
            );
        }
        // 3. Evidence Filter - E-value threshold
        // OPTIMIZATION: Use pre-computed reciprocal
        let evidence_score = self.compute_evidence_score(e_value);
        if e_value < self.thresholds.tau_deny {
            return DecisionOutcome::deny(
                DecisionFilter::Evidence,
                format!(
                    "E-value {:.3} below denial threshold {:.3}",
                    e_value, self.thresholds.tau_deny
                ),
                structural_score,
                shift_score,
                evidence_score,
                mincut_value,
            );
        }
        if e_value < self.thresholds.tau_permit {
            return DecisionOutcome::defer(
                DecisionFilter::Evidence,
                format!(
                    "E-value {:.3} below permit threshold {:.3}",
                    e_value, self.thresholds.tau_permit
                ),
                structural_score,
                shift_score,
                evidence_score,
                mincut_value,
            );
        }
        // All filters passed
        // OPTIMIZATION: Multiply by reciprocal
        let confidence = (structural_score + shift_score + evidence_score) * (1.0 / 3.0);
        DecisionOutcome::permit(
            confidence,
            structural_score,
            shift_score,
            evidence_score,
            mincut_value,
        )
    }
    /// Compute structural score from min-cut value
    ///
    /// 1.0 at or above the min-cut threshold; scales linearly to 0.0 below it.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocal, marked inline(always)
    #[inline(always)]
    fn compute_structural_score(&self, mincut_value: f64) -> f64 {
        if mincut_value >= self.thresholds.min_cut {
            1.0
        } else {
            // OPTIMIZATION: Multiply by reciprocal instead of divide
            mincut_value * self.inv_min_cut
        }
    }
    /// Compute shift score from shift pressure
    ///
    /// 1.0 at zero pressure, 0.0 at or above `max_shift`.
    /// NOTE(review): a negative shift pressure would yield a score > 1.0 —
    /// presumably pressure is non-negative; confirm at the source.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocal, marked inline(always)
    #[inline(always)]
    fn compute_shift_score(&self, shift_pressure: f64) -> f64 {
        // OPTIMIZATION: Multiply by reciprocal, use f64::min for branchless
        1.0 - (shift_pressure * self.inv_max_shift).min(1.0)
    }
    /// Compute evidence score from e-value
    ///
    /// Clamped linear ramp: 0.0 at or below `tau_deny`, 1.0 at or above
    /// `tau_permit`.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocal, marked inline(always)
    #[inline(always)]
    fn compute_evidence_score(&self, e_value: f64) -> f64 {
        if e_value >= self.thresholds.tau_permit {
            1.0
        } else if e_value <= self.thresholds.tau_deny {
            0.0
        } else {
            // OPTIMIZATION: Multiply by reciprocal
            (e_value - self.thresholds.tau_deny) * self.inv_tau_range
        }
    }
    /// Get current thresholds
    #[inline]
    pub fn thresholds(&self) -> &GateThresholds {
        &self.thresholds
    }
    /// Get history size
    #[inline(always)]
    pub fn history_len(&self) -> usize {
        self.mincut_history.len()
    }
    /// Get current baseline
    #[inline(always)]
    pub fn baseline(&self) -> Option<f64> {
        self.baseline_mincut
    }
    /// Update thresholds and recompute reciprocals
    ///
    /// OPTIMIZATION: Recomputes cached reciprocals when thresholds change,
    /// via the same helper used by `new` so the caches stay consistent.
    pub fn update_thresholds(&mut self, thresholds: GateThresholds) {
        let (inv_min_cut, inv_max_shift, inv_tau_range) = Self::reciprocals(&thresholds);
        self.inv_min_cut = inv_min_cut;
        self.inv_max_shift = inv_max_shift;
        self.inv_tau_range = inv_tau_range;
        self.thresholds = thresholds;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Display must match the serde lowercase wire format.
    #[test]
    fn test_gate_decision_display() {
        assert_eq!(GateDecision::Permit.to_string(), "permit");
        assert_eq!(GateDecision::Defer.to_string(), "defer");
        assert_eq!(GateDecision::Deny.to_string(), "deny");
    }
    #[test]
    fn test_default_thresholds() {
        let thresholds = GateThresholds::default();
        assert_eq!(thresholds.tau_deny, 0.01);
        assert_eq!(thresholds.tau_permit, 100.0);
        assert_eq!(thresholds.min_cut, 5.0);
    }
    #[test]
    fn test_three_filter_decision() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        // Default graph should permit
        let graph = ReducedGraph::new();
        let outcome = decision.evaluate(&graph);
        // Default graph has high coherence, should permit
        assert_eq!(outcome.decision, GateDecision::Permit);
    }
    // Structural failure is the only filter that denies directly.
    #[test]
    fn test_structural_denial() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        let mut graph = ReducedGraph::new();
        graph.set_global_cut(1.0); // Below min_cut of 5.0
        let outcome = decision.evaluate(&graph);
        assert_eq!(outcome.decision, GateDecision::Deny);
        assert_eq!(outcome.rejected_by, Some(DecisionFilter::Structural));
    }
    // Excess shift pressure defers (escalates) rather than denies.
    #[test]
    fn test_shift_deferral() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        let mut graph = ReducedGraph::new();
        graph.set_shift_pressure(0.8); // Above max_shift of 0.5
        let outcome = decision.evaluate(&graph);
        assert_eq!(outcome.decision, GateDecision::Defer);
        assert_eq!(outcome.rejected_by, Some(DecisionFilter::Shift));
    }
    // E-values between tau_deny and tau_permit are "insufficient evidence".
    #[test]
    fn test_evidence_deferral() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        let mut graph = ReducedGraph::new();
        graph.set_evidence(50.0); // Between tau_deny (0.01) and tau_permit (100.0)
        let outcome = decision.evaluate(&graph);
        assert_eq!(outcome.decision, GateDecision::Defer);
        assert_eq!(outcome.rejected_by, Some(DecisionFilter::Evidence));
    }
    #[test]
    fn test_decision_outcome_creation() {
        let outcome = DecisionOutcome::permit(0.95, 1.0, 0.9, 0.95, 10.0);
        assert_eq!(outcome.decision, GateDecision::Permit);
        assert!(outcome.confidence > 0.9);
        assert!(outcome.rejected_by.is_none());
    }
    #[test]
    fn test_decision_filter_display() {
        assert_eq!(DecisionFilter::Structural.to_string(), "Structural");
        assert_eq!(DecisionFilter::Shift.to_string(), "Shift");
        assert_eq!(DecisionFilter::Evidence.to_string(), "Evidence");
    }
    // Baseline is established automatically on the first observation.
    #[test]
    fn test_baseline_observation() {
        let thresholds = GateThresholds::default();
        let mut decision = ThreeFilterDecision::new(thresholds);
        assert!(decision.baseline().is_none());
        decision.observe_mincut(10.0);
        decision.observe_mincut(12.0);
        decision.observe_mincut(8.0);
        assert!(decision.baseline().is_some());
        assert_eq!(decision.history_len(), 3);
    }
}

View File

@@ -0,0 +1,247 @@
//! Evidence accumulation and filtering
use serde::{Deserialize, Serialize};
/// Aggregated evidence from all tiles
///
/// Running product of tile e-values plus min/max extremes; see `empty`
/// for the neutral initial state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregatedEvidence {
    /// Total accumulated e-value (product of all added values)
    pub e_value: f64,
    /// Number of tiles contributing
    pub tile_count: usize,
    /// Minimum e-value across tiles (+inf until the first `add`)
    pub min_e_value: f64,
    /// Maximum e-value across tiles (-inf until the first `add`)
    pub max_e_value: f64,
}
impl AggregatedEvidence {
/// Create empty evidence
pub fn empty() -> Self {
Self {
e_value: 1.0,
tile_count: 0,
min_e_value: f64::INFINITY,
max_e_value: f64::NEG_INFINITY,
}
}
/// Add evidence from a tile
pub fn add(&mut self, e_value: f64) {
self.e_value *= e_value;
self.tile_count += 1;
self.min_e_value = self.min_e_value.min(e_value);
self.max_e_value = self.max_e_value.max(e_value);
}
}
/// Evidence filter for e-process evaluation
///
/// Maintains a ring buffer of recent (bounded) e-values and their running
/// product, kept in log-space for numerical stability.
///
/// OPTIMIZATION: Uses multiplicative update for O(1) current value maintenance
/// instead of O(n) product computation.
pub struct EvidenceFilter {
    /// Rolling e-value history (ring buffer; stores clamped values)
    history: Vec<f64>,
    /// Current position in ring buffer (next slot to overwrite)
    position: usize,
    /// Capacity of ring buffer (always >= 1, see `new`)
    capacity: usize,
    /// Current accumulated value (maintained incrementally)
    current: f64,
    /// Log-space accumulator for numerical stability
    log_current: f64,
}
impl EvidenceFilter {
    /// Create a new evidence filter with given capacity.
    ///
    /// A capacity of 0 is treated as 1: `update` advances the ring position
    /// modulo the capacity, and a zero capacity would otherwise panic with a
    /// division by zero on the first update.
    pub fn new(capacity: usize) -> Self {
        Self {
            history: Vec::with_capacity(capacity),
            position: 0,
            capacity: capacity.max(1),
            current: 1.0,
            log_current: 0.0,
        }
    }
    /// Update with a new e-value
    ///
    /// OPTIMIZATION: Uses multiplicative update for O(1) complexity
    /// instead of O(n) product recomputation. Falls back to full
    /// recomputation periodically to prevent numerical drift.
    pub fn update(&mut self, e_value: f64) {
        // Bound to prevent overflow/underflow in the log-space accumulator
        let bounded = e_value.clamp(1e-10, 1e10);
        let log_bounded = bounded.ln();
        if self.history.len() < self.capacity {
            // Growing phase: just accumulate
            self.history.push(bounded);
            self.log_current += log_bounded;
        } else {
            // Ring buffer phase: replace the oldest entry, O(1) update
            let old_value = self.history[self.position];
            let old_log = old_value.ln();
            self.history[self.position] = bounded;
            self.log_current = self.log_current - old_log + log_bounded;
        }
        self.position = (self.position + 1) % self.capacity;
        // Convert from log-space
        self.current = self.log_current.exp();
        // Periodic full recomputation for numerical stability: once per full
        // wrap of the ring, i.e. every `capacity` updates (not every 64 — the
        // old comment was stale).
        if self.position == 0 {
            self.recompute_current();
        }
    }
    /// Recompute current value from history (for stability)
    #[inline]
    fn recompute_current(&mut self) {
        self.log_current = self.history.iter().map(|x| x.ln()).sum();
        self.current = self.log_current.exp();
    }
    /// Get current accumulated e-value
    #[inline]
    pub fn current(&self) -> f64 {
        self.current
    }
    /// Get the history of e-values (clamped, in ring-buffer order)
    pub fn history(&self) -> &[f64] {
        &self.history
    }
    /// Compute product using SIMD-friendly parallel lanes
    ///
    /// OPTIMIZATION: Uses log-space arithmetic with parallel accumulators
    /// for better numerical stability and vectorization.
    pub fn current_simd(&self) -> f64 {
        if self.history.is_empty() {
            return 1.0;
        }
        // Use 4 parallel lanes for potential SIMD vectorization
        let mut log_lanes = [0.0f64; 4];
        for (i, &val) in self.history.iter().enumerate() {
            log_lanes[i % 4] += val.ln();
        }
        let log_sum = log_lanes[0] + log_lanes[1] + log_lanes[2] + log_lanes[3];
        log_sum.exp()
    }
}
/// Aggregate 255 tile e-values using SIMD-friendly patterns
///
/// OPTIMIZATION: Uses parallel lane accumulation in log-space
/// for numerical stability when combining many e-values.
///
/// # Arguments
/// * `tile_e_values` - Slice of e-values from worker tiles
///
/// # Returns
/// Aggregated e-value (product in log-space); 1.0 for an empty slice
pub fn aggregate_tiles_simd(tile_e_values: &[f64]) -> f64 {
    if tile_e_values.is_empty() {
        return 1.0;
    }
    // Eight independent log-space accumulators — one per f64 lane of a
    // 256-bit AVX2 register — so the additions can vectorize.
    let mut log_lanes = [0.0f64; 8];
    let chunks = tile_e_values.chunks_exact(8);
    let tail = chunks.remainder();
    for chunk in chunks {
        for (lane, &value) in chunk.iter().enumerate() {
            log_lanes[lane] += value.ln();
        }
    }
    // Fold any leftover values into the lanes round-robin.
    for (i, &value) in tail.iter().enumerate() {
        log_lanes[i % 8] += value.ln();
    }
    // Pairwise (tree) reduction of the lanes, then back out of log-space.
    let low = log_lanes[0] + log_lanes[1] + log_lanes[2] + log_lanes[3];
    let high = log_lanes[4] + log_lanes[5] + log_lanes[6] + log_lanes[7];
    (low + high).exp()
}
/// Compute mixture e-value with adaptive precision
///
/// OPTIMIZATION: Uses different precision strategies based on
/// the magnitude of accumulated evidence for optimal performance.
///
/// # Arguments
/// * `log_e_values` - Log e-values from tiles
/// * `weights` - Optional tile weights (None = uniform). Weights need not be
///   pre-normalized: the result is divided by the total weight, so already
///   normalized weights (summing to 1.0) behave exactly as before.
///
/// # Returns
/// Weighted geometric mean of e-values; 1.0 (neutral) for empty input or
/// zero total weight
pub fn mixture_evalue_adaptive(log_e_values: &[f64], weights: Option<&[f64]>) -> f64 {
    if log_e_values.is_empty() {
        return 1.0;
    }
    let mean_log: f64 = match weights {
        Some(w) => {
            // Weighted mean in log-space. Normalizing by the total weight
            // makes this a true weighted geometric mean (matching the
            // uniform branch, which divides by len); previously the weighted
            // branch skipped the normalization.
            let weighted_sum: f64 = log_e_values
                .iter()
                .zip(w.iter())
                .map(|(&log_e, &weight)| log_e * weight)
                .sum();
            // Only count the weights actually paired with a value (zip
            // truncates to the shorter slice).
            let total_weight: f64 = w.iter().take(log_e_values.len()).sum();
            if total_weight == 0.0 {
                // No usable weight mass: return the neutral e-value.
                return 1.0;
            }
            weighted_sum / total_weight
        }
        None => {
            // Uniform weights - use SIMD pattern (4 parallel lanes)
            let mut lanes = [0.0f64; 4];
            for (i, &log_e) in log_e_values.iter().enumerate() {
                lanes[i % 4] += log_e;
            }
            (lanes[0] + lanes[1] + lanes[2] + lanes[3]) / log_e_values.len() as f64
        }
    };
    mean_log.exp()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Product, count, and extremes after two adds.
    #[test]
    fn test_aggregated_evidence() {
        let mut evidence = AggregatedEvidence::empty();
        evidence.add(2.0);
        evidence.add(3.0);
        assert_eq!(evidence.e_value, 6.0);
        assert_eq!(evidence.tile_count, 2);
        assert_eq!(evidence.min_e_value, 2.0);
        assert_eq!(evidence.max_e_value, 3.0);
    }
    #[test]
    fn test_evidence_filter() {
        let mut filter = EvidenceFilter::new(10);
        filter.update(2.0);
        filter.update(2.0);
        // NOTE(review): exact equality relies on exp(2*ln(2)) rounding to
        // exactly 4.0 — a tolerance would be more robust.
        assert_eq!(filter.current(), 4.0);
    }
}

View File

@@ -0,0 +1,393 @@
//! cognitum-gate-tilezero: TileZero arbiter for the Anytime-Valid Coherence Gate
//!
//! TileZero acts as the central arbiter in the 256-tile WASM fabric, responsible for:
//! - Merging worker tile reports into a supergraph
//! - Making global gate decisions (Permit/Defer/Deny)
//! - Issuing cryptographically signed permit tokens
//! - Maintaining a hash-chained witness receipt log
pub mod decision;
pub mod evidence;
pub mod merge;
pub mod permit;
pub mod receipt;
pub mod supergraph;
pub use decision::{
DecisionFilter, DecisionOutcome, EvidenceDecision, GateDecision, GateThresholds,
ThreeFilterDecision,
};
pub use evidence::{AggregatedEvidence, EvidenceFilter};
pub use merge::{MergeStrategy, MergedReport, ReportMerger, WorkerReport};
pub use permit::{PermitState, PermitToken, TokenDecodeError, Verifier, VerifyError};
pub use receipt::{ReceiptLog, TimestampProof, WitnessReceipt, WitnessSummary};
pub use supergraph::{ReducedGraph, ShiftPressure, StructuralFilter};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use tokio::sync::RwLock;
/// Action identifier (free-form string supplied by the requesting agent)
pub type ActionId = String;
/// Vertex identifier in the coherence graph
pub type VertexId = u64;
/// Edge identifier in the coherence graph
pub type EdgeId = u64;
/// Worker tile identifier (1-255, with 0 reserved for TileZero)
pub type TileId = u8;
/// Context for an action being evaluated by the gate
///
/// Passed to `TileZero::decide`; only `action_id` is read by the decision
/// path in this file — the remaining fields travel with the request for
/// auditing/escalation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionContext {
    /// Unique identifier for this action
    pub action_id: ActionId,
    /// Type of action (e.g., "config_change", "api_call")
    pub action_type: String,
    /// Target of the action
    pub target: ActionTarget,
    /// Additional context
    pub context: ActionMetadata,
}
/// Target of an action
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionTarget {
    /// Target device/resource
    pub device: Option<String>,
    /// Target path
    pub path: Option<String>,
    /// Additional target properties
    /// (`flatten` merges unknown JSON keys into this map rather than
    /// rejecting them)
    #[serde(flatten)]
    pub extra: HashMap<String, serde_json::Value>,
}
/// Metadata about the action context
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionMetadata {
    /// Agent requesting the action
    pub agent_id: String,
    /// Session identifier
    pub session_id: Option<String>,
    /// Prior related actions (defaults to empty when absent from input)
    #[serde(default)]
    pub prior_actions: Vec<ActionId>,
    /// Urgency level (defaults to "normal" when absent from input)
    #[serde(default = "default_urgency")]
    pub urgency: String,
}
/// Default urgency level used when a request omits the `urgency` field.
fn default_urgency() -> String {
    String::from("normal")
}
/// Report from a worker tile
///
/// NOTE(review): `repr(C, align(64))` aligns each report to a cache line;
/// presumably only the alignment matters, since `repr(C)` does not give a
/// stable FFI layout for `Vec`/`Option` fields — confirm intent.
#[repr(C, align(64))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TileReport {
    /// Tile identifier (1-255)
    pub tile_id: TileId,
    /// Local coherence score
    pub coherence: f32,
    /// Whether boundary has moved since last report
    /// (gates whether `witness_fragment` is merged — see `collect_reports`)
    pub boundary_moved: bool,
    /// Top suspicious edges
    pub suspicious_edges: Vec<EdgeId>,
    /// Local e-value accumulator
    pub e_value: f32,
    /// Witness fragment for boundary changes
    pub witness_fragment: Option<WitnessFragment>,
}
/// Fragment of witness data from a worker tile
///
/// Merged into the supergraph when the originating tile reports a boundary
/// move.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WitnessFragment {
    /// Tile that generated this fragment
    pub tile_id: TileId,
    /// Boundary edges in this shard
    pub boundary_edges: Vec<EdgeId>,
    /// Local cut value
    pub cut_value: f32,
}
/// Escalation information for DEFER decisions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EscalationInfo {
    /// Who to escalate to
    pub to: String,
    /// URL for context
    pub context_url: String,
    /// Timeout in nanoseconds
    pub timeout_ns: u64,
    /// Default action on timeout (fails closed: defaults to "deny" when
    /// absent from input)
    #[serde(default = "default_timeout_action")]
    pub default_on_timeout: String,
}
/// Default action taken when an escalation times out (fail closed).
fn default_timeout_action() -> String {
    String::from("deny")
}
/// TileZero: The central arbiter of the coherence gate
///
/// Shared state is split across two independent async `RwLock`s (supergraph
/// and receipt log) plus an atomic sequence counter, so report collection
/// and receipt reads do not contend with each other.
pub struct TileZero {
    /// Reduced supergraph from worker summaries
    supergraph: RwLock<ReducedGraph>,
    /// Canonical permit token state (holds the signing key)
    permit_state: PermitState,
    /// Hash-chained witness receipt log
    receipt_log: RwLock<ReceiptLog>,
    /// Threshold configuration
    thresholds: GateThresholds,
    /// Sequence counter (monotonic; one value per `decide` call)
    sequence: AtomicU64,
}
impl TileZero {
    /// Create a new TileZero arbiter
    pub fn new(thresholds: GateThresholds) -> Self {
        Self {
            supergraph: RwLock::new(ReducedGraph::new()),
            permit_state: PermitState::new(),
            receipt_log: RwLock::new(ReceiptLog::new()),
            thresholds,
            sequence: AtomicU64::new(0),
        }
    }
    /// Collect reports from all worker tiles
    ///
    /// Holds the supergraph write lock for the whole batch, so a concurrent
    /// `decide` observes either all of these reports or none of them.
    pub async fn collect_reports(&self, reports: &[TileReport]) {
        let mut graph = self.supergraph.write().await;
        for report in reports {
            // Witness fragments are only merged when the tile says its
            // boundary actually moved since the previous report.
            if report.boundary_moved {
                if let Some(ref fragment) = report.witness_fragment {
                    graph.update_from_fragment(fragment);
                }
            }
            graph.update_coherence(report.tile_id, report.coherence);
        }
    }
    /// Make a gate decision for an action
    ///
    /// Applies the three stacked filters to the current supergraph, signs
    /// the resulting token, and appends a witness receipt to the hash chain.
    pub async fn decide(&self, action_ctx: &ActionContext) -> PermitToken {
        // SeqCst keeps sequence numbers totally ordered across threads.
        let seq = self.sequence.fetch_add(1, Ordering::SeqCst);
        // NOTE(review): unwrap panics if the system clock is before
        // UNIX_EPOCH — acceptable here only if that is considered a bug.
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos() as u64;
        let graph = self.supergraph.read().await;
        // Three stacked filters:
        // 1. Structural filter (global cut on reduced graph)
        let structural_ok = graph.global_cut() >= self.thresholds.min_cut;
        // 2. Shift filter (aggregated shift pressure)
        let shift_pressure = graph.aggregate_shift_pressure();
        let shift_ok = shift_pressure < self.thresholds.max_shift;
        // 3. Evidence filter
        let e_aggregate = graph.aggregate_evidence();
        let evidence_decision = self.evidence_decision(e_aggregate);
        // Combined decision: structural failure denies outright, shift
        // failure defers, then the evidence verdict settles the rest.
        let decision = match (structural_ok, shift_ok, evidence_decision) {
            (false, _, _) => GateDecision::Deny,
            (_, false, _) => GateDecision::Defer,
            (_, _, EvidenceDecision::Reject) => GateDecision::Deny,
            (_, _, EvidenceDecision::Continue) => GateDecision::Defer,
            (true, true, EvidenceDecision::Accept) => GateDecision::Permit,
        };
        // Compute witness hash
        let witness_summary = graph.witness_summary();
        let witness_hash = witness_summary.hash();
        // Release the read lock before signing/logging to limit contention.
        drop(graph);
        // Create token
        let token = PermitToken {
            decision,
            action_id: action_ctx.action_id.clone(),
            timestamp: now,
            ttl_ns: self.thresholds.permit_ttl_ns,
            witness_hash,
            sequence: seq,
            signature: [0u8; 64], // Will be filled by sign
        };
        // Sign the token
        let signed_token = self.permit_state.sign_token(token);
        // Emit receipt
        self.emit_receipt(&signed_token, &witness_summary).await;
        signed_token
    }
    /// Get evidence decision based on accumulated e-value
    ///
    /// Reject below `tau_deny`, Accept at or above `tau_permit`, Continue
    /// (insufficient evidence either way) in between.
    fn evidence_decision(&self, e_aggregate: f64) -> EvidenceDecision {
        if e_aggregate < self.thresholds.tau_deny {
            EvidenceDecision::Reject
        } else if e_aggregate >= self.thresholds.tau_permit {
            EvidenceDecision::Accept
        } else {
            EvidenceDecision::Continue
        }
    }
    /// Emit a witness receipt
    ///
    /// Links the new receipt to its predecessor by hash, forming the
    /// append-only chain checked by `verify_chain_to`.
    async fn emit_receipt(&self, token: &PermitToken, summary: &WitnessSummary) {
        let mut log = self.receipt_log.write().await;
        let previous_hash = log.last_hash();
        let receipt = WitnessReceipt {
            sequence: token.sequence,
            token: token.clone(),
            previous_hash,
            witness_summary: summary.clone(),
            timestamp_proof: TimestampProof {
                timestamp: token.timestamp,
                previous_receipt_hash: previous_hash,
                merkle_root: [0u8; 32], // Simplified for v0
            },
        };
        log.append(receipt);
    }
    /// Get a receipt by sequence number
    pub async fn get_receipt(&self, sequence: u64) -> Option<WitnessReceipt> {
        let log = self.receipt_log.read().await;
        log.get(sequence).cloned()
    }
    /// Verify the hash chain up to a sequence number
    pub async fn verify_chain_to(&self, sequence: u64) -> Result<(), ChainVerifyError> {
        let log = self.receipt_log.read().await;
        log.verify_chain_to(sequence)
    }
    /// Replay a decision for audit purposes
    ///
    /// NOTE(review): currently a stub — it echoes the recorded decision and
    /// snapshot rather than re-deriving them from checkpointed state.
    pub async fn replay(&self, receipt: &WitnessReceipt) -> ReplayResult {
        // In a full implementation, this would reconstruct state from checkpoints
        // For now, return the original decision
        ReplayResult {
            decision: receipt.token.decision,
            state_snapshot: receipt.witness_summary.clone(),
        }
    }
    /// Get the verifier for token validation
    pub fn verifier(&self) -> Verifier {
        self.permit_state.verifier()
    }
    /// Get the thresholds configuration
    pub fn thresholds(&self) -> &GateThresholds {
        &self.thresholds
    }
    /// Verify the entire receipt chain
    pub async fn verify_receipt_chain(&self) -> Result<(), ChainVerifyError> {
        let log = self.receipt_log.read().await;
        let len = log.len();
        // An empty chain is trivially valid.
        if len == 0 {
            return Ok(());
        }
        log.verify_chain_to(len as u64 - 1)
    }
    /// Export all receipts as JSON
    pub async fn export_receipts_json(&self) -> Result<String, serde_json::Error> {
        let log = self.receipt_log.read().await;
        let receipts: Vec<&WitnessReceipt> = log.iter().collect();
        serde_json::to_string_pretty(&receipts)
    }
}
/// Result of replaying a decision
///
/// Returned by `TileZero::replay` for audit purposes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayResult {
    /// The replayed decision
    pub decision: GateDecision,
    /// State snapshot at decision time
    pub state_snapshot: WitnessSummary,
}
/// Error during chain verification
///
/// Each variant carries the sequence number of the receipt where
/// verification failed.
#[derive(Debug, thiserror::Error)]
pub enum ChainVerifyError {
    #[error("Receipt {sequence} not found")]
    ReceiptNotFound { sequence: u64 },
    #[error("Hash mismatch at sequence {sequence}")]
    HashMismatch { sequence: u64 },
    #[error("Signature verification failed at sequence {sequence}")]
    SignatureInvalid { sequence: u64 },
}
#[cfg(test)]
mod tests {
    use super::*;
    // First decision of a fresh arbiter gets sequence 0.
    #[tokio::test]
    async fn test_tilezero_basic_permit() {
        let thresholds = GateThresholds::default();
        let tilezero = TileZero::new(thresholds);
        let ctx = ActionContext {
            action_id: "test-action-1".to_string(),
            action_type: "config_change".to_string(),
            target: ActionTarget {
                device: Some("router-1".to_string()),
                path: Some("/config".to_string()),
                extra: HashMap::new(),
            },
            context: ActionMetadata {
                agent_id: "agent-1".to_string(),
                session_id: Some("session-1".to_string()),
                prior_actions: vec![],
                urgency: "normal".to_string(),
            },
        };
        let token = tilezero.decide(&ctx).await;
        assert_eq!(token.sequence, 0);
        assert!(!token.action_id.is_empty());
    }
    // Every decision appends a receipt, retrievable by sequence number.
    #[tokio::test]
    async fn test_receipt_chain() {
        let thresholds = GateThresholds::default();
        let tilezero = TileZero::new(thresholds);
        let ctx = ActionContext {
            action_id: "test-action-1".to_string(),
            action_type: "config_change".to_string(),
            target: ActionTarget {
                device: None,
                path: None,
                extra: HashMap::new(),
            },
            context: ActionMetadata {
                agent_id: "agent-1".to_string(),
                session_id: None,
                prior_actions: vec![],
                urgency: "normal".to_string(),
            },
        };
        // Generate multiple decisions
        let _token1 = tilezero.decide(&ctx).await;
        let _token2 = tilezero.decide(&ctx).await;
        // Verify receipts exist
        let receipt0 = tilezero.get_receipt(0).await;
        assert!(receipt0.is_some());
        let receipt1 = tilezero.get_receipt(1).await;
        assert!(receipt1.is_some());
    }
}

View File

@@ -0,0 +1,609 @@
//! Report merging from 255 worker tiles
//!
//! This module handles aggregating partial graph reports from worker tiles
//! into a unified view for supergraph construction.
//!
//! ## Performance Optimizations
//!
//! - Pre-allocated HashMaps with expected capacity (255 workers)
//! - Inline functions for merge strategies
//! - Iterator-based processing to avoid allocations
//! - Sorted slices with binary search for median calculation
//! - Capacity hints for all collections
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use crate::TileId;
/// Expected number of worker tiles for capacity pre-allocation
/// (capacity hint only — more workers are still handled correctly)
const EXPECTED_WORKERS: usize = 255;
/// Expected nodes per worker for capacity hints
const EXPECTED_NODES_PER_WORKER: usize = 16;
/// Expected boundary edges per worker
const EXPECTED_EDGES_PER_WORKER: usize = 32;
/// Epoch identifier for report sequencing
pub type Epoch = u64;
/// Transaction identifier (32-byte hash)
pub type TxId = [u8; 32];
/// Errors during report merging
#[derive(Debug, Clone)]
pub enum MergeError {
    /// Empty report set (merge requires at least one report)
    EmptyReports,
    /// Conflicting epochs in reports (all reports must share one epoch)
    ConflictingEpochs,
    /// Invalid edge weight
    /// NOTE(review): not produced by any code path in this module — confirm callers
    InvalidWeight(String),
    /// Node not found
    /// NOTE(review): not produced by any code path in this module — confirm callers
    NodeNotFound(String),
}
impl std::fmt::Display for MergeError {
    /// Human-readable rendering of each merge-error variant.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MergeError::InvalidWeight(msg) => write!(f, "Invalid edge weight: {}", msg),
            MergeError::NodeNotFound(id) => write!(f, "Node not found: {}", id),
            MergeError::EmptyReports => f.write_str("Empty report set"),
            MergeError::ConflictingEpochs => f.write_str("Conflicting epochs in reports"),
        }
    }
}
impl std::error::Error for MergeError {}
/// Strategy for merging overlapping data from multiple workers
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum MergeStrategy {
    /// Simple average of all values
    SimpleAverage,
    /// Weighted average by tile confidence (node coherence acts as the weight)
    WeightedAverage,
    /// Take the median value
    Median,
    /// Take the maximum value (conservative)
    Maximum,
    /// Byzantine fault tolerant (2/3 agreement; high outliers discarded)
    ByzantineFaultTolerant,
}
/// A node summary from a worker tile
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeSummary {
    /// Node identifier
    pub id: String,
    /// Aggregated weight/importance
    pub weight: f64,
    /// Number of edges in worker's partition
    pub edge_count: usize,
    /// Local coherence score (used as the weight in WeightedAverage merges)
    pub coherence: f64,
}
/// An edge summary from a worker tile (for boundary edges)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EdgeSummary {
    /// Source node ID
    pub source: String,
    /// Target node ID
    pub target: String,
    /// Edge capacity/weight
    pub capacity: f64,
    /// Is this a boundary edge (crosses tile partitions)?
    /// Non-boundary edges are ignored by `ReportMerger::merge_edges`.
    pub is_boundary: bool,
}
/// Report from a worker tile containing partition summary
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct WorkerReport {
    /// Tile identifier (1-255)
    pub tile_id: TileId,
    /// Epoch this report belongs to
    pub epoch: Epoch,
    /// Timestamp when report was generated (unix millis)
    pub timestamp_ms: u64,
    /// Transactions processed in this partition
    pub transactions: Vec<TxId>,
    /// Node summaries for super-nodes
    pub nodes: Vec<NodeSummary>,
    /// Boundary edge summaries
    pub boundary_edges: Vec<EdgeSummary>,
    /// Local min-cut value (within partition)
    pub local_mincut: f64,
    /// Worker's confidence in this report (0.0-1.0)
    /// Note: `Default` yields 0.0 here while `WorkerReport::new` sets 1.0.
    pub confidence: f64,
    /// Hash of the worker's local state (see `compute_state_hash`)
    pub state_hash: [u8; 32],
}
impl WorkerReport {
    /// Construct a fresh report for `tile_id` at `epoch`.
    ///
    /// Collections start empty, numeric fields start at zero, and the
    /// worker's confidence defaults to full (1.0).
    pub fn new(tile_id: TileId, epoch: Epoch) -> Self {
        Self {
            tile_id,
            epoch,
            confidence: 1.0,
            ..Self::default()
        }
    }
    /// Record a node summary in this report.
    pub fn add_node(&mut self, node: NodeSummary) {
        self.nodes.push(node);
    }
    /// Record a boundary-edge summary in this report.
    pub fn add_boundary_edge(&mut self, edge: EdgeSummary) {
        self.boundary_edges.push(edge);
    }
    /// Recompute and store the blake3 hash of this report's identifying
    /// fields: tile id, epoch, node (id, weight) pairs, then boundary
    /// edge (source, target, capacity) triples — in that exact order,
    /// which is part of the hash contract.
    pub fn compute_state_hash(&mut self) {
        let mut digest = blake3::Hasher::new();
        digest.update(&self.tile_id.to_le_bytes());
        digest.update(&self.epoch.to_le_bytes());
        for summary in &self.nodes {
            digest.update(summary.id.as_bytes());
            digest.update(&summary.weight.to_le_bytes());
        }
        for boundary in &self.boundary_edges {
            digest.update(boundary.source.as_bytes());
            digest.update(boundary.target.as_bytes());
            digest.update(&boundary.capacity.to_le_bytes());
        }
        self.state_hash = *digest.finalize().as_bytes();
    }
}
/// Merged report combining data from multiple workers
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergedReport {
    /// Epoch of the merged report
    pub epoch: Epoch,
    /// Number of worker reports merged
    pub worker_count: usize,
    /// Merged super-nodes (aggregated from all workers), keyed by node id
    pub super_nodes: HashMap<String, MergedNode>,
    /// Merged boundary edges (undirected; endpoints are key-ordered)
    pub boundary_edges: Vec<MergedEdge>,
    /// Global min-cut estimate
    pub global_mincut_estimate: f64,
    /// Overall confidence (aggregated; method depends on `strategy`)
    pub confidence: f64,
    /// Merge strategy used
    pub strategy: MergeStrategy,
}
/// A merged super-node aggregated from multiple workers
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergedNode {
    /// Node identifier
    pub id: String,
    /// Aggregated weight (aggregation method depends on the merge strategy)
    pub weight: f64,
    /// Total edge count across workers
    pub total_edge_count: usize,
    /// Average coherence (plain mean over contributing reports)
    pub avg_coherence: f64,
    /// Contributing worker tiles
    pub contributors: Vec<TileId>,
}
/// A merged edge aggregated from boundary reports
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergedEdge {
    /// Source node (lexicographically smaller endpoint after normalization)
    pub source: String,
    /// Target node
    pub target: String,
    /// Aggregated capacity
    pub capacity: f64,
    /// Number of workers reporting this edge
    pub report_count: usize,
}
/// Report merger that combines worker reports
///
/// OPTIMIZATION: Uses capacity hints and inline functions for better performance
pub struct ReportMerger {
    strategy: MergeStrategy,
    /// Pre-allocated scratch buffer for weight calculations
    /// OPTIMIZATION: Reuse allocation across merge operations
    /// NOTE(review): no method in this impl currently reads or writes this
    /// buffer — it appears reserved for future use; confirm before relying on it.
    scratch_weights: Vec<f64>,
}
impl ReportMerger {
    /// Create a new report merger with given strategy
    #[inline]
    pub fn new(strategy: MergeStrategy) -> Self {
        Self {
            strategy,
            // Pre-allocate scratch buffer with expected capacity
            // (currently unused by the merge paths below; see struct docs)
            scratch_weights: Vec::with_capacity(EXPECTED_WORKERS),
        }
    }
    /// Merge multiple worker reports into a unified view
    ///
    /// Returns `MergeError::EmptyReports` for an empty slice and
    /// `MergeError::ConflictingEpochs` when reports span multiple epochs.
    ///
    /// OPTIMIZATION: Pre-allocates all collections with expected capacity
    pub fn merge(&self, reports: &[WorkerReport]) -> Result<MergedReport, MergeError> {
        if reports.is_empty() {
            return Err(MergeError::EmptyReports);
        }
        // Verify all reports are from the same epoch
        // (plain loop returns early on the first mismatching epoch)
        let epoch = reports[0].epoch;
        for r in reports.iter().skip(1) {
            if r.epoch != epoch {
                return Err(MergeError::ConflictingEpochs);
            }
        }
        // Merge nodes - pre-allocate based on expected size
        let super_nodes = self.merge_nodes(reports)?;
        // Merge boundary edges
        let boundary_edges = self.merge_edges(reports)?;
        // Compute global min-cut estimate
        let global_mincut_estimate = self.estimate_global_mincut(reports);
        // Compute aggregated confidence
        let confidence = self.aggregate_confidence(reports);
        Ok(MergedReport {
            epoch,
            worker_count: reports.len(),
            super_nodes,
            boundary_edges,
            global_mincut_estimate,
            confidence,
            strategy: self.strategy,
        })
    }
    /// Merge node summaries from all workers
    ///
    /// Groups every `NodeSummary` by node id, then folds each group into
    /// one `MergedNode` via `merge_single_node`.
    ///
    /// OPTIMIZATION: Pre-allocates HashMap with expected capacity
    #[inline]
    fn merge_nodes(
        &self,
        reports: &[WorkerReport],
    ) -> Result<HashMap<String, MergedNode>, MergeError> {
        // OPTIMIZATION: Estimate total nodes across all reports
        let estimated_nodes = reports.len() * EXPECTED_NODES_PER_WORKER;
        let mut node_data: HashMap<String, Vec<(TileId, &NodeSummary)>> =
            HashMap::with_capacity(estimated_nodes);
        // Collect all node data
        for report in reports {
            for node in &report.nodes {
                node_data
                    .entry(node.id.clone())
                    .or_insert_with(|| Vec::with_capacity(reports.len()))
                    .push((report.tile_id, node));
            }
        }
        // Merge each node
        // OPTIMIZATION: Pre-allocate result HashMap
        let mut merged = HashMap::with_capacity(node_data.len());
        for (id, data) in node_data {
            let merged_node = self.merge_single_node(&id, &data)?;
            merged.insert(id, merged_node);
        }
        Ok(merged)
    }
    /// Merge a single node's data from multiple workers
    ///
    /// `data` is never empty here: `merge_nodes` only creates an entry
    /// when at least one summary exists, so `len_f64` cannot be zero.
    ///
    /// OPTIMIZATION: Uses inline strategy functions and avoids repeated allocations
    #[inline]
    fn merge_single_node(
        &self,
        id: &str,
        data: &[(TileId, &NodeSummary)],
    ) -> Result<MergedNode, MergeError> {
        // OPTIMIZATION: Pre-allocate with exact capacity
        let mut contributors: Vec<TileId> = Vec::with_capacity(data.len());
        contributors.extend(data.iter().map(|(tile, _)| *tile));
        let total_edge_count: usize = data.iter().map(|(_, n)| n.edge_count).sum();
        let len = data.len();
        let len_f64 = len as f64;
        let weight = match self.strategy {
            MergeStrategy::SimpleAverage => {
                // OPTIMIZATION: Single pass sum
                let sum: f64 = data.iter().map(|(_, n)| n.weight).sum();
                sum / len_f64
            }
            MergeStrategy::WeightedAverage => {
                // Coherence-weighted mean; falls back to 0.0 when all
                // coherences are zero (or their sum is non-positive).
                // OPTIMIZATION: Single pass for both sums
                let (weighted_sum, coherence_sum) =
                    data.iter().fold((0.0, 0.0), |(ws, cs), (_, n)| {
                        (ws + n.weight * n.coherence, cs + n.coherence)
                    });
                if coherence_sum > 0.0 {
                    weighted_sum / coherence_sum
                } else {
                    0.0
                }
            }
            MergeStrategy::Median => {
                // OPTIMIZATION: Inline median calculation
                Self::compute_median(data.iter().map(|(_, n)| n.weight))
            }
            MergeStrategy::Maximum => {
                // OPTIMIZATION: Use fold without intermediate iterator
                data.iter()
                    .map(|(_, n)| n.weight)
                    .fold(f64::NEG_INFINITY, f64::max)
            }
            MergeStrategy::ByzantineFaultTolerant => {
                // BFT: mean of the lowest 2/3 of reported weights
                // (discards the highest-value third as potential outliers)
                Self::compute_bft_weight(data.iter().map(|(_, n)| n.weight), len)
            }
        };
        // OPTIMIZATION: Single pass for coherence average
        let avg_coherence = data.iter().map(|(_, n)| n.coherence).sum::<f64>() / len_f64;
        Ok(MergedNode {
            id: id.to_string(),
            weight,
            total_edge_count,
            avg_coherence,
            contributors,
        })
    }
    /// Compute median of an iterator of f64 values
    ///
    /// Returns 0.0 for an empty iterator. NaN values compare as equal
    /// (via `partial_cmp` fallback), so their sorted position is arbitrary.
    ///
    /// OPTIMIZATION: Inline function to avoid heap allocation overhead
    #[inline]
    fn compute_median<I: Iterator<Item = f64>>(iter: I) -> f64 {
        let mut weights: Vec<f64> = iter.collect();
        let len = weights.len();
        if len == 0 {
            return 0.0;
        }
        // OPTIMIZATION: Use unstable sort for f64 (faster, no stability needed)
        weights.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        let mid = len / 2;
        if len % 2 == 0 {
            // SAFETY: mid > 0 when len >= 2 and even
            (weights[mid - 1] + weights[mid]) * 0.5
        } else {
            weights[mid]
        }
    }
    /// Compute Byzantine Fault Tolerant weight: the MEAN of the lowest
    /// 2/3 of the sorted values (the top third is discarded as suspect).
    /// Falls back to the single smallest value when `len < 2` makes the
    /// 2/3 threshold zero. `len` must equal the number of items in `iter`.
    ///
    /// OPTIMIZATION: Inline function with optimized threshold calculation
    #[inline]
    fn compute_bft_weight<I: Iterator<Item = f64>>(iter: I, len: usize) -> f64 {
        let mut weights: Vec<f64> = iter.collect();
        if weights.is_empty() {
            return 0.0;
        }
        weights.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        // 2/3 threshold
        let threshold = (len * 2) / 3;
        if threshold > 0 {
            let sum: f64 = weights.iter().take(threshold).sum();
            sum / threshold as f64
        } else {
            weights[0]
        }
    }
    /// Merge boundary edges from all workers
    ///
    /// Only edges flagged `is_boundary` participate; endpoints are
    /// normalized (smaller string first) so the merge is undirected.
    ///
    /// OPTIMIZATION: Pre-allocates collections, uses inline merge strategies
    #[inline]
    fn merge_edges(&self, reports: &[WorkerReport]) -> Result<Vec<MergedEdge>, MergeError> {
        // OPTIMIZATION: Pre-allocate with expected capacity
        let estimated_edges = reports.len() * EXPECTED_EDGES_PER_WORKER;
        let mut edge_data: HashMap<(String, String), Vec<f64>> =
            HashMap::with_capacity(estimated_edges);
        // Collect all edge data
        for report in reports {
            for edge in &report.boundary_edges {
                if edge.is_boundary {
                    // Normalize edge key (smaller first for undirected)
                    // OPTIMIZATION: Avoid unnecessary clones by checking order first
                    let key = if edge.source <= edge.target {
                        (edge.source.clone(), edge.target.clone())
                    } else {
                        (edge.target.clone(), edge.source.clone())
                    };
                    edge_data
                        .entry(key)
                        .or_insert_with(|| Vec::with_capacity(reports.len()))
                        .push(edge.capacity);
                }
            }
        }
        // Merge each edge
        // OPTIMIZATION: Pre-allocate result vector
        let mut merged = Vec::with_capacity(edge_data.len());
        for ((source, target), capacities) in edge_data {
            let len = capacities.len();
            let capacity = self.merge_capacities(&capacities, len);
            merged.push(MergedEdge {
                source,
                target,
                capacity,
                report_count: len,
            });
        }
        Ok(merged)
    }
    /// Merge capacities according to strategy
    ///
    /// WeightedAverage degrades to a simple average here: edges carry no
    /// per-report coherence weight.
    ///
    /// OPTIMIZATION: Inline function to avoid match overhead in loop
    #[inline(always)]
    fn merge_capacities(&self, capacities: &[f64], len: usize) -> f64 {
        match self.strategy {
            MergeStrategy::SimpleAverage | MergeStrategy::WeightedAverage => {
                capacities.iter().sum::<f64>() / len as f64
            }
            MergeStrategy::Median => Self::compute_median(capacities.iter().copied()),
            MergeStrategy::Maximum => capacities.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b)),
            MergeStrategy::ByzantineFaultTolerant => {
                Self::compute_bft_weight(capacities.iter().copied(), len)
            }
        }
    }
    /// Estimate global min-cut from local values
    ///
    /// Heuristic: sum of local min-cuts, damped by 1/(1 + 0.01 * total
    /// boundary-edge count) — more cross-partition edges lower the estimate.
    ///
    /// OPTIMIZATION: Single-pass computation
    #[inline]
    fn estimate_global_mincut(&self, reports: &[WorkerReport]) -> f64 {
        // OPTIMIZATION: Single pass for both local_sum and boundary_count
        let (local_sum, boundary_count) = reports.iter().fold((0.0, 0usize), |(sum, count), r| {
            let bc = r.boundary_edges.iter().filter(|e| e.is_boundary).count();
            (sum + r.local_mincut, count + bc)
        });
        // Simple estimate: local sum adjusted by boundary factor
        // OPTIMIZATION: Pre-compute constant multiplier
        let boundary_factor = 1.0 / (1.0 + (boundary_count as f64 * 0.01));
        local_sum * boundary_factor
    }
    /// Aggregate confidence from all workers
    ///
    /// BFT: after sorting descending, the value at index `threshold - 1`
    /// is the 2/3-quantile-from-the-top — at least 2/3 of workers report
    /// confidence >= the returned value.
    /// Other strategies: geometric mean computed as exp(mean of logs) for
    /// numerical stability; any zero confidence drives the result to 0.
    ///
    /// OPTIMIZATION: Inline, uses fold for single-pass computation
    #[inline]
    fn aggregate_confidence(&self, reports: &[WorkerReport]) -> f64 {
        let len = reports.len();
        if len == 0 {
            return 0.0;
        }
        match self.strategy {
            MergeStrategy::ByzantineFaultTolerant => {
                // Conservative: use minimum of top 2/3
                let mut confidences: Vec<f64> = Vec::with_capacity(len);
                confidences.extend(reports.iter().map(|r| r.confidence));
                // Sort descending
                confidences
                    .sort_unstable_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
                let threshold = (len * 2) / 3;
                confidences
                    .get(threshold.saturating_sub(1))
                    .copied()
                    .unwrap_or(0.0)
            }
            _ => {
                // Geometric mean via mean-of-logs for numerical stability
                let log_sum: f64 = reports.iter().map(|r| r.confidence.ln()).sum();
                (log_sum / len as f64).exp()
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Build a single-node report whose node weight is `tile_id * 0.1`,
    /// with fixed confidence 0.95 and local min-cut 1.0.
    fn create_test_report(tile_id: TileId, epoch: Epoch) -> WorkerReport {
        let mut report = WorkerReport::new(tile_id, epoch);
        report.add_node(NodeSummary {
            id: "node1".to_string(),
            weight: tile_id as f64 * 0.1,
            edge_count: 5,
            coherence: 0.9,
        });
        report.confidence = 0.95;
        report.local_mincut = 1.0;
        report
    }
    #[test]
    fn test_merge_simple_average() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports = vec![
            create_test_report(1, 0),
            create_test_report(2, 0),
            create_test_report(3, 0),
        ];
        let merged = merger.merge(&reports).expect("merge should succeed");
        assert_eq!(merged.worker_count, 3);
        assert_eq!(merged.epoch, 0);
        // Node weights 0.1, 0.2, 0.3 average to 0.2.
        let node = merged.super_nodes.get("node1").expect("node1 present");
        assert!((node.weight - 0.2).abs() < 0.001);
    }
    #[test]
    fn test_merge_empty_reports() {
        // An empty report slice must be rejected outright.
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        assert!(matches!(merger.merge(&[]), Err(MergeError::EmptyReports)));
    }
    #[test]
    fn test_merge_conflicting_epochs() {
        // Reports from epoch 0 and epoch 1 must not merge.
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports = vec![create_test_report(1, 0), create_test_report(2, 1)];
        assert!(matches!(
            merger.merge(&reports),
            Err(MergeError::ConflictingEpochs)
        ));
    }
    #[test]
    fn test_state_hash_computation() {
        // Hashing a non-empty report must move it off the all-zero default.
        let mut report = create_test_report(1, 0);
        report.compute_state_hash();
        assert_ne!(report.state_hash, [0u8; 32]);
    }
}

View File

@@ -0,0 +1,303 @@
//! Permit token issuance and verification
use crate::{ActionId, GateDecision};
use ed25519_dalek::{Signature, Signer, SigningKey, Verifier as Ed25519Verifier, VerifyingKey};
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
/// Permit token: a signed capability that agents must present
///
/// All fields except `signature` are covered by the Ed25519 signature
/// (see `signable_content`).
// NOTE(review): #[repr(C)] gives little guarantee here since the struct
// contains a String — confirm whether an FFI layout is actually required.
#[repr(C)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PermitToken {
    /// Gate decision
    pub decision: GateDecision,
    /// Action being permitted
    pub action_id: ActionId,
    /// Timestamp (nanoseconds since epoch)
    pub timestamp: u64,
    /// Time-to-live in nanoseconds
    pub ttl_ns: u64,
    /// Hash of the witness data
    #[serde(with = "hex::serde")]
    pub witness_hash: [u8; 32],
    /// Sequence number
    pub sequence: u64,
    /// Full Ed25519 signature (64 bytes)
    #[serde(with = "hex::serde")]
    pub signature: [u8; 64],
}
impl PermitToken {
    /// Check whether the token is still within its TTL at `now_ns`
    /// (nanoseconds since the Unix epoch).
    ///
    /// Uses a saturating add so that `timestamp + ttl_ns` cannot wrap
    /// around u64 and make an unexpired token look expired — this matches
    /// the overflow handling in `Verifier::verify_full`.
    pub fn is_valid_time(&self, now_ns: u64) -> bool {
        now_ns <= self.timestamp.saturating_add(self.ttl_ns)
    }
    /// Encode token to base64(JSON) for transport.
    /// A serialization failure degrades to encoding an empty payload.
    pub fn encode_base64(&self) -> String {
        let json = serde_json::to_vec(self).unwrap_or_default();
        base64::Engine::encode(&base64::engine::general_purpose::STANDARD, &json)
    }
    /// Decode a token previously produced by `encode_base64`.
    ///
    /// # Errors
    /// `InvalidBase64` for malformed base64; `InvalidJson` when the
    /// payload does not deserialize into a `PermitToken`.
    pub fn decode_base64(encoded: &str) -> Result<Self, TokenDecodeError> {
        let bytes = base64::Engine::decode(&base64::engine::general_purpose::STANDARD, encoded)
            .map_err(|_| TokenDecodeError::InvalidBase64)?;
        serde_json::from_slice(&bytes).map_err(|_| TokenDecodeError::InvalidJson)
    }
    /// Get the canonical byte string that gets signed (every field
    /// except the `signature` itself; field order is part of the contract)
    pub fn signable_content(&self) -> Vec<u8> {
        let mut content = Vec::with_capacity(128);
        content.extend_from_slice(&self.sequence.to_le_bytes());
        content.extend_from_slice(&self.timestamp.to_le_bytes());
        content.extend_from_slice(&self.ttl_ns.to_le_bytes());
        content.extend_from_slice(&self.witness_hash);
        content.extend_from_slice(self.action_id.as_bytes());
        content.push(self.decision as u8);
        content
    }
}
/// Error decoding a token (see `PermitToken::decode_base64`)
#[derive(Debug, thiserror::Error)]
pub enum TokenDecodeError {
    /// The transport string was not valid base64
    #[error("Invalid base64 encoding")]
    InvalidBase64,
    /// The decoded bytes did not deserialize into a `PermitToken`
    #[error("Invalid JSON structure")]
    InvalidJson,
}
/// Permit state: manages signing keys and token issuance
pub struct PermitState {
    /// Ed25519 signing key for tokens
    signing_key: SigningKey,
    /// Next sequence number (atomically incremented per issuance)
    next_sequence: std::sync::atomic::AtomicU64,
}
impl PermitState {
    /// Create new permit state with a freshly generated Ed25519 key.
    pub fn new() -> Self {
        Self::with_key(SigningKey::generate(&mut OsRng))
    }
    /// Create permit state around an existing signing key.
    pub fn with_key(signing_key: SigningKey) -> Self {
        Self {
            signing_key,
            next_sequence: std::sync::atomic::AtomicU64::new(0),
        }
    }
    /// Reserve and return the next sequence number
    /// (atomic post-increment, starting at 0).
    pub fn next_sequence(&self) -> u64 {
        self.next_sequence
            .fetch_add(1, std::sync::atomic::Ordering::SeqCst)
    }
    /// Sign a token: blake3-hash its signable content, Ed25519-sign the
    /// hash, and store the full 64-byte signature in the token.
    pub fn sign_token(&self, mut token: PermitToken) -> PermitToken {
        let digest = blake3::hash(&token.signable_content());
        let sig = self.signing_key.sign(digest.as_bytes());
        token.signature.copy_from_slice(&sig.to_bytes());
        token
    }
    /// Build a verifier holding this state's public key.
    pub fn verifier(&self) -> Verifier {
        Verifier {
            verifying_key: self.signing_key.verifying_key(),
        }
    }
}
impl Default for PermitState {
    /// Equivalent to `PermitState::new()`: generates a fresh signing key.
    fn default() -> Self {
        Self::new()
    }
}
/// Token verifier with actual Ed25519 signature verification
#[derive(Clone)]
pub struct Verifier {
    /// Ed25519 verifying (public) key
    verifying_key: VerifyingKey,
}
impl Verifier {
    /// Wrap an Ed25519 verifying (public) key.
    pub fn new(verifying_key: VerifyingKey) -> Self {
        Self { verifying_key }
    }
    /// Check the token's Ed25519 signature over the blake3 hash of its
    /// signable content.
    ///
    /// # Errors
    /// `VerifyError::SignatureFailed` when the signature does not match.
    pub fn verify(&self, token: &PermitToken) -> Result<(), VerifyError> {
        let digest = blake3::hash(&token.signable_content());
        let sig = Signature::from_bytes(&token.signature);
        match self.verifying_key.verify(digest.as_bytes(), &sig) {
            Ok(()) => Ok(()),
            Err(_) => Err(VerifyError::SignatureFailed),
        }
    }
    /// Check both the signature and the TTL window against wall-clock time.
    ///
    /// # Errors
    /// `SignatureFailed` on a bad signature, `Expired` when the current
    /// time is past `timestamp + ttl_ns`.
    pub fn verify_full(&self, token: &PermitToken) -> Result<(), VerifyError> {
        self.verify(token)?;
        let now_ns = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;
        // Saturating add prevents overflow for extreme timestamp/TTL pairs.
        if now_ns > token.timestamp.saturating_add(token.ttl_ns) {
            return Err(VerifyError::Expired);
        }
        Ok(())
    }
}
/// Verification error
#[derive(Debug, thiserror::Error)]
pub enum VerifyError {
    /// The Ed25519 signature did not match the token's content
    #[error("Signature verification failed")]
    SignatureFailed,
    /// Hash mismatch
    /// NOTE(review): not produced by any code path in this module — confirm callers
    #[error("Hash mismatch")]
    HashMismatch,
    /// Current time is past `timestamp + ttl_ns`
    #[error("Token has expired")]
    Expired,
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Canonical unsigned token shared by every test below; individual
    /// tests override fields via struct-update syntax.
    fn base_token() -> PermitToken {
        PermitToken {
            decision: GateDecision::Permit,
            action_id: "test-action".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60_000_000_000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        }
    }
    #[test]
    fn test_token_sign_verify() {
        // A token signed by a state must verify under that state's key.
        let state = PermitState::new();
        let verifier = state.verifier();
        let signed = state.sign_token(base_token());
        assert!(verifier.verify(&signed).is_ok());
    }
    #[test]
    fn test_token_tamper_detection() {
        // Changing a signed field after signing must break verification.
        let state = PermitState::new();
        let verifier = state.verifier();
        let mut signed = state.sign_token(base_token());
        signed.action_id = "malicious-action".to_string();
        assert!(verifier.verify(&signed).is_err());
    }
    #[test]
    fn test_token_wrong_key_rejection() {
        // A signature from one key must not verify under another key.
        let state1 = PermitState::new();
        let state2 = PermitState::new();
        let signed = state1.sign_token(base_token());
        assert!(state2.verifier().verify(&signed).is_err());
    }
    #[test]
    fn test_token_base64_roundtrip() {
        // encode/decode must preserve the token's fields.
        let token = base_token();
        let decoded = PermitToken::decode_base64(&token.encode_base64()).unwrap();
        assert_eq!(token.action_id, decoded.action_id);
        assert_eq!(token.sequence, decoded.sequence);
    }
    #[test]
    fn test_token_expiry() {
        let state = PermitState::new();
        let verifier = state.verifier();
        // Ancient timestamp + 1 ns TTL: signature stays valid but the
        // TTL check in verify_full must report expiry.
        let signed = state.sign_token(PermitToken {
            ttl_ns: 1,
            ..base_token()
        });
        assert!(verifier.verify(&signed).is_ok());
        assert!(matches!(
            verifier.verify_full(&signed),
            Err(VerifyError::Expired)
        ));
    }
}

View File

@@ -0,0 +1,271 @@
//! Witness receipt and hash-chained log
use crate::{ChainVerifyError, PermitToken};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Witness receipt: cryptographic proof of a gate decision
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WitnessReceipt {
    /// Sequence number
    pub sequence: u64,
    /// The permit token issued
    pub token: PermitToken,
    /// Hash of the previous receipt (all zeros for the genesis receipt)
    #[serde(with = "hex::serde")]
    pub previous_hash: [u8; 32],
    /// Summary of witness data
    pub witness_summary: WitnessSummary,
    /// Timestamp proof (not covered by `hash()` — see that method's docs)
    pub timestamp_proof: TimestampProof,
}
impl WitnessReceipt {
    /// Compute the blake3 hash of this receipt.
    ///
    /// Covers the sequence number, the token's signable content, the
    /// previous receipt hash, and the witness-summary hash — in that order.
    /// NOTE(review): `timestamp_proof` is NOT folded into the hash;
    /// confirm this is intentional before relying on it for integrity.
    pub fn hash(&self) -> [u8; 32] {
        let mut hasher = blake3::Hasher::new();
        hasher.update(&self.sequence.to_le_bytes());
        hasher.update(&self.token.signable_content());
        hasher.update(&self.previous_hash);
        hasher.update(&self.witness_summary.hash());
        *hasher.finalize().as_bytes()
    }
}
/// Timestamp proof for receipts
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimestampProof {
    /// Timestamp
    pub timestamp: u64,
    /// Hash of previous receipt
    #[serde(with = "hex::serde")]
    pub previous_receipt_hash: [u8; 32],
    /// Merkle root (for batch anchoring)
    #[serde(with = "hex::serde")]
    pub merkle_root: [u8; 32],
}
/// Summary of witness data from the three filters
/// (structural min-cut, conformal prediction, e-process evidence)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WitnessSummary {
    /// Structural witness
    pub structural: StructuralWitness,
    /// Predictive witness
    pub predictive: PredictiveWitness,
    /// Evidential witness
    pub evidential: EvidentialWitness,
}
impl WitnessSummary {
    /// Build a placeholder summary with neutral values for all three
    /// witnesses (cut value 0, e-value 1, "unknown" verdicts, empty boundary).
    pub fn empty() -> Self {
        let structural = StructuralWitness {
            cut_value: 0.0,
            partition: "unknown".to_string(),
            critical_edges: 0,
            boundary: Vec::new(),
        };
        let predictive = PredictiveWitness {
            set_size: 0,
            coverage: 0.0,
        };
        let evidential = EvidentialWitness {
            e_value: 1.0,
            verdict: "unknown".to_string(),
        };
        Self {
            structural,
            predictive,
            evidential,
        }
    }
    /// blake3 hash of the summary's JSON serialization
    /// (a serialization failure hashes an empty byte string).
    pub fn hash(&self) -> [u8; 32] {
        let bytes = serde_json::to_vec(self).unwrap_or_default();
        *blake3::hash(&bytes).as_bytes()
    }
    /// JSON value form of the summary (`Null` on serialization failure).
    pub fn to_json(&self) -> serde_json::Value {
        serde_json::to_value(self).unwrap_or(serde_json::Value::Null)
    }
}
/// Structural witness from min-cut analysis
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StructuralWitness {
    /// Cut value
    pub cut_value: f64,
    /// Partition status (e.g. "fragile" triggers a Deny on replay)
    pub partition: String,
    /// Number of critical edges
    pub critical_edges: usize,
    /// Boundary edge IDs
    #[serde(default)]
    pub boundary: Vec<String>,
}
/// Predictive witness from conformal prediction
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictiveWitness {
    /// Prediction set size (large sets indicate high uncertainty)
    pub set_size: usize,
    /// Coverage target
    pub coverage: f64,
}
/// Evidential witness from e-process
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvidentialWitness {
    /// Accumulated e-value
    pub e_value: f64,
    /// Verdict string ("accept" / "continue" / "reject")
    pub verdict: String,
}
/// Hash-chained receipt log
pub struct ReceiptLog {
    /// Receipts keyed by sequence number
    receipts: HashMap<u64, WitnessReceipt>,
    /// Sequence number of the most recently appended receipt
    latest_sequence: Option<u64>,
    /// Hash of the most recently appended receipt (genesis zeros if empty)
    latest_hash: [u8; 32],
}
impl ReceiptLog {
    /// Create an empty log whose chain starts at the all-zero genesis hash.
    pub fn new() -> Self {
        Self {
            receipts: HashMap::new(),
            latest_sequence: None,
            latest_hash: [0u8; 32],
        }
    }
    /// Hash of the most recently appended receipt (genesis zeros if empty).
    pub fn last_hash(&self) -> [u8; 32] {
        self.latest_hash
    }
    /// Append a receipt and move the chain head to it unconditionally.
    /// No linkage validation happens here — see `verify_chain_to`.
    pub fn append(&mut self, receipt: WitnessReceipt) {
        let new_head = receipt.hash();
        let sequence = receipt.sequence;
        self.receipts.insert(sequence, receipt);
        self.latest_sequence = Some(sequence);
        self.latest_hash = new_head;
    }
    /// Look up a receipt by its sequence number.
    pub fn get(&self, sequence: u64) -> Option<&WitnessReceipt> {
        self.receipts.get(&sequence)
    }
    /// Sequence number of the most recently appended receipt, if any.
    pub fn latest_sequence(&self) -> Option<u64> {
        self.latest_sequence
    }
    /// Walk the chain from genesis through `sequence`, checking that each
    /// receipt's `previous_hash` equals the hash of its predecessor.
    pub fn verify_chain_to(&self, sequence: u64) -> Result<(), ChainVerifyError> {
        let mut prev = [0u8; 32]; // genesis hash
        for seq in 0..=sequence {
            let receipt = match self.receipts.get(&seq) {
                Some(r) => r,
                None => return Err(ChainVerifyError::ReceiptNotFound { sequence: seq }),
            };
            if receipt.previous_hash != prev {
                return Err(ChainVerifyError::HashMismatch { sequence: seq });
            }
            prev = receipt.hash();
        }
        Ok(())
    }
    /// Number of receipts stored.
    pub fn len(&self) -> usize {
        self.receipts.len()
    }
    /// True when no receipts have been appended.
    pub fn is_empty(&self) -> bool {
        self.receipts.is_empty()
    }
    /// Iterate over stored receipts (in arbitrary HashMap order).
    pub fn iter(&self) -> impl Iterator<Item = &WitnessReceipt> {
        self.receipts.values()
    }
}
impl Default for ReceiptLog {
    /// Equivalent to `ReceiptLog::new()`: an empty genesis-rooted log.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::GateDecision;
    #[test]
    fn test_receipt_hash() {
        // Even an all-zero-field receipt must hash to something non-zero.
        let receipt = WitnessReceipt {
            sequence: 0,
            token: PermitToken {
                decision: GateDecision::Permit,
                action_id: "test".to_string(),
                timestamp: 1000,
                ttl_ns: 60000,
                witness_hash: [0u8; 32],
                sequence: 0,
                signature: [0u8; 64],
            },
            previous_hash: [0u8; 32],
            witness_summary: WitnessSummary::empty(),
            timestamp_proof: TimestampProof {
                timestamp: 1000,
                previous_receipt_hash: [0u8; 32],
                merkle_root: [0u8; 32],
            },
        };
        let hash = receipt.hash();
        assert_ne!(hash, [0u8; 32]);
    }
    #[test]
    fn test_receipt_log_chain() {
        let mut log = ReceiptLog::new();
        // Each receipt links to the current chain head via last_hash(),
        // so the chain must verify end-to-end after all appends.
        for i in 0..3 {
            let receipt = WitnessReceipt {
                sequence: i,
                token: PermitToken {
                    decision: GateDecision::Permit,
                    action_id: format!("action-{}", i),
                    timestamp: 1000 + i,
                    ttl_ns: 60000,
                    witness_hash: [0u8; 32],
                    sequence: i,
                    signature: [0u8; 64],
                },
                previous_hash: log.last_hash(),
                witness_summary: WitnessSummary::empty(),
                timestamp_proof: TimestampProof {
                    timestamp: 1000 + i,
                    previous_receipt_hash: log.last_hash(),
                    merkle_root: [0u8; 32],
                },
            };
            log.append(receipt);
        }
        assert_eq!(log.len(), 3);
        assert!(log.verify_chain_to(2).is_ok());
    }
}

View File

@@ -0,0 +1,392 @@
//! Deterministic replay for auditing and debugging
//!
//! This module provides the ability to replay gate decisions for audit purposes,
//! ensuring that the same inputs produce the same outputs deterministically.
use crate::{GateDecision, WitnessReceipt, WitnessSummary};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Result of replaying a decision
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayResult {
    /// The replayed decision
    pub decision: GateDecision,
    /// Whether the replay matched the original
    pub matched: bool,
    /// Original decision from receipt
    pub original_decision: GateDecision,
    /// State snapshot at decision time (the receipt's witness summary)
    pub state_snapshot: WitnessSummary,
    /// Differences if any (empty when `matched` is true)
    pub differences: Vec<ReplayDifference>,
}
/// A difference found during replay
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayDifference {
    /// Field that differs
    pub field: String,
    /// Original value (Debug-formatted)
    pub original: String,
    /// Replayed value (Debug-formatted)
    pub replayed: String,
}
/// Snapshot of state for replay
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StateSnapshot {
    /// Sequence number
    pub sequence: u64,
    /// Timestamp
    pub timestamp: u64,
    /// Global min-cut value
    pub global_min_cut: f64,
    /// Aggregate e-value
    pub aggregate_e_value: f64,
    /// Minimum coherence
    /// NOTE(review): stored as i16 here but f64 elsewhere — presumably a
    /// fixed-point encoding; confirm the scale factor.
    pub min_coherence: i16,
    /// Tile states keyed by tile id
    pub tile_states: HashMap<u8, TileSnapshot>,
}
/// Snapshot of a single tile's state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TileSnapshot {
    /// Tile ID
    pub tile_id: u8,
    /// Coherence (same encoding as `StateSnapshot::min_coherence`)
    pub coherence: i16,
    /// E-value
    pub e_value: f32,
    /// Boundary edge count
    pub boundary_edges: usize,
}
/// Engine for replaying decisions
pub struct ReplayEngine {
    /// Checkpoints for state restoration, keyed by sequence number
    checkpoints: HashMap<u64, StateSnapshot>,
    /// Checkpoint interval (only sequences divisible by this are retained)
    checkpoint_interval: u64,
}
impl ReplayEngine {
    /// Create a replay engine that keeps a checkpoint every
    /// `checkpoint_interval` sequence numbers.
    ///
    /// An interval of 0 would make `save_checkpoint` panic on the
    /// `sequence % interval` computation, so it is clamped to 1
    /// (checkpoint every sequence).
    pub fn new(checkpoint_interval: u64) -> Self {
        Self {
            checkpoints: HashMap::new(),
            checkpoint_interval: checkpoint_interval.max(1),
        }
    }
    /// Save a checkpoint. Snapshots whose sequence is not a multiple of
    /// the interval are silently dropped (by design: bounded memory).
    pub fn save_checkpoint(&mut self, sequence: u64, snapshot: StateSnapshot) {
        if sequence % self.checkpoint_interval == 0 {
            self.checkpoints.insert(sequence, snapshot);
        }
    }
    /// Find the newest checkpoint at or before `sequence`, if any.
    pub fn find_nearest_checkpoint(&self, sequence: u64) -> Option<(u64, &StateSnapshot)> {
        self.checkpoints
            .iter()
            .filter(|(seq, _)| **seq <= sequence)
            .max_by_key(|(seq, _)| *seq)
            .map(|(seq, snap)| (*seq, snap))
    }
    /// Replay a decision from a receipt.
    ///
    /// Re-runs the three-filter logic over the receipt's witness summary
    /// and compares the outcome with the decision recorded in the token.
    pub fn replay(&self, receipt: &WitnessReceipt) -> ReplayResult {
        // Get the witness summary from the receipt
        let summary = &receipt.witness_summary;
        // Reconstruct the decision based on the witness data
        let replayed_decision = self.reconstruct_decision(summary);
        // Compare with original
        let original_decision = receipt.token.decision;
        let matched = replayed_decision == original_decision;
        let mut differences = Vec::new();
        if !matched {
            differences.push(ReplayDifference {
                field: "decision".to_string(),
                original: format!("{:?}", original_decision),
                replayed: format!("{:?}", replayed_decision),
            });
        }
        ReplayResult {
            decision: replayed_decision,
            matched,
            original_decision,
            state_snapshot: summary.clone(),
            differences,
        }
    }
    /// Reconstruct decision from witness summary.
    ///
    /// Mirrors TileZero's three-filter order: structural, then evidence,
    /// then prediction-set size; first failing filter determines the result.
    fn reconstruct_decision(&self, summary: &WitnessSummary) -> GateDecision {
        // 1. Structural filter: a fragile partition is an immediate Deny
        if summary.structural.partition == "fragile" {
            return GateDecision::Deny;
        }
        // 2. Evidence filter: reject -> Deny, inconclusive -> Defer
        if summary.evidential.verdict == "reject" {
            return GateDecision::Deny;
        }
        if summary.evidential.verdict == "continue" {
            return GateDecision::Defer;
        }
        // 3. Prediction filter: oversized prediction set -> too uncertain
        if summary.predictive.set_size > 20 {
            return GateDecision::Defer;
        }
        GateDecision::Permit
    }
    /// Verify a sequence of receipts for consistency by replaying each one.
    pub fn verify_sequence(&self, receipts: &[WitnessReceipt]) -> SequenceVerification {
        let mut results = Vec::new();
        let mut all_matched = true;
        for receipt in receipts {
            let result = self.replay(receipt);
            if !result.matched {
                all_matched = false;
            }
            results.push((receipt.sequence, result));
        }
        SequenceVerification {
            total_receipts: receipts.len(),
            all_matched,
            results,
        }
    }
    /// Export a checkpoint as JSON bytes for external storage
    /// (None when no checkpoint exists or serialization fails).
    pub fn export_checkpoint(&self, sequence: u64) -> Option<Vec<u8>> {
        self.checkpoints
            .get(&sequence)
            .and_then(|snap| serde_json::to_vec(snap).ok())
    }
    /// Import a checkpoint previously produced by `export_checkpoint`.
    ///
    /// # Errors
    /// `ReplayError::InvalidCheckpoint` when `data` does not deserialize.
    pub fn import_checkpoint(&mut self, sequence: u64, data: &[u8]) -> Result<(), ReplayError> {
        let snapshot: StateSnapshot =
            serde_json::from_slice(data).map_err(|_| ReplayError::InvalidCheckpoint)?;
        self.checkpoints.insert(sequence, snapshot);
        Ok(())
    }
    /// Drop all checkpoints strictly before `sequence` to bound memory.
    pub fn prune_before(&mut self, sequence: u64) {
        self.checkpoints.retain(|seq, _| *seq >= sequence);
    }
    /// Number of retained checkpoints.
    pub fn checkpoint_count(&self) -> usize {
        self.checkpoints.len()
    }
}
impl Default for ReplayEngine {
fn default() -> Self {
Self::new(100)
}
}
/// Result of verifying a sequence of receipts
#[derive(Debug)]
pub struct SequenceVerification {
    /// Total number of receipts verified
    pub total_receipts: usize,
    /// Whether all replays matched their original decisions
    pub all_matched: bool,
    /// Individual results as `(sequence, replay result)` pairs
    pub results: Vec<(u64, ReplayResult)>,
}

impl SequenceVerification {
    /// Iterate over the receipts whose replayed decision did not match the original.
    pub fn mismatches(&self) -> impl Iterator<Item = &(u64, ReplayResult)> {
        self.results.iter().filter(|(_, r)| !r.matched)
    }

    /// Number of receipts whose replayed decision did not match the original.
    pub fn mismatch_count(&self) -> usize {
        // Reuse `mismatches()` so the mismatch predicate is defined in one place.
        self.mismatches().count()
    }
}
/// Error during replay or checkpoint handling.
#[derive(Debug, thiserror::Error)]
pub enum ReplayError {
    /// No receipt was recorded for the requested sequence number.
    #[error("Receipt not found for sequence {sequence}")]
    ReceiptNotFound { sequence: u64 },
    /// No checkpoint covers the requested sequence number.
    #[error("Checkpoint not found for sequence {sequence}")]
    CheckpointNotFound { sequence: u64 },
    /// Checkpoint bytes could not be deserialized (returned by `import_checkpoint`).
    #[error("Invalid checkpoint data")]
    InvalidCheckpoint,
    /// State could not be rebuilt from the available receipts/checkpoints.
    #[error("State reconstruction failed: {reason}")]
    ReconstructionFailed { reason: String },
    /// The hash chain linking consecutive receipts broke at the given sequence.
    #[error("Hash chain verification failed at sequence {sequence}")]
    ChainVerificationFailed { sequence: u64 },
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        EvidentialWitness, PermitToken, PredictiveWitness, StructuralWitness, TimestampProof,
    };

    /// Build a receipt whose witness data describes a healthy system
    /// ("stable" partition, "accept" verdict, small prediction set).
    fn create_test_receipt(sequence: u64, decision: GateDecision) -> WitnessReceipt {
        let ts = 1000 + sequence;
        WitnessReceipt {
            sequence,
            token: PermitToken {
                decision,
                action_id: format!("action-{}", sequence),
                timestamp: ts,
                ttl_ns: 60000,
                witness_hash: [0u8; 32],
                sequence,
                signature: [0u8; 64],
            },
            previous_hash: [0u8; 32],
            witness_summary: WitnessSummary {
                structural: StructuralWitness {
                    cut_value: 10.0,
                    partition: "stable".to_string(),
                    critical_edges: 0,
                    boundary: vec![],
                },
                predictive: PredictiveWitness {
                    set_size: 5,
                    coverage: 0.9,
                },
                evidential: EvidentialWitness {
                    e_value: 100.0,
                    verdict: "accept".to_string(),
                },
            },
            timestamp_proof: TimestampProof {
                timestamp: ts,
                previous_receipt_hash: [0u8; 32],
                merkle_root: [0u8; 32],
            },
        }
    }

    /// Build a snapshot with fixed healthy metrics at the given sequence.
    fn make_snapshot(sequence: u64) -> StateSnapshot {
        StateSnapshot {
            sequence,
            timestamp: 1000 + sequence,
            global_min_cut: 10.0,
            aggregate_e_value: 100.0,
            min_coherence: 256,
            tile_states: HashMap::new(),
        }
    }

    #[test]
    fn test_replay_matching() {
        let engine = ReplayEngine::new(100);
        let result = engine.replay(&create_test_receipt(0, GateDecision::Permit));
        assert!(result.matched);
        assert_eq!(result.decision, GateDecision::Permit);
    }

    #[test]
    fn test_replay_mismatch() {
        let engine = ReplayEngine::new(100);
        let mut receipt = create_test_receipt(0, GateDecision::Permit);
        // A fragile partition must replay as Deny, contradicting the stored Permit.
        receipt.witness_summary.structural.partition = "fragile".to_string();
        let result = engine.replay(&receipt);
        assert!(!result.matched);
        assert_eq!(result.decision, GateDecision::Deny);
        assert!(!result.differences.is_empty());
    }

    #[test]
    fn test_checkpoint_save_load() {
        let mut engine = ReplayEngine::new(10);
        engine.save_checkpoint(0, make_snapshot(0));
        assert_eq!(engine.checkpoint_count(), 1);
        let (seq, snap) = engine.find_nearest_checkpoint(5).unwrap();
        assert_eq!(seq, 0);
        assert_eq!(snap.global_min_cut, 10.0);
    }

    #[test]
    fn test_sequence_verification() {
        let engine = ReplayEngine::new(100);
        let receipts: Vec<_> = (0..3)
            .map(|i| create_test_receipt(i, GateDecision::Permit))
            .collect();
        let verification = engine.verify_sequence(&receipts);
        assert_eq!(verification.total_receipts, 3);
        assert!(verification.all_matched);
        assert_eq!(verification.mismatch_count(), 0);
    }

    #[test]
    fn test_prune_checkpoints() {
        let mut engine = ReplayEngine::new(10);
        // Checkpoints at 0, 10, ..., 90 — all on the interval boundary.
        for seq in (0..10u64).map(|i| i * 10) {
            engine.save_checkpoint(seq, make_snapshot(seq));
        }
        assert_eq!(engine.checkpoint_count(), 10);
        engine.prune_before(50);
        assert_eq!(engine.checkpoint_count(), 5);
    }

    #[test]
    fn test_checkpoint_export_import() {
        let mut source = ReplayEngine::new(10);
        source.save_checkpoint(0, make_snapshot(0));
        let bytes = source.export_checkpoint(0).unwrap();
        let mut restored = ReplayEngine::new(10);
        restored.import_checkpoint(0, &bytes).unwrap();
        assert_eq!(restored.checkpoint_count(), 1);
    }
}

// ---------------------------------------------------------------------------
// NOTE(review): the lines below originate from a SECOND source file
// (supergraph module) concatenated by a commit-diff export; the literal
// "View File" / "@@ -0,0 +1,218 @@" residue has been replaced by this
// comment so the file remains syntactically valid.
// ---------------------------------------------------------------------------
//! Reduced supergraph from worker tile summaries
use crate::receipt::WitnessSummary;
use crate::{TileId, WitnessFragment};
use std::collections::HashMap;
/// Reduced graph maintained by TileZero.
///
/// Aggregates per-tile summaries into a few global scalars that drive the
/// witness summary: a (monotonically tightening) global cut, an aggregate
/// e-value derived from mean tile coherence, a shift-pressure scalar, and
/// the most recent boundary-edge count.
pub struct ReducedGraph {
    /// Coherence scores per tile, written by `update_coherence`.
    tile_coherence: HashMap<TileId, f32>,
    /// Global cut value; only decreases (min over incoming fragment cuts).
    global_cut_value: f64,
    /// Aggregated e-value, recomputed from mean tile coherence.
    aggregated_e_value: f64,
    /// Shift pressure (set externally via `set_shift_pressure`).
    shift_pressure: f64,
    /// Boundary edge count from the most recent fragment.
    boundary_edges: usize,
}
impl ReducedGraph {
    /// Create a reduced graph with optimistic initial aggregates.
    pub fn new() -> Self {
        Self {
            tile_coherence: HashMap::new(),
            // Start from high coherence / high evidence so decisions are not
            // pessimistic before any tile data arrives.
            global_cut_value: 100.0,
            aggregated_e_value: 100.0,
            shift_pressure: 0.0,
            boundary_edges: 0,
        }
    }

    /// Fold a worker-tile witness fragment into the global view.
    pub fn update_from_fragment(&mut self, fragment: &WitnessFragment) {
        self.boundary_edges = fragment.boundary_edges.len();
        // The global cut only tightens: it is the minimum over local cuts.
        self.global_cut_value = f64::min(self.global_cut_value, fragment.cut_value as f64);
    }

    /// Record a tile's coherence score and refresh the aggregate e-value.
    pub fn update_coherence(&mut self, tile_id: TileId, coherence: f32) {
        self.tile_coherence.insert(tile_id, coherence);
        let count = self.tile_coherence.len();
        if count > 0 {
            let total: f32 = self.tile_coherence.values().sum();
            let mean = total / count as f32;
            // Mean coherence across tiles drives the evidence aggregate.
            self.aggregated_e_value = (mean as f64) * 100.0;
        }
    }

    /// Get the global cut value
    pub fn global_cut(&self) -> f64 {
        self.global_cut_value
    }

    /// Aggregate shift pressure across tiles
    pub fn aggregate_shift_pressure(&self) -> f64 {
        self.shift_pressure
    }

    /// Aggregate evidence across tiles
    pub fn aggregate_evidence(&self) -> f64 {
        self.aggregated_e_value
    }

    /// Snapshot the current aggregates as a witness summary.
    pub fn witness_summary(&self) -> WitnessSummary {
        use crate::receipt::{EvidentialWitness, PredictiveWitness, StructuralWitness};

        // Partition label from cut thresholds: >=10 stable, >=5 marginal, else fragile.
        let partition = match self.global_cut_value {
            c if c >= 10.0 => "stable",
            c if c >= 5.0 => "marginal",
            _ => "fragile",
        };
        // Verdict from e-value thresholds: >=100 accept, >=0.01 continue, else reject.
        let verdict = match self.aggregated_e_value {
            e if e >= 100.0 => "accept",
            e if e >= 0.01 => "continue",
            _ => "reject",
        };

        WitnessSummary {
            structural: StructuralWitness {
                cut_value: self.global_cut_value,
                partition: partition.to_string(),
                critical_edges: self.boundary_edges,
                boundary: vec![],
            },
            predictive: PredictiveWitness {
                // Simplified placeholder prediction set.
                set_size: 1,
                coverage: 0.95,
            },
            evidential: EvidentialWitness {
                e_value: self.aggregated_e_value,
                verdict: verdict.to_string(),
            },
        }
    }

    /// Set shift pressure (for testing or external updates)
    pub fn set_shift_pressure(&mut self, pressure: f64) {
        self.shift_pressure = pressure;
    }

    /// Set global cut value (for testing or external updates)
    pub fn set_global_cut(&mut self, cut: f64) {
        self.global_cut_value = cut;
    }

    /// Set aggregated evidence (for testing or external updates)
    pub fn set_evidence(&mut self, evidence: f64) {
        self.aggregated_e_value = evidence;
    }
}
impl Default for ReducedGraph {
fn default() -> Self {
Self::new()
}
}
/// Structural filter for graph-based decisions.
///
/// Compares the reduced graph's global cut against a fixed threshold.
pub struct StructuralFilter {
    /// Minimum cut value required for the structure to count as stable.
    min_cut: f64,
}

impl StructuralFilter {
    /// Build a filter with the given minimum-cut threshold.
    pub fn new(min_cut: f64) -> Self {
        StructuralFilter { min_cut }
    }

    /// The structure is stable when the global cut meets the threshold.
    pub fn is_stable(&self, graph: &ReducedGraph) -> bool {
        self.min_cut <= graph.global_cut()
    }
}
/// Shift pressure tracking.
///
/// Maintains an exponential moving average of observed shift values and
/// flags a shift once the average reaches a fixed threshold.
pub struct ShiftPressure {
    /// Current smoothed pressure.
    current: f64,
    /// Threshold at or above which a shift is reported.
    threshold: f64,
}

impl ShiftPressure {
    /// Build a tracker starting at zero pressure with the given threshold.
    pub fn new(threshold: f64) -> Self {
        ShiftPressure {
            threshold,
            current: 0.0,
        }
    }

    /// Fold a new observation into the EMA (smoothing factor 0.1).
    pub fn update(&mut self, value: f64) {
        self.current = self.current * 0.9 + value * 0.1;
    }

    /// Whether the smoothed pressure has reached the threshold.
    pub fn is_shifting(&self) -> bool {
        self.threshold <= self.current
    }

    /// Current smoothed pressure value.
    pub fn current(&self) -> f64 {
        self.current
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_reduced_graph() {
        let mut graph = ReducedGraph::new();
        // A fresh graph starts at the optimistic initial cut value.
        assert!(graph.global_cut() >= 100.0);
        graph.update_coherence(1, 0.9);
        graph.update_coherence(2, 0.8);
        assert_eq!(graph.witness_summary().structural.partition, "stable");
    }

    #[test]
    fn test_structural_filter() {
        let filter = StructuralFilter::new(5.0);
        let mut graph = ReducedGraph::new();
        assert!(filter.is_stable(&graph));
        // Dropping the cut below the threshold flips the verdict.
        graph.set_global_cut(3.0);
        assert!(!filter.is_stable(&graph));
    }

    #[test]
    fn test_shift_pressure() {
        let mut pressure = ShiftPressure::new(0.5);
        // A sustained high signal drives the EMA over the threshold.
        (0..20).for_each(|_| pressure.update(0.8));
        assert!(pressure.is_shifting());
    }
}