Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,64 @@
# Crate manifest for cognitum-gate-tilezero — the native TileZero arbiter
# of the Anytime-Valid Coherence Gate (see README for the architecture).
[package]
name = "cognitum-gate-tilezero"
version = "0.1.1"
edition = "2021"
description = "Native arbiter for TileZero in the Anytime-Valid Coherence Gate"
license = "MIT OR Apache-2.0"
repository = "https://github.com/ruvnet/ruvector"
readme = "README.md"
keywords = ["coherence", "gate", "arbiter", "security"]
categories = ["cryptography", "authentication"]
[lib]
# Optional capabilities; everything is off by default.
[features]
default = []
# Real min-cut computation via the optional ruvector-mincut dependency.
mincut = ["ruvector-mincut"]
# Decision replay support for audit/debug tooling.
audit-replay = []
[dependencies]
# Only compiled in when the `mincut` feature is enabled.
ruvector-mincut = { version = "0.1.30", optional = true }
# blake3: hash-chained receipt log; ed25519-dalek: permit-token signing.
blake3 = "1.5"
ed25519-dalek = { version = "2.1", features = ["rand_core", "serde"] }
rand = "0.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
thiserror = "1.0"
tokio = { version = "1.0", features = ["sync", "time"] }
tracing = "0.1"
# base64/hex: wire and serde encodings for tokens and hashes.
base64 = "0.22"
hex = { version = "0.4", features = ["serde"] }
[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports", "async_tokio"] }
proptest = "1.4"
rand = "0.8"
tokio = { version = "1.0", features = ["rt-multi-thread", "macros", "sync", "time"] }
# Criterion bench targets; `harness = false` hands `main` to criterion.
[[bench]]
name = "decision_bench"
harness = false
[[bench]]
name = "crypto_bench"
harness = false
[[bench]]
name = "merge_bench"
harness = false
[[bench]]
name = "benchmarks"
harness = false
# Examples; none require optional features.
[[example]]
name = "basic_gate"
required-features = []
[[example]]
name = "human_escalation"
required-features = []
[[example]]
name = "receipt_audit"
required-features = []

View File

@@ -0,0 +1,607 @@
# cognitum-gate-tilezero: The Central Arbiter
<p align="center">
<a href="https://ruv.io"><img src="https://img.shields.io/badge/ruv.io-coherence_gate-blueviolet?style=for-the-badge" alt="ruv.io"></a>
<a href="https://github.com/ruvnet/ruvector"><img src="https://img.shields.io/badge/RuVector-monorepo-orange?style=for-the-badge&logo=github" alt="RuVector"></a>
</p>
<p align="center">
<img src="https://img.shields.io/crates/v/cognitum-gate-tilezero" alt="Crates.io">
<img src="https://img.shields.io/badge/latency-<100μs-blue" alt="Latency">
<img src="https://img.shields.io/badge/license-MIT%2FApache--2.0-green" alt="License">
<img src="https://img.shields.io/badge/rust-1.77%2B-orange?logo=rust" alt="Rust">
</p>
<p align="center">
<strong>Native arbiter for the Anytime-Valid Coherence Gate in a 256-tile WASM fabric</strong>
</p>
<p align="center">
<em>TileZero merges worker reports, makes gate decisions, and issues cryptographically signed permit tokens.</em>
</p>
<p align="center">
<a href="#what-is-tilezero">What is TileZero?</a> •
<a href="#quick-start">Quick Start</a> •
<a href="#key-capabilities">Capabilities</a> •
<a href="#tutorials">Tutorials</a> •
<a href="https://ruv.io">ruv.io</a>
</p>
---
## What is TileZero?
**TileZero** is the central coordinator in a distributed coherence assessment system. In a 256-tile WASM fabric, TileZero (tile 0) acts as the arbiter that:
1. **Merges** worker tile reports into a unified supergraph
2. **Decides** whether to Permit, Defer, or Deny actions
3. **Signs** cryptographic permit tokens with Ed25519
4. **Logs** every decision in a Blake3 hash-chained receipt log
### Architecture Overview
```
Worker Tiles (1-255) TileZero (Tile 0)
┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────────┐
│ Tile 1 │ │ Tile 2 │ │Tile 255 │ │ TileZero │
│ ─────── │ │ ─────── │ │ ─────── │ │ Arbiter │
│ Local │ │ Local │ │ Local │ ───► │ ─────────── │
│ Graph │ │ Graph │ │ Graph │ │ Supergraph │
│ Report │ │ Report │ │ Report │ │ Decision │
└────┬────┘ └────┬────┘ └────┬────┘ │ PermitToken │
│ │ │ │ ReceiptLog │
└───────────┴───────────┴──────────►└─────────────┘
```
### The Three-Filter Decision Pipeline
TileZero applies three stacked filters to every action request:
| Filter | Question | Pass Condition |
|--------|----------|----------------|
| **Structural** | Is the graph well-connected? | Min-cut ≥ threshold |
| **Shift** | Is the distribution stable? | Shift pressure < max |
| **Evidence** | Have we accumulated enough confidence? | E-value in safe range |
```
Action Request → [Structural] → [Shift] → [Evidence] → PERMIT/DEFER/DENY
↓ ↓ ↓
Graph cut Distribution E-value
healthy? stable? confident?
```
---
## Quick Start
### Installation
```toml
[dependencies]
cognitum-gate-tilezero = "0.1"

# Or, with min-cut integration (replaces the line above — TOML forbids
# duplicate keys in one table):
# cognitum-gate-tilezero = { version = "0.1", features = ["mincut"] }
```
### Basic Usage
```rust
use cognitum_gate_tilezero::{
TileZero, GateThresholds, ActionContext, ActionTarget, ActionMetadata,
GateDecision,
};
#[tokio::main]
async fn main() {
// Create TileZero with default thresholds
let thresholds = GateThresholds::default();
let tilezero = TileZero::new(thresholds);
// Define an action to evaluate
let action = ActionContext {
action_id: "action-001".to_string(),
action_type: "config_change".to_string(),
target: ActionTarget {
device: Some("router-1".to_string()),
path: Some("/config/firewall".to_string()),
extra: Default::default(),
},
context: ActionMetadata {
agent_id: "agent-42".to_string(),
session_id: Some("session-abc".to_string()),
prior_actions: vec![],
urgency: "normal".to_string(),
},
};
// Get a decision
let token = tilezero.decide(&action).await;
match token.decision {
GateDecision::Permit => println!("✅ Action permitted"),
GateDecision::Defer => println!("⚠️ Action deferred - escalate"),
GateDecision::Deny => println!("🛑 Action denied"),
}
// Token is cryptographically signed
println!("Sequence: {}", token.sequence);
println!("Witness hash: {:x?}", &token.witness_hash[..8]);
}
```
---
## Key Capabilities
### Core Features
| Capability | Description |
|------------|-------------|
| **Report Merging** | Combine 255 worker tile reports into unified supergraph |
| **Three-Filter Pipeline** | Structural + Shift + Evidence decision making |
| **Ed25519 Signing** | Cryptographic permit tokens that can't be forged |
| **Blake3 Hash Chain** | Tamper-evident receipt log for audit compliance |
| **Async/Await** | Full Tokio async support for concurrent operations |
### Decision Outcomes
| Decision | Meaning | Recommended Action |
|----------|---------|-------------------|
| `Permit` | All filters pass, action is safe | Proceed immediately |
| `Defer` | Uncertainty detected | Escalate to human or wait |
| `Deny` | Structural issue detected | Block action, quarantine region |
---
## Tutorials
<details>
<summary><strong>Tutorial 1: Processing Worker Reports</strong></summary>
### Collecting and Merging Tile Reports
Worker tiles continuously monitor their local patch of the coherence graph. TileZero collects these reports and maintains a global view.
```rust
use cognitum_gate_tilezero::{TileZero, TileReport, WitnessFragment, GateThresholds};
#[tokio::main]
async fn main() {
let tilezero = TileZero::new(GateThresholds::default());
// Simulate reports from worker tiles
let reports = vec![
TileReport {
tile_id: 1,
coherence: 0.95,
boundary_moved: false,
suspicious_edges: vec![],
e_value: 1.0,
witness_fragment: None,
},
TileReport {
tile_id: 2,
coherence: 0.87,
boundary_moved: true,
suspicious_edges: vec![42, 43],
e_value: 0.8,
witness_fragment: Some(WitnessFragment {
tile_id: 2,
boundary_edges: vec![42, 43],
cut_value: 5.2,
}),
},
];
// Merge reports into supergraph
tilezero.collect_reports(&reports).await;
println!("Reports collected from {} tiles", reports.len());
}
```
**Key Concepts:**
- **boundary_moved**: Indicates structural change requiring supergraph update
- **witness_fragment**: Contains boundary information for witness computation
- **e_value**: Local evidence accumulator for statistical testing
</details>
<details>
<summary><strong>Tutorial 2: Verifying Permit Tokens</strong></summary>
### Token Verification and Validation
Permit tokens are Ed25519 signed and time-bounded. Recipients should verify before acting.
```rust
use cognitum_gate_tilezero::{TileZero, GateThresholds, Verifier};
#[tokio::main]
async fn main() {
let tilezero = TileZero::new(GateThresholds::default());
// Get the verifier (contains public key)
let verifier: Verifier = tilezero.verifier();
// Later, when receiving a token...
let action = create_action();
let token = tilezero.decide(&action).await;
// Verify signature
match verifier.verify(&token) {
Ok(()) => println!("✅ Valid signature"),
Err(e) => println!("❌ Invalid: {:?}", e),
}
// Check time validity
let now_ns = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_nanos() as u64;
if token.timestamp + token.ttl_ns > now_ns {
println!("⏰ Token still valid");
} else {
println!("⏰ Token expired");
}
}
```
</details>
<details>
<summary><strong>Tutorial 3: Audit Trail with Receipt Log</strong></summary>
### Tamper-Evident Decision Logging
Every decision is logged in a Blake3 hash chain for compliance and debugging.
```rust
use cognitum_gate_tilezero::{ActionContext, GateThresholds, TileZero};
#[tokio::main]
async fn main() {
let tilezero = TileZero::new(GateThresholds::default());
// Make several decisions
for i in 0..5 {
let action = ActionContext {
action_id: format!("action-{}", i),
action_type: "test".to_string(),
target: Default::default(),
context: Default::default(),
};
let _ = tilezero.decide(&action).await;
}
// Retrieve specific receipt
if let Some(receipt) = tilezero.get_receipt(2).await {
println!("Receipt #2:");
println!(" Decision: {:?}", receipt.token.decision);
println!(" Timestamp: {}", receipt.token.timestamp);
println!(" Previous hash: {:x?}", &receipt.previous_hash[..8]);
}
// Verify chain integrity
    match tilezero.verify_chain_to(4).await { // 5 receipts → last sequence is 4
Ok(()) => println!("✅ Hash chain intact"),
Err(e) => println!("❌ Chain broken: {:?}", e),
}
// Export for audit
let json = tilezero.export_receipts_json().await.unwrap();
println!("Exported {} bytes of audit data", json.len());
}
```
</details>
<details>
<summary><strong>Tutorial 4: Custom Thresholds Configuration</strong></summary>
### Tuning the Decision Pipeline
Adjust thresholds based on your security requirements and system characteristics.
```rust
use cognitum_gate_tilezero::{TileZero, GateThresholds};
fn main() {
// Conservative settings (more DENY/DEFER)
let conservative = GateThresholds {
min_cut: 10.0, // Higher min-cut requirement
max_shift: 0.1, // Lower tolerance for distribution shift
tau_deny: 0.001, // Lower e-value triggers DENY
tau_permit: 1000.0, // Higher e-value needed for PERMIT
permit_ttl_ns: 100_000, // Shorter token validity (100μs)
};
// Permissive settings (more PERMIT)
let permissive = GateThresholds {
min_cut: 3.0, // Lower connectivity requirement
max_shift: 0.5, // Higher tolerance for shift
tau_deny: 0.0001, // Very low e-value for DENY
tau_permit: 10.0, // Lower e-value sufficient for PERMIT
permit_ttl_ns: 10_000_000, // Longer validity (10ms)
};
// Production defaults
let default = GateThresholds::default();
println!("Conservative min_cut: {}", conservative.min_cut);
println!("Permissive min_cut: {}", permissive.min_cut);
println!("Default min_cut: {}", default.min_cut);
}
```
**Threshold Guidelines:**
| Parameter | Low Value Effect | High Value Effect |
|-----------|------------------|-------------------|
| `min_cut` | More permissive | More conservative |
| `max_shift` | More conservative | More permissive |
| `tau_deny` | More permissive | More conservative |
| `tau_permit` | More conservative | More permissive |
| `permit_ttl_ns` | Tighter security | Looser security |
</details>
<details>
<summary><strong>Tutorial 5: Human Escalation for DEFER Decisions</strong></summary>
### Handling Uncertain Situations
When TileZero returns DEFER, escalate to a human operator.
```rust
use cognitum_gate_tilezero::{ActionContext, EscalationInfo, GateDecision, TileZero};
async fn handle_action(tilezero: &TileZero, action: ActionContext) {
let token = tilezero.decide(&action).await;
match token.decision {
GateDecision::Permit => {
// Auto-approve
execute_action(&action).await;
}
GateDecision::Deny => {
// Auto-reject
log_rejection(&action, "Structural issue detected");
}
GateDecision::Defer => {
// Escalate to human
let escalation = EscalationInfo {
to: "security-team@example.com".to_string(),
context_url: format!("https://dashboard/actions/{}", action.action_id),
timeout_ns: 60_000_000_000, // 60 seconds
default_on_timeout: "deny".to_string(),
};
match await_human_decision(&escalation).await {
HumanDecision::Approve => execute_action(&action).await,
HumanDecision::Reject => log_rejection(&action, "Human rejected"),
HumanDecision::Timeout => log_rejection(&action, "Escalation timeout"),
}
}
}
}
```
</details>
---
## API Reference
<details>
<summary><strong>Core Types</strong></summary>
### GateDecision
```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GateDecision {
/// All filters pass - action is permitted
Permit,
/// Uncertainty - defer to human or wait
Defer,
/// Structural issue - deny action
Deny,
}
```
### GateThresholds
```rust
pub struct GateThresholds {
/// Minimum global min-cut value for PERMIT
pub min_cut: f64,
/// Maximum allowed shift pressure
pub max_shift: f64,
/// E-value below which to DENY
pub tau_deny: f64,
/// E-value above which to PERMIT
pub tau_permit: f64,
/// Permit token time-to-live in nanoseconds
pub permit_ttl_ns: u64,
}
```
### PermitToken
```rust
pub struct PermitToken {
/// The gate decision
pub decision: GateDecision,
/// ID of the action this token authorizes
pub action_id: ActionId,
/// Unix timestamp in nanoseconds
pub timestamp: u64,
/// Time-to-live in nanoseconds
pub ttl_ns: u64,
/// Blake3 hash of witness state
pub witness_hash: [u8; 32],
/// Sequence number in receipt log
pub sequence: u64,
/// Ed25519 signature
pub signature: [u8; 64],
}
```
</details>
<details>
<summary><strong>TileZero API</strong></summary>
### Constructor
```rust
impl TileZero {
/// Create a new TileZero arbiter with given thresholds
pub fn new(thresholds: GateThresholds) -> Self;
}
```
### Core Methods
```rust
impl TileZero {
/// Collect reports from worker tiles
pub async fn collect_reports(&self, reports: &[TileReport]);
/// Make a gate decision for an action
pub async fn decide(&self, action_ctx: &ActionContext) -> PermitToken;
/// Get a receipt by sequence number
pub async fn get_receipt(&self, sequence: u64) -> Option<WitnessReceipt>;
/// Verify hash chain integrity
pub async fn verify_chain_to(&self, sequence: u64) -> Result<(), ChainVerifyError>;
/// Get the token verifier (public key)
pub fn verifier(&self) -> Verifier;
/// Export receipts as JSON for audit
pub async fn export_receipts_json(&self) -> Result<String, serde_json::Error>;
}
```
</details>
---
## Feature Flags
| Feature | Description | Default |
|---------|-------------|---------|
| `mincut` | Enable ruvector-mincut integration for real min-cut | No |
| `audit-replay` | Enable decision replay for debugging | No |
```toml
# Full features
cognitum-gate-tilezero = { version = "0.1", features = ["mincut", "audit-replay"] }
```
---
## Security
### Cryptographic Guarantees
| Component | Algorithm | Purpose |
|-----------|-----------|---------|
| Token signing | **Ed25519** | Unforgeable authorization tokens |
| Hash chain | **Blake3** | Tamper-evident audit trail |
| Key derivation | **Deterministic** | Reproducible in test environments |
### Security Considerations
- **Private keys** are generated at TileZero creation and never exported
- **Tokens expire** after `permit_ttl_ns` nanoseconds
- **Hash chain** allows detection of any receipt tampering
- **Constant-time comparison** used for signature verification
---
## Integration with ruQu
TileZero is designed to work with [ruQu](../ruQu/README.md), the quantum coherence assessment system:
```rust
// ruQu provides the coherence data
let ruqu_fabric = ruqu::QuantumFabric::new(config);
// TileZero makes authorization decisions
let tilezero = TileZero::new(thresholds);
// Integration loop
loop {
// ruQu assesses coherence
let reports = ruqu_fabric.collect_tile_reports();
// TileZero merges and decides
tilezero.collect_reports(&reports).await;
// Gate an action
let token = tilezero.decide(&action).await;
}
```
---
## Benchmarks
Run the benchmarks:
```bash
cargo bench -p cognitum-gate-tilezero
```
### Expected Performance
| Operation | Typical Latency |
|-----------|-----------------|
| Token signing (Ed25519) | ~50μs |
| Decision evaluation | ~10μs |
| Receipt append (Blake3) | ~5μs |
| Report merge (per tile) | ~1μs |
---
## Related Crates
| Crate | Purpose |
|-------|---------|
| [ruQu](../ruQu/README.md) | Quantum coherence assessment |
| [ruvector-mincut](../ruvector-mincut/README.md) | Subpolynomial dynamic min-cut |
| [cognitum-gate-kernel](../cognitum-gate-kernel/README.md) | WASM kernel for worker tiles |
---
## License
MIT OR Apache-2.0
---
<p align="center">
<em>"The arbiter sees all tiles. The arbiter decides."</em>
</p>
<p align="center">
<strong>cognitum-gate-tilezero — Central coordination for distributed coherence.</strong>
</p>
<p align="center">
<a href="https://ruv.io">ruv.io</a> •
<a href="https://github.com/ruvnet/ruvector">RuVector</a> •
<a href="https://crates.io/crates/cognitum-gate-tilezero">crates.io</a>
</p>
<p align="center">
<sub>Built with care by the <a href="https://ruv.io">ruv.io</a> team</sub>
</p>

View File

@@ -0,0 +1,638 @@
//! Consolidated benchmarks for cognitum-gate-tilezero
//!
//! Target latencies:
//! - Merge 255 reports: < 10ms
//! - Full gate decision: p99 < 50ms
//! - Receipt hash: < 10us
//! - Chain verify 1000 receipts: < 100ms
//! - Permit sign: < 5ms
//! - Permit verify: < 1ms
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use rand::Rng;
use std::collections::HashMap;
use cognitum_gate_tilezero::{
merge::{EdgeSummary, MergeStrategy, NodeSummary, ReportMerger, WorkerReport},
ActionContext, ActionMetadata, ActionTarget, EvidenceFilter, GateDecision, GateThresholds,
PermitState, PermitToken, ReceiptLog, ReducedGraph, ThreeFilterDecision, TileId, TileZero,
TimestampProof, WitnessReceipt, WitnessSummary,
};
// ============================================================================
// Helper Functions
// ============================================================================
/// Build a deterministic `PermitToken` for benchmarking.
///
/// The token carries an epoch-based timestamp offset by `sequence`, a
/// 60-second TTL, and zeroed hash/signature fields; crypto benches sign
/// it separately via `PermitState::sign_token`.
fn create_test_token(sequence: u64) -> PermitToken {
    let timestamp = 1704067200_000_000_000 + sequence * 1_000_000;
    PermitToken {
        decision: GateDecision::Permit,
        action_id: format!("action-{}", sequence),
        timestamp,
        ttl_ns: 60_000_000_000,
        witness_hash: [0u8; 32],
        sequence,
        signature: [0u8; 64],
    }
}
/// Create a test witness summary
///
/// Builds a fixed `WitnessSummary` by deserializing a hand-written JSON
/// value covering all three filter sections: structural (cut value,
/// partition, critical/boundary edges), predictive (set size, coverage),
/// and evidential (e-value, verdict).
///
/// Panics if this JSON shape drifts out of sync with `WitnessSummary`'s
/// serde representation — acceptable for a benchmark-only helper.
fn create_test_summary() -> WitnessSummary {
    let json = serde_json::json!({
        "structural": {
            "cut_value": 10.5,
            "partition": "stable",
            "critical_edges": 15,
            "boundary": ["edge-1", "edge-2"]
        },
        "predictive": {
            "set_size": 3,
            "coverage": 0.95
        },
        "evidential": {
            "e_value": 150.0,
            "verdict": "accept"
        }
    });
    serde_json::from_value(json).unwrap()
}
/// Assemble a `WitnessReceipt` at `sequence`, chained to `previous_hash`.
///
/// The timestamp proof mirrors the token's timestamp and carries the same
/// previous-receipt hash with a zeroed merkle root.
fn create_test_receipt(sequence: u64, previous_hash: [u8; 32]) -> WitnessReceipt {
    let timestamp = 1704067200_000_000_000 + sequence * 1_000_000;
    let proof = TimestampProof {
        timestamp,
        previous_receipt_hash: previous_hash,
        merkle_root: [0u8; 32],
    };
    WitnessReceipt {
        sequence,
        token: create_test_token(sequence),
        previous_hash,
        witness_summary: create_test_summary(),
        timestamp_proof: proof,
    }
}
/// Create a realistic worker report
///
/// Populates a `WorkerReport` for `tile_id`/`epoch` with `node_count`
/// randomly weighted nodes and `boundary_edge_count` boundary edges whose
/// targets point at the next tile in the 256-tile ring
/// (`(tile_id + 1) % 256`).
///
/// Randomness comes from `rand::thread_rng`, so exact values differ per
/// run; benchmarks depend only on the report's size and shape.
///
/// NOTE(review): when `node_count == 0`, `node_count.max(1)` keeps the
/// modulo safe, but edge endpoints then reference node index 0 which was
/// never added — presumably harmless for merge benchmarks; confirm.
fn create_worker_report(
    tile_id: TileId,
    epoch: u64,
    node_count: usize,
    boundary_edge_count: usize,
) -> WorkerReport {
    let mut rng = rand::thread_rng();
    let mut report = WorkerReport::new(tile_id, epoch);
    for i in 0..node_count {
        report.add_node(NodeSummary {
            id: format!("node-{}-{}", tile_id, i),
            weight: rng.gen_range(0.1..10.0),
            edge_count: rng.gen_range(5..50),
            coherence: rng.gen_range(0.7..1.0),
        });
    }
    for i in 0..boundary_edge_count {
        report.add_boundary_edge(EdgeSummary {
            source: format!("node-{}-{}", tile_id, i % node_count.max(1)),
            // Cross-tile edge: target lives on the next tile in the ring.
            target: format!(
                "node-{}-{}",
                (tile_id as usize + 1) % 256,
                i % node_count.max(1)
            ),
            capacity: rng.gen_range(1.0..100.0),
            is_boundary: true,
        });
    }
    report.local_mincut = rng.gen_range(1.0..20.0);
    report.confidence = rng.gen_range(0.8..1.0);
    // Deterministic, tile-staggered timestamp (100 ms apart per tile).
    report.timestamp_ms = 1704067200_000 + tile_id as u64 * 100;
    report
}
/// Produce one report per worker tile (IDs 1..=255) for a single epoch.
fn create_all_tile_reports(
    epoch: u64,
    nodes_per_tile: usize,
    edges_per_tile: usize,
) -> Vec<WorkerReport> {
    let mut reports = Vec::with_capacity(255);
    for tile_id in 1..=255u8 {
        reports.push(create_worker_report(tile_id, epoch, nodes_per_tile, edges_per_tile));
    }
    reports
}
/// Build a representative `ActionContext` (config change on a router)
/// for decision benchmarks; `id` only varies the action id.
fn create_action_context(id: usize) -> ActionContext {
    let mut extra = HashMap::new();
    extra.insert("priority".to_string(), serde_json::json!(100));

    let target = ActionTarget {
        device: Some("router-1".to_string()),
        path: Some("/config/routing/policy".to_string()),
        extra,
    };
    let context = ActionMetadata {
        agent_id: "agent-001".to_string(),
        session_id: Some("session-12345".to_string()),
        prior_actions: vec!["action-prev-1".to_string()],
        urgency: "normal".to_string(),
    };

    ActionContext {
        action_id: format!("action-{}", id),
        action_type: "config_change".to_string(),
        target,
        context,
    }
}
/// Create realistic graph state
///
/// Builds a `ReducedGraph` whose per-tile coherence sits near
/// `coherence_level`, then derives the global cut, evidence, and shift
/// pressure from the same level (higher coherence → larger cut/evidence,
/// lower shift pressure).
///
/// NOTE(review): `%` binds tighter than `+`, so the per-tile jitter is
/// `(tile_id * 0.001) % 0.1` (a value in [0, 0.1)) added to
/// `coherence_level` — presumably intended; confirm the parenthesization.
fn create_realistic_graph(coherence_level: f64) -> ReducedGraph {
    let mut graph = ReducedGraph::new();
    for tile_id in 1..=255u8 {
        let tile_coherence = (coherence_level + (tile_id as f64 * 0.001) % 0.1) as f32;
        graph.update_coherence(tile_id, tile_coherence);
    }
    graph.set_global_cut(coherence_level * 15.0);
    graph.set_evidence(coherence_level * 150.0);
    graph.set_shift_pressure(0.1 * (1.0 - coherence_level));
    graph
}
// ============================================================================
// 1. Merge Reports Benchmark
// ============================================================================
/// Benchmark merging 255 tile reports (target: < 10ms).
///
/// Runs every merge strategy on minimal per-tile payloads, then measures
/// realistic and heavy payloads under the simple-average strategy.
fn bench_merge_reports(c: &mut Criterion) {
    let mut group = c.benchmark_group("merge_reports");
    group.throughput(Throughput::Elements(255));

    let strategies = [
        ("simple_average", MergeStrategy::SimpleAverage),
        ("weighted_average", MergeStrategy::WeightedAverage),
        ("median", MergeStrategy::Median),
        ("maximum", MergeStrategy::Maximum),
        ("byzantine_ft", MergeStrategy::ByzantineFaultTolerant),
    ];

    // Baseline: one node and two boundary edges per tile.
    let minimal_reports = create_all_tile_reports(0, 1, 2);
    for (label, strategy) in &strategies {
        let strategy_merger = ReportMerger::new(*strategy);
        group.bench_with_input(
            BenchmarkId::new("255_tiles_minimal", label),
            &minimal_reports,
            |b, reports| b.iter(|| black_box(strategy_merger.merge(black_box(reports)))),
        );
    }

    // Realistic payload: 10 nodes and 5 boundary edges per tile.
    let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
    let realistic_reports = create_all_tile_reports(0, 10, 5);
    group.bench_function("255_tiles_realistic", |b| {
        b.iter(|| black_box(merger.merge(black_box(&realistic_reports))))
    });

    // Heavy payload: 50 nodes and 20 boundary edges per tile.
    let heavy_reports = create_all_tile_reports(0, 50, 20);
    group.bench_function("255_tiles_heavy", |b| {
        b.iter(|| black_box(merger.merge(black_box(&heavy_reports))))
    });

    group.finish();
}
// ============================================================================
// 2. Full Gate Decision Benchmark
// ============================================================================
/// Benchmark full gate decision (target: p99 < 50ms)
///
/// Three layers are measured:
/// 1. `tilezero_full_decision` — end-to-end async `TileZero::decide`.
/// 2. `three_filter` — `ThreeFilterDecision::evaluate` alone, on graphs
///    at high/medium/low coherence (no signing/logging).
/// 3. `batch_sequential` — sequential decision throughput for batches of
///    10 and 50 actions.
fn bench_decision(c: &mut Criterion) {
    // One Tokio runtime shared by every async bench in this group.
    let rt = tokio::runtime::Runtime::new().unwrap();
    let mut group = c.benchmark_group("gate_decision");
    group.throughput(Throughput::Elements(1));
    // Full TileZero decision
    let thresholds = GateThresholds::default();
    let tilezero = TileZero::new(thresholds.clone());
    let ctx = create_action_context(0);
    group.bench_function("tilezero_full_decision", |b| {
        b.to_async(&rt)
            .iter(|| async { black_box(tilezero.decide(black_box(&ctx)).await) });
    });
    // Three-filter decision only (no crypto)
    let decision = ThreeFilterDecision::new(thresholds);
    let graph_states = [
        ("high_coherence", create_realistic_graph(0.95)),
        ("medium_coherence", create_realistic_graph(0.7)),
        ("low_coherence", create_realistic_graph(0.3)),
    ];
    for (name, graph) in &graph_states {
        group.bench_with_input(BenchmarkId::new("three_filter", name), graph, |b, graph| {
            b.iter(|| black_box(decision.evaluate(black_box(graph))))
        });
    }
    // Batch decisions
    for batch_size in [10, 50] {
        let contexts: Vec<_> = (0..batch_size).map(create_action_context).collect();
        group.bench_with_input(
            BenchmarkId::new("batch_sequential", batch_size),
            &contexts,
            |b, contexts| {
                b.to_async(&rt).iter(|| async {
                    // Decisions are awaited one at a time: this measures
                    // sequential latency, not concurrent throughput.
                    for ctx in contexts {
                        black_box(tilezero.decide(ctx).await);
                    }
                });
            },
        );
    }
    group.finish();
}
// ============================================================================
// 3. Receipt Hash Benchmark
// ============================================================================
/// Benchmark receipt hash computation (target: < 10us).
///
/// Covers a single-receipt hash, hashing as the structural boundary list
/// grows, and hashing just the witness summary.
fn bench_receipt_hash(c: &mut Criterion) {
    let mut group = c.benchmark_group("receipt_hash");
    group.throughput(Throughput::Elements(1));

    // Baseline receipt with the default (two-entry) boundary.
    let base_receipt = create_test_receipt(0, [0u8; 32]);
    group.bench_function("hash_single", |b| b.iter(|| black_box(base_receipt.hash())));

    // Scaling: replace the boundary list with 0/10/50/100 synthetic edges.
    for boundary_size in [0, 10, 50, 100] {
        let mut sized_receipt = create_test_receipt(0, [0u8; 32]);
        sized_receipt.witness_summary.structural.boundary = (0..boundary_size)
            .map(|i| format!("boundary-edge-{}", i))
            .collect();
        group.bench_with_input(
            BenchmarkId::new("boundary_size", boundary_size),
            &sized_receipt,
            |b, receipt| b.iter(|| black_box(receipt.hash())),
        );
    }

    // Witness summary hashed on its own, without the receipt envelope.
    let summary = create_test_summary();
    group.bench_function("witness_summary_hash", |b| {
        b.iter(|| black_box(summary.hash()))
    });

    group.finish();
}
// ============================================================================
// 4. Receipt Chain Verification Benchmark
// ============================================================================
/// Benchmark receipt chain verification (target: < 100ms for 1000 receipts).
///
/// Verifies pre-built chains of several lengths, and separately measures
/// the cost of building a 1000-entry chain from scratch.
fn bench_receipt_chain_verify(c: &mut Criterion) {
    let mut group = c.benchmark_group("receipt_chain_verify");

    for chain_length in [100, 500, 1000, 2000] {
        group.throughput(Throughput::Elements(chain_length as u64));

        // Pre-build a chain of the requested length, each receipt linked
        // to the log's current tail hash.
        let mut log = ReceiptLog::new();
        for seq in 0..chain_length {
            let linked = create_test_receipt(seq as u64, log.last_hash());
            log.append(linked);
        }

        group.bench_with_input(
            BenchmarkId::new("verify_chain", chain_length),
            &log,
            |b, log| b.iter(|| black_box(log.verify_chain_to((chain_length - 1) as u64))),
        );
    }

    // Append-only cost: construct a fresh 1000-entry chain per iteration.
    group.bench_function("build_chain_1000", |b| {
        b.iter(|| {
            let mut log = ReceiptLog::new();
            for seq in 0..1000 {
                let linked = create_test_receipt(seq, log.last_hash());
                log.append(linked);
            }
            black_box(log)
        })
    });

    group.finish();
}
// ============================================================================
// 5. Permit Sign Benchmark
// ============================================================================
/// Benchmark permit token signing (target: < 5ms)
///
/// Covers single signs, sensitivity to `action_id` length, batch signing,
/// and the cost of producing the signable byte content alone.
fn bench_permit_sign(c: &mut Criterion) {
    let mut group = c.benchmark_group("permit_sign");
    group.throughput(Throughput::Elements(1));
    // One signing state (key material) shared by every case below.
    let state = PermitState::new();
    // Single sign
    group.bench_function("sign_single", |b| {
        b.iter(|| {
            // Token construction is deliberately inside the timed loop;
            // `signable_content` below isolates serialization cost.
            let token = create_test_token(black_box(0));
            black_box(state.sign_token(token))
        })
    });
    // Sign with varying action_id lengths
    for action_len in [10, 50, 100, 500] {
        let mut token = create_test_token(0);
        token.action_id = "x".repeat(action_len);
        group.bench_with_input(
            BenchmarkId::new("action_len", action_len),
            &token,
            // Cloned per iteration because `sign_token` takes the token
            // by value.
            |b, token| b.iter(|| black_box(state.sign_token(token.clone()))),
        );
    }
    // Batch signing
    for batch_size in [10, 50, 100] {
        let tokens: Vec<_> = (0..batch_size)
            .map(|i| create_test_token(i as u64))
            .collect();
        group.bench_with_input(
            BenchmarkId::new("batch_sign", batch_size),
            &tokens,
            |b, tokens| {
                b.iter(|| {
                    let signed: Vec<_> = tokens
                        .iter()
                        .cloned()
                        .map(|t| state.sign_token(t))
                        .collect();
                    black_box(signed)
                })
            },
        );
    }
    // Signable content generation
    let token = create_test_token(0);
    group.bench_function("signable_content", |b| {
        b.iter(|| black_box(token.signable_content()))
    });
    group.finish();
}
// ============================================================================
// 6. Permit Verify Benchmark
// ============================================================================
/// Benchmark permit token verification (target: < 1ms).
///
/// Measures a single signature check, the base64 transport encoding that
/// typically brackets verification, and back-to-back verification of 100
/// distinct signed tokens.
fn bench_permit_verify(c: &mut Criterion) {
    let mut group = c.benchmark_group("permit_verify");
    group.throughput(Throughput::Elements(1));

    let state = PermitState::new();
    let verifier = state.verifier();
    let signed_token = state.sign_token(create_test_token(0));

    // Signature check on one token.
    group.bench_function("verify_single", |b| {
        b.iter(|| black_box(verifier.verify(black_box(&signed_token))))
    });

    // Wire encoding/decoding.
    let encoded = signed_token.encode_base64();
    group.bench_function("encode_base64", |b| {
        b.iter(|| black_box(signed_token.encode_base64()))
    });
    group.bench_function("decode_base64", |b| {
        b.iter(|| black_box(PermitToken::decode_base64(black_box(&encoded))))
    });
    group.bench_function("roundtrip_encode_decode", |b| {
        b.iter(|| {
            let wire = signed_token.encode_base64();
            black_box(PermitToken::decode_base64(&wire))
        })
    });

    // 100 distinct signed tokens verified sequentially.
    let signed_tokens: Vec<_> = (0..100)
        .map(|seq| state.sign_token(create_test_token(seq)))
        .collect();
    group.bench_function("verify_batch_100", |b| {
        b.iter(|| {
            for token in &signed_tokens {
                black_box(verifier.verify(token));
            }
        })
    });

    group.finish();
}
// ============================================================================
// Additional Benchmarks
// ============================================================================
/// Benchmark E-value computation
///
/// Two parts: (1) scalar updates against `EvidenceFilter`s warmed to
/// several capacities, and (2) four strategies for aggregating 255
/// per-tile e-values into one statistic. The aggregation variants use
/// different floating-point accumulation orders, so results can differ in
/// the last bits — they are compared for speed, not bit-equality.
fn bench_evalue_computation(c: &mut Criterion) {
    let mut group = c.benchmark_group("evalue_computation");
    group.throughput(Throughput::Elements(1));
    // Scalar update
    for capacity in [10, 100, 1000] {
        // Warm the filter with `capacity` slightly-increasing updates so
        // the bench measures steady-state behavior.
        let mut filter = EvidenceFilter::new(capacity);
        for i in 0..capacity {
            filter.update(1.0 + (i as f64 * 0.001));
        }
        group.bench_with_input(
            BenchmarkId::new("scalar_update", capacity),
            &capacity,
            |b, _| {
                b.iter(|| {
                    // NOTE(review): the filter keeps mutating across
                    // iterations, so its state drifts over the run — fine
                    // for throughput, but not a fixed-input measurement.
                    filter.update(black_box(1.5));
                    black_box(filter.current())
                })
            },
        );
    }
    // SIMD-friendly aggregation patterns
    let tile_count = 255;
    let e_values: Vec<f64> = (0..tile_count).map(|i| 1.0 + (i as f64 * 0.01)).collect();
    // Straight-line product over all 255 values.
    group.bench_function("aggregate_255_scalar", |b| {
        b.iter(|| {
            let product: f64 = e_values.iter().product();
            black_box(product)
        })
    });
    // Chunked processing (SIMD-friendly)
    group.bench_function("aggregate_255_chunked_4", |b| {
        b.iter(|| {
            let mut accumulator = 1.0f64;
            for chunk in e_values.chunks(4) {
                let chunk_product: f64 = chunk.iter().product();
                accumulator *= chunk_product;
            }
            black_box(accumulator)
        })
    });
    // Log-sum pattern (numerically stable)
    group.bench_function("aggregate_255_log_sum", |b| {
        b.iter(|| {
            let log_sum: f64 = e_values.iter().map(|x| x.ln()).sum();
            black_box(log_sum.exp())
        })
    });
    // Parallel reduction
    group.bench_function("aggregate_255_parallel_8", |b| {
        b.iter(|| {
            // Eight independent accumulator lanes shorten the dependency
            // chain, mimicking a SIMD-style reduction.
            let mut lanes = [1.0f64; 8];
            for (i, &val) in e_values.iter().enumerate() {
                lanes[i % 8] *= val;
            }
            let result: f64 = lanes.iter().product();
            black_box(result)
        })
    });
    group.finish();
}
/// Benchmark graph operations: bulk coherence updates at several fabric
/// sizes, plus witness summary generation from a realistic graph.
fn bench_graph_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("graph_operations");

    // Coherence updates: rebuild a graph and touch every tile once.
    for tile_count in [64, 128, 255] {
        group.throughput(Throughput::Elements(tile_count as u64));
        group.bench_with_input(
            BenchmarkId::new("coherence_updates", tile_count),
            &tile_count,
            |b, &count| {
                b.iter(|| {
                    let mut fabric = ReducedGraph::new();
                    for id in 1..=count as u8 {
                        fabric.update_coherence(id, black_box(0.9));
                    }
                    black_box(fabric)
                })
            },
        );
    }

    // Witness summary generated from a 0.9-coherence graph.
    let healthy_graph = create_realistic_graph(0.9);
    group.bench_function("witness_summary_generate", |b| {
        b.iter(|| black_box(healthy_graph.witness_summary()))
    });

    group.finish();
}
/// Benchmark log operations
///
/// Measures appending one receipt to logs of several pre-built sizes
/// (via `iter_batched` so every iteration starts from a fresh log), and
/// random access into a 100-entry log.
fn bench_receipt_log_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("receipt_log_ops");
    group.throughput(Throughput::Elements(1));
    // Append to various log sizes
    for initial_size in [10, 100, 500] {
        group.bench_with_input(
            BenchmarkId::new("append_to_n", initial_size),
            &initial_size,
            |b, &size| {
                b.iter_batched(
                    // Setup (untimed): build a log of `size` receipts.
                    || {
                        let mut log = ReceiptLog::new();
                        for i in 0..size {
                            let receipt = create_test_receipt(i as u64, log.last_hash());
                            log.append(receipt);
                        }
                        log
                    },
                    // Timed: one append onto the prepared log.
                    |mut log| {
                        let receipt = create_test_receipt(log.len() as u64, log.last_hash());
                        log.append(receipt);
                        black_box(log)
                    },
                    criterion::BatchSize::SmallInput,
                )
            },
        );
    }
    // Get receipt
    let mut log = ReceiptLog::new();
    for i in 0..100 {
        let receipt = create_test_receipt(i, log.last_hash());
        log.append(receipt);
    }
    // Lookup by sequence number in the middle of the log.
    group.bench_function("get_receipt", |b| {
        b.iter(|| black_box(log.get(black_box(50))))
    });
    group.finish();
}
// ============================================================================
// Criterion Groups
// ============================================================================

// Each group bundles related benchmarks; `criterion_main!` wires every group
// into the benchmark binary's entry point.
criterion_group!(merge_benches, bench_merge_reports);
criterion_group!(decision_benches, bench_decision);
criterion_group!(
    crypto_benches,
    bench_receipt_hash,
    bench_receipt_chain_verify,
    bench_permit_sign,
    bench_permit_verify
);
criterion_group!(
    additional_benches,
    bench_evalue_computation,
    bench_graph_operations,
    bench_receipt_log_operations
);
criterion_main!(
    merge_benches,
    decision_benches,
    crypto_benches,
    additional_benches
);

View File

@@ -0,0 +1,346 @@
//! Benchmarks for cryptographic operations
//!
//! Target latencies:
//! - Receipt signing: < 5ms
//! - Hash chain verification for 1000 receipts: < 100ms
//! - Permit token encoding/decoding: < 1ms
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use cognitum_gate_tilezero::{
GateDecision, PermitState, PermitToken, ReceiptLog, TimestampProof, WitnessReceipt,
WitnessSummary,
};
/// Build an unsigned permit token with deterministic, sequence-derived fields.
fn create_test_token(sequence: u64) -> PermitToken {
    // Base timestamp advanced by 1ms per sequence number.
    let timestamp = 1704067200_000_000_000 + sequence * 1_000_000;
    PermitToken {
        decision: GateDecision::Permit,
        action_id: format!("action-{}", sequence),
        timestamp,
        ttl_ns: 60_000_000_000,
        witness_hash: [0u8; 32],
        sequence,
        // Zeroed placeholder; signing benchmarks replace it via sign_token.
        signature: [0u8; 64],
    }
}
/// Build a test witness summary by deserializing a hand-written JSON document.
///
/// The fields are populated through serde rather than direct construction,
/// so this helper only relies on the public `Deserialize` impl.
fn create_test_summary() -> WitnessSummary {
    let doc = serde_json::json!({
        "structural": {
            "cut_value": 10.5,
            "partition": "stable",
            "critical_edges": 15,
            "boundary": ["edge-1", "edge-2"]
        },
        "predictive": {
            "set_size": 3,
            "coverage": 0.95
        },
        "evidential": {
            "e_value": 150.0,
            "verdict": "accept"
        }
    });
    serde_json::from_value(doc).unwrap()
}
/// Assemble a receipt at `sequence`, chained to `previous_hash`.
fn create_test_receipt(sequence: u64, previous_hash: [u8; 32]) -> WitnessReceipt {
    // Keep the proof timestamp in lockstep with the token's.
    let ts = 1704067200_000_000_000 + sequence * 1_000_000;
    WitnessReceipt {
        sequence,
        token: create_test_token(sequence),
        previous_hash,
        witness_summary: create_test_summary(),
        timestamp_proof: TimestampProof {
            timestamp: ts,
            previous_receipt_hash: previous_hash,
            merkle_root: [0u8; 32],
        },
    }
}
/// Benchmark permit token signing (target: < 5ms per signature).
fn bench_token_signing(c: &mut Criterion) {
    let mut group = c.benchmark_group("token_signing");
    group.throughput(Throughput::Elements(1));
    let state = PermitState::new();
    let base = create_test_token(0);

    // Baseline: build and sign a fresh token each iteration.
    group.bench_function("sign_token", |b| {
        b.iter(|| {
            let unsigned = create_test_token(black_box(0));
            black_box(state.sign_token(unsigned))
        })
    });

    // Does the action_id length affect signing cost?
    for action_len in [10, 50, 100, 500] {
        let mut padded = base.clone();
        padded.action_id = "x".repeat(action_len);
        group.bench_with_input(
            BenchmarkId::new("sign_action_len", action_len),
            &padded,
            |b, tok| b.iter(|| black_box(state.sign_token(tok.clone()))),
        );
    }
    group.finish();
}
/// Benchmark signature verification of an already-signed permit token.
fn bench_token_verification(c: &mut Criterion) {
    let mut group = c.benchmark_group("token_verification");
    group.throughput(Throughput::Elements(1));

    let state = PermitState::new();
    let verifier = state.verifier();
    let signed = state.sign_token(create_test_token(0));

    group.bench_function("verify_token", |b| {
        b.iter(|| black_box(verifier.verify(black_box(&signed))))
    });
    group.finish();
}
/// Benchmark receipt hashing, including sensitivity to boundary-list size.
fn bench_receipt_hashing(c: &mut Criterion) {
    let mut group = c.benchmark_group("receipt_hashing");
    group.throughput(Throughput::Elements(1));

    let baseline = create_test_receipt(0, [0u8; 32]);
    group.bench_function("hash_receipt", |b| b.iter(|| black_box(baseline.hash())));

    // Grow the structural boundary list to see how hashing scales with it.
    for boundary_size in [0, 10, 50, 100] {
        let mut receipt = create_test_receipt(0, [0u8; 32]);
        receipt.witness_summary.structural.boundary = (0..boundary_size)
            .map(|i| format!("boundary-edge-{}", i))
            .collect();
        group.bench_with_input(
            BenchmarkId::new("hash_boundary_size", boundary_size),
            &receipt,
            |b, r| b.iter(|| black_box(r.hash())),
        );
    }
    group.finish();
}
/// Benchmark hash-chain verification (target: < 100ms for 1000 receipts).
fn bench_chain_verification(c: &mut Criterion) {
    let mut group = c.benchmark_group("chain_verification");
    for chain_length in [100, 500, 1000, 2000] {
        group.throughput(Throughput::Elements(chain_length as u64));

        // Build a valid chain of the requested length up front.
        let mut log = ReceiptLog::new();
        for seq in 0..chain_length {
            let receipt = create_test_receipt(seq as u64, log.last_hash());
            log.append(receipt);
        }

        group.bench_with_input(
            BenchmarkId::new("verify_chain", chain_length),
            &log,
            |b, log| b.iter(|| black_box(log.verify_chain_to((chain_length - 1) as u64))),
        );
    }
    group.finish();
}
/// Benchmark receipt log append and random-access lookup.
fn bench_receipt_log_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("receipt_log");
    group.throughput(Throughput::Elements(1));

    // First append into an empty log (includes log construction cost).
    group.bench_function("append_single", |b| {
        b.iter(|| {
            let mut log = ReceiptLog::new();
            let first = create_test_receipt(0, log.last_hash());
            log.append(first);
            black_box(log)
        })
    });

    // Append to a pre-populated log; iter_batched rebuilds the log outside
    // the timed region so only the single append is measured.
    for initial_size in [10, 100, 500] {
        group.bench_with_input(
            BenchmarkId::new("append_to_n", initial_size),
            &initial_size,
            |b, &size| {
                b.iter_batched(
                    || {
                        let mut log = ReceiptLog::new();
                        for seq in 0..size {
                            let receipt = create_test_receipt(seq as u64, log.last_hash());
                            log.append(receipt);
                        }
                        log
                    },
                    |mut log| {
                        let next = create_test_receipt(log.len() as u64, log.last_hash());
                        log.append(next);
                        black_box(log)
                    },
                    criterion::BatchSize::SmallInput,
                )
            },
        );
    }

    // Random access into a 100-entry log.
    let mut existing_log = ReceiptLog::new();
    for seq in 0..100 {
        let receipt = create_test_receipt(seq, existing_log.last_hash());
        existing_log.append(receipt);
    }
    group.bench_function("get_receipt", |b| {
        b.iter(|| black_box(existing_log.get(black_box(50))))
    });
    group.finish();
}
/// Benchmark base64 encoding/decoding of signed permit tokens (target: < 1ms).
fn bench_token_encoding(c: &mut Criterion) {
    let mut group = c.benchmark_group("token_encoding");
    group.throughput(Throughput::Elements(1));

    let state = PermitState::new();
    let signed = state.sign_token(create_test_token(0));
    let wire = signed.encode_base64();

    group.bench_function("encode_base64", |b| {
        b.iter(|| black_box(signed.encode_base64()))
    });
    group.bench_function("decode_base64", |b| {
        b.iter(|| black_box(PermitToken::decode_base64(black_box(&wire))))
    });
    group.bench_function("roundtrip", |b| {
        b.iter(|| {
            let encoded = signed.encode_base64();
            black_box(PermitToken::decode_base64(&encoded))
        })
    });

    // Encoding cost as the action_id payload grows.
    for action_len in [10, 50, 100, 500] {
        let mut unsigned = create_test_token(0);
        unsigned.action_id = "x".repeat(action_len);
        let signed_long = state.sign_token(unsigned);
        group.bench_with_input(
            BenchmarkId::new("encode_action_len", action_len),
            &signed_long,
            |b, tok| b.iter(|| black_box(tok.encode_base64())),
        );
    }
    group.finish();
}
/// Benchmark generation of the byte content that is fed to the signer.
fn bench_signable_content(c: &mut Criterion) {
    let mut group = c.benchmark_group("signable_content");
    group.throughput(Throughput::Elements(1));

    let token = create_test_token(0);
    group.bench_function("generate", |b| {
        b.iter(|| black_box(token.signable_content()))
    });

    // Scaling with the action_id length.
    for action_len in [10, 100, 1000] {
        let mut padded = create_test_token(0);
        padded.action_id = "x".repeat(action_len);
        group.bench_with_input(
            BenchmarkId::new("action_len", action_len),
            &padded,
            |b, tok| b.iter(|| black_box(tok.signable_content())),
        );
    }
    group.finish();
}
/// Benchmark witness summary hashing and its underlying JSON serialization.
fn bench_witness_summary_hash(c: &mut Criterion) {
    let mut group = c.benchmark_group("witness_summary_hash");
    group.throughput(Throughput::Elements(1));

    let summary = create_test_summary();
    group.bench_function("hash", |b| b.iter(|| black_box(summary.hash())));
    // JSON serialization is used inside hash(), so measure it separately too.
    group.bench_function("to_json", |b| b.iter(|| black_box(summary.to_json())));
    group.finish();
}
/// Benchmark sequential signing of token batches (high-throughput scenarios).
fn bench_batch_signing(c: &mut Criterion) {
    let mut group = c.benchmark_group("batch_signing");
    for batch_size in [10, 50, 100] {
        group.throughput(Throughput::Elements(batch_size as u64));
        let state = PermitState::new();
        let batch: Vec<_> = (0..batch_size)
            .map(|i| create_test_token(i as u64))
            .collect();
        group.bench_with_input(
            BenchmarkId::new("sequential", batch_size),
            &batch,
            |b, tokens| {
                b.iter(|| {
                    let signed: Vec<_> = tokens
                        .iter()
                        .cloned()
                        .map(|t| state.sign_token(t))
                        .collect();
                    black_box(signed)
                })
            },
        );
    }
    group.finish();
}
// Register every crypto-path benchmark in a single group and emit the
// harness entry point for this benchmark binary.
criterion_group!(
    benches,
    bench_token_signing,
    bench_token_verification,
    bench_receipt_hashing,
    bench_chain_verification,
    bench_receipt_log_operations,
    bench_token_encoding,
    bench_signable_content,
    bench_witness_summary_hash,
    bench_batch_signing,
);
criterion_main!(benches);

View File

@@ -0,0 +1,339 @@
//! Benchmarks for the full decision pipeline
//!
//! Target latencies:
//! - Gate decision: p99 < 50ms
//! - E-value computation: < 1ms
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::collections::HashMap;
use cognitum_gate_tilezero::{
ActionContext, ActionMetadata, ActionTarget, DecisionOutcome, EvidenceFilter, GateThresholds,
ReducedGraph, ThreeFilterDecision, TileZero,
};
/// Build a representative action context for benchmarking a gate decision.
fn create_action_context(id: usize) -> ActionContext {
    // Ancillary target attributes carried alongside the device/path.
    let mut extra = HashMap::new();
    extra.insert("priority".to_string(), serde_json::json!(100));
    extra.insert("region".to_string(), serde_json::json!("us-west-2"));

    ActionContext {
        action_id: format!("action-{}", id),
        action_type: "config_change".to_string(),
        target: ActionTarget {
            device: Some("router-1".to_string()),
            path: Some("/config/routing/policy".to_string()),
            extra,
        },
        context: ActionMetadata {
            agent_id: "agent-001".to_string(),
            session_id: Some("session-12345".to_string()),
            prior_actions: vec!["action-prev-1".to_string(), "action-prev-2".to_string()],
            urgency: "normal".to_string(),
        },
    }
}
/// Build a graph that mimics 255 worker tiles reporting coherence.
fn create_realistic_graph(coherence_level: f64) -> ReducedGraph {
    let mut graph = ReducedGraph::new();
    for tile_id in 1..=255u8 {
        // Perturb each tile's coherence with a small tile-dependent offset.
        // Note `%` binds tighter than `+`, so the offset lies in [0, 0.1).
        let jitter = (tile_id as f64 * 0.001) % 0.1;
        graph.update_coherence(tile_id, (coherence_level + jitter) as f32);
    }
    // Aggregate quantities scaled from the target coherence level.
    graph.set_global_cut(coherence_level * 15.0);
    graph.set_evidence(coherence_level * 150.0);
    graph.set_shift_pressure(0.1 * (1.0 - coherence_level));
    graph
}
/// Benchmark the full TileZero decision pipeline (target: p99 < 50ms).
fn bench_full_decision_pipeline(c: &mut Criterion) {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let mut group = c.benchmark_group("decision_pipeline");
    group.throughput(Throughput::Elements(1));

    // Three threshold profiles: the defaults, a strict gate, and a relaxed one.
    let strict = GateThresholds {
        tau_deny: 0.001,
        tau_permit: 200.0,
        min_cut: 10.0,
        max_shift: 0.3,
        permit_ttl_ns: 30_000_000_000,
        theta_uncertainty: 30.0,
        theta_confidence: 3.0,
    };
    let relaxed = GateThresholds {
        tau_deny: 0.1,
        tau_permit: 50.0,
        min_cut: 2.0,
        max_shift: 0.8,
        permit_ttl_ns: 120_000_000_000,
        theta_uncertainty: 10.0,
        theta_confidence: 10.0,
    };
    let configs = [
        ("default", GateThresholds::default()),
        ("strict", strict),
        ("relaxed", relaxed),
    ];

    for (name, thresholds) in configs {
        let tilezero = TileZero::new(thresholds);
        let ctx = create_action_context(0);
        group.bench_with_input(BenchmarkId::new("tilezero_decide", name), &ctx, |b, ctx| {
            b.to_async(&rt)
                .iter(|| async { black_box(tilezero.decide(black_box(ctx)).await) });
        });
    }
    group.finish();
}
/// Benchmark the three-filter decision logic across graph health regimes.
fn bench_three_filter_decision(c: &mut Criterion) {
    let mut group = c.benchmark_group("three_filter_decision");
    group.throughput(Throughput::Elements(1));

    let decision = ThreeFilterDecision::new(GateThresholds::default());

    // Evaluate against high / medium / low coherence graph states.
    let regimes = [
        ("high_coherence", 0.95),
        ("medium_coherence", 0.7),
        ("low_coherence", 0.3),
    ];
    for (name, level) in regimes {
        let graph = create_realistic_graph(level);
        group.bench_with_input(BenchmarkId::new("evaluate", name), &graph, |b, g| {
            b.iter(|| black_box(decision.evaluate(black_box(g))))
        });
    }
    group.finish();
}
/// Benchmark scalar E-value updates (target: < 1ms).
fn bench_e_value_scalar(c: &mut Criterion) {
    let mut group = c.benchmark_group("e_value_computation");
    group.throughput(Throughput::Elements(1));

    for capacity in [10, 100, 1000] {
        // Warm the filter to capacity before timing.
        let mut filter = EvidenceFilter::new(capacity);
        (0..capacity).for_each(|i| filter.update(1.0 + (i as f64 * 0.001)));

        group.bench_with_input(
            BenchmarkId::new("scalar_update", capacity),
            &capacity,
            |b, _| {
                b.iter(|| {
                    filter.update(black_box(1.5));
                    black_box(filter.current())
                })
            },
        );
    }
    group.finish();
}
/// Benchmark SIMD-friendly aggregation patterns over 255 tile e-values.
fn bench_e_value_simd(c: &mut Criterion) {
    let mut group = c.benchmark_group("e_value_simd");
    let tile_count = 255;
    group.throughput(Throughput::Elements(tile_count as u64));

    // Deterministic per-tile e-values in a contiguous buffer.
    let e_values: Vec<f64> = (0..tile_count).map(|i| 1.0 + (i as f64 * 0.01)).collect();

    // Scalar baseline: straight product over the slice.
    group.bench_function("aggregate_scalar", |b| {
        b.iter(|| black_box(e_values.iter().product::<f64>()))
    });

    // Chunked: multiply within 4-wide chunks, then fold the partial products
    // (same multiplication order as a plain loop over chunks).
    group.bench_function("aggregate_chunked_4", |b| {
        b.iter(|| {
            let acc = e_values
                .chunks(4)
                .map(|chunk| chunk.iter().product::<f64>())
                .fold(1.0f64, |acc, p| acc * p);
            black_box(acc)
        })
    });

    // Parallel-reduction pattern: round-robin values into 8 lanes, combine.
    group.bench_function("aggregate_parallel_reduction", |b| {
        b.iter(|| {
            let mut lanes = [1.0f64; 8];
            for (i, &v) in e_values.iter().enumerate() {
                lanes[i % 8] *= v;
            }
            black_box(lanes.iter().product::<f64>())
        })
    });
    group.finish();
}
/// Benchmark decision outcome creation
///
/// Measures construction cost of the three outcome variants. The deny and
/// defer paths additionally allocate a reason `String` per iteration.
fn bench_decision_outcome(c: &mut Criterion) {
    let mut group = c.benchmark_group("decision_outcome");
    group.throughput(Throughput::Elements(1));
    // Permit outcome: numeric scores only, no allocation visible here.
    group.bench_function("create_permit", |b| {
        b.iter(|| {
            black_box(DecisionOutcome::permit(
                black_box(0.95),
                black_box(1.0),
                black_box(0.9),
                black_box(0.95),
                black_box(10.0),
            ))
        })
    });
    // Deny outcome attributed to the structural filter.
    group.bench_function("create_deny", |b| {
        b.iter(|| {
            black_box(DecisionOutcome::deny(
                cognitum_gate_tilezero::DecisionFilter::Structural,
                "Low coherence".to_string(),
                black_box(0.3),
                black_box(0.5),
                black_box(0.2),
                black_box(2.0),
            ))
        })
    });
    // Defer outcome attributed to the shift filter.
    group.bench_function("create_defer", |b| {
        b.iter(|| {
            black_box(DecisionOutcome::defer(
                cognitum_gate_tilezero::DecisionFilter::Shift,
                "High shift pressure".to_string(),
                black_box(0.8),
                black_box(0.3),
                black_box(0.7),
                black_box(6.0),
            ))
        })
    });
    group.finish();
}
/// Benchmark witness summary generation, hashing, and JSON serialization.
fn bench_witness_summary(c: &mut Criterion) {
    let mut group = c.benchmark_group("witness_summary");
    group.throughput(Throughput::Elements(1));

    let graph = create_realistic_graph(0.9);
    group.bench_function("generate", |b| {
        b.iter(|| black_box(graph.witness_summary()))
    });

    // Downstream operations on a single generated summary.
    let summary = graph.witness_summary();
    group.bench_function("hash", |b| b.iter(|| black_box(summary.hash())));
    group.bench_function("to_json", |b| b.iter(|| black_box(summary.to_json())));
    group.finish();
}
/// Benchmark sequential batches of gate decisions through one TileZero.
fn bench_batch_decisions(c: &mut Criterion) {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let mut group = c.benchmark_group("batch_decisions");

    for batch_size in [10, 50, 100] {
        group.throughput(Throughput::Elements(batch_size as u64));
        let tilezero = TileZero::new(GateThresholds::default());
        let contexts: Vec<_> = (0..batch_size).map(create_action_context).collect();

        group.bench_with_input(
            BenchmarkId::new("sequential", batch_size),
            &contexts,
            |b, contexts| {
                b.to_async(&rt).iter(|| async {
                    // Decisions issued one after another on the same arbiter.
                    for ctx in contexts {
                        black_box(tilezero.decide(ctx).await);
                    }
                });
            },
        );
    }
    group.finish();
}
/// Benchmark absorbing per-tile coherence reports into a fresh graph.
fn bench_graph_updates(c: &mut Criterion) {
    let mut group = c.benchmark_group("graph_updates");
    for tile_count in [64, 128, 255] {
        group.throughput(Throughput::Elements(tile_count as u64));
        group.bench_with_input(
            BenchmarkId::new("coherence_updates", tile_count),
            &tile_count,
            |b, &n| {
                b.iter(|| {
                    let mut g = ReducedGraph::new();
                    (1..=n as u8).for_each(|tile| g.update_coherence(tile, black_box(0.9)));
                    black_box(g)
                })
            },
        );
    }
    group.finish();
}
// Register every decision-pipeline benchmark in a single group and emit the
// harness entry point for this benchmark binary.
criterion_group!(
    benches,
    bench_full_decision_pipeline,
    bench_three_filter_decision,
    bench_e_value_scalar,
    bench_e_value_simd,
    bench_decision_outcome,
    bench_witness_summary,
    bench_batch_decisions,
    bench_graph_updates,
);
criterion_main!(benches);

View File

@@ -0,0 +1,374 @@
//! Benchmarks for report merging from 255 worker tiles
//!
//! Target latencies:
//! - Merge 255 tile reports: < 10ms
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use rand::Rng;
use cognitum_gate_tilezero::{
merge::{EdgeSummary, MergeStrategy, NodeSummary, ReportMerger, WorkerReport},
TileId,
};
/// Build a randomized worker report with `node_count` local nodes and
/// `boundary_edge_count` cross-tile edges.
fn create_worker_report(
    tile_id: TileId,
    epoch: u64,
    node_count: usize,
    boundary_edge_count: usize,
) -> WorkerReport {
    let mut rng = rand::thread_rng();
    let mut report = WorkerReport::new(tile_id, epoch);

    // Local nodes with randomized weight / degree / coherence.
    for i in 0..node_count {
        report.add_node(NodeSummary {
            id: format!("node-{}-{}", tile_id, i),
            weight: rng.gen_range(0.1..10.0),
            edge_count: rng.gen_range(5..50),
            coherence: rng.gen_range(0.7..1.0),
        });
    }

    // Boundary edges point at the next tile (wrapping at 256);
    // `.max(1)` guards the modulo when node_count is zero.
    for i in 0..boundary_edge_count {
        let local = i % node_count.max(1);
        report.add_boundary_edge(EdgeSummary {
            source: format!("node-{}-{}", tile_id, local),
            target: format!("node-{}-{}", (tile_id as usize + 1) % 256, local),
            capacity: rng.gen_range(1.0..100.0),
            is_boundary: true,
        });
    }

    report.local_mincut = rng.gen_range(1.0..20.0);
    report.confidence = rng.gen_range(0.8..1.0);
    report.timestamp_ms = 1704067200_000 + tile_id as u64 * 100;
    report
}
/// One report per worker tile (ids 1..=255), all at the same epoch.
fn create_all_tile_reports(
    epoch: u64,
    nodes_per_tile: usize,
    boundary_edges_per_tile: usize,
) -> Vec<WorkerReport> {
    (1..=255u8)
        .map(|id| create_worker_report(id, epoch, nodes_per_tile, boundary_edges_per_tile))
        .collect()
}
/// Benchmark merging 255 tile reports (target: < 10ms).
///
/// Every merge strategy is exercised against three workload sizes
/// (minimal / realistic / heavy). Benchmark ids keep the original
/// `<workload>/<strategy>` naming, so historical comparisons still line up.
fn bench_merge_255_tiles(c: &mut Criterion) {
    let mut group = c.benchmark_group("merge_255_tiles");
    group.throughput(Throughput::Elements(255));

    let strategies = vec![
        ("simple_average", MergeStrategy::SimpleAverage),
        ("weighted_average", MergeStrategy::WeightedAverage),
        ("median", MergeStrategy::Median),
        ("maximum", MergeStrategy::Maximum),
        ("byzantine_ft", MergeStrategy::ByzantineFaultTolerant),
    ];

    // (label, nodes per tile, boundary edges per tile) — replaces the three
    // copy-pasted strategy loops with a single workload x strategy nest.
    let workloads = [
        ("minimal", 1, 2),    // fast path
        ("realistic", 10, 5), // typical tile
        ("heavy", 50, 20),    // stress case
    ];

    for (workload, nodes, edges) in workloads {
        let reports = create_all_tile_reports(0, nodes, edges);
        for (name, strategy) in &strategies {
            let merger = ReportMerger::new(*strategy);
            group.bench_with_input(
                BenchmarkId::new(workload, name),
                &reports,
                |b, reports| b.iter(|| black_box(merger.merge(black_box(reports)))),
            );
        }
    }
    group.finish();
}
/// Benchmark how merge cost scales with the number of reporting tiles.
fn bench_merge_scaling(c: &mut Criterion) {
    let mut group = c.benchmark_group("merge_scaling");
    for tile_count in [32, 64, 128, 192, 255] {
        group.throughput(Throughput::Elements(tile_count as u64));
        let reports: Vec<_> = (1..=tile_count as u8)
            .map(|id| create_worker_report(id, 0, 10, 5))
            .collect();
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        group.bench_with_input(
            BenchmarkId::new("tiles", tile_count),
            &reports,
            |b, reports| b.iter(|| black_box(merger.merge(black_box(reports)))),
        );
    }
    group.finish();
}
/// Benchmark node merging specifically
///
/// Each tile reports 10 private nodes plus `overlap_factor` nodes whose ids
/// are shared across all tiles, forcing the merger to combine duplicates.
fn bench_node_merging(c: &mut Criterion) {
    let mut group = c.benchmark_group("node_merging");
    // Create reports with overlapping nodes (realistic for boundary merging)
    let create_overlapping_reports = |overlap_factor: usize| -> Vec<WorkerReport> {
        (1..=255u8)
            .map(|tile_id| {
                let mut report = WorkerReport::new(tile_id, 0);
                // Local nodes
                for i in 0..10 {
                    report.add_node(NodeSummary {
                        id: format!("local-{}-{}", tile_id, i),
                        weight: 1.0,
                        edge_count: 10,
                        coherence: 0.9,
                    });
                }
                // Shared/overlapping nodes — same id in every report, but each
                // tile contributes a different weight to be averaged.
                for i in 0..overlap_factor {
                    report.add_node(NodeSummary {
                        id: format!("shared-{}", i),
                        weight: tile_id as f64 * 0.1,
                        edge_count: 5,
                        coherence: 0.95,
                    });
                }
                report
            })
            .collect()
    };
    for overlap in [0, 5, 10, 20] {
        let reports = create_overlapping_reports(overlap);
        let merger = ReportMerger::new(MergeStrategy::WeightedAverage);
        group.bench_with_input(
            BenchmarkId::new("overlap_nodes", overlap),
            &reports,
            |b, reports| b.iter(|| black_box(merger.merge(black_box(reports)))),
        );
    }
    group.finish();
}
/// Benchmark merge cost when boundary edges dominate the reports.
fn bench_edge_merging(c: &mut Criterion) {
    let mut group = c.benchmark_group("edge_merging");

    // 5 nodes per tile, variable boundary-edge count.
    let make_reports = |edges_per_tile: usize| -> Vec<WorkerReport> {
        (1..=255u8)
            .map(|id| create_worker_report(id, 0, 5, edges_per_tile))
            .collect()
    };

    for edge_count in [5, 10, 25, 50] {
        let reports = make_reports(edge_count);
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        // Throughput counts every boundary edge across all 255 tiles.
        group.throughput(Throughput::Elements((255 * edge_count) as u64));
        group.bench_with_input(
            BenchmarkId::new("edges_per_tile", edge_count),
            &reports,
            |b, reports| b.iter(|| black_box(merger.merge(black_box(reports)))),
        );
    }
    group.finish();
}
/// Benchmark state-hash computation on small and large reports.
fn bench_state_hash(c: &mut Criterion) {
    let mut group = c.benchmark_group("state_hash");
    group.throughput(Throughput::Elements(1));

    let small = create_worker_report(1, 0, 5, 2);
    let large = create_worker_report(1, 0, 100, 50);

    // compute_state_hash mutates the report, so clone inside the timed closure.
    group.bench_function("compute_small", |b| {
        b.iter(|| {
            let mut r = small.clone();
            r.compute_state_hash();
            black_box(r.state_hash)
        })
    });
    group.bench_function("compute_large", |b| {
        b.iter(|| {
            let mut r = large.clone();
            r.compute_state_hash();
            black_box(r.state_hash)
        })
    });
    group.finish();
}
/// Benchmark global mincut estimation via a full merge at several fleet sizes.
fn bench_mincut_estimation(c: &mut Criterion) {
    let mut group = c.benchmark_group("mincut_estimation");
    for tile_count in [64, 128, 255] {
        group.throughput(Throughput::Elements(tile_count as u64));
        let reports: Vec<_> = (1..=tile_count as u8)
            .map(|id| create_worker_report(id, 0, 10, 8))
            .collect();
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        group.bench_with_input(
            BenchmarkId::new("tiles", tile_count),
            &reports,
            |b, reports| {
                b.iter(|| {
                    // The estimate is a field of the merged result; the merge
                    // itself dominates the measured time.
                    let merged = merger.merge(reports).unwrap();
                    black_box(merged.global_mincut_estimate)
                })
            },
        );
    }
    group.finish();
}
/// Benchmark confidence aggregation under two merge strategies.
fn bench_confidence_aggregation(c: &mut Criterion) {
    let mut group = c.benchmark_group("confidence_aggregation");
    let reports = create_all_tile_reports(0, 5, 3);

    for (name, strategy) in [
        ("simple_average", MergeStrategy::SimpleAverage),
        ("byzantine_ft", MergeStrategy::ByzantineFaultTolerant),
    ] {
        let merger = ReportMerger::new(strategy);
        group.bench_with_input(
            BenchmarkId::new("strategy", name),
            &reports,
            |b, reports| {
                b.iter(|| {
                    let merged = merger.merge(reports).unwrap();
                    black_box(merged.confidence)
                })
            },
        );
    }
    group.finish();
}
/// Benchmark epoch validation: a consistent batch vs. one with a mismatch.
fn bench_epoch_validation(c: &mut Criterion) {
    let mut group = c.benchmark_group("epoch_validation");

    // Every tile at epoch 42 — the merge should pass validation.
    let valid_reports = create_all_tile_reports(42, 5, 3);
    let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
    group.bench_function("valid_epochs", |b| {
        b.iter(|| black_box(merger.merge(black_box(&valid_reports))))
    });

    // One tile (index 100 = tile 101) at epoch 43 — merge should fail fast.
    let mut invalid_reports = valid_reports.clone();
    invalid_reports[100] = create_worker_report(101, 43, 5, 3);
    group.bench_function("invalid_epochs", |b| {
        b.iter(|| black_box(merger.merge(black_box(&invalid_reports))))
    });
    group.finish();
}
/// Benchmark access patterns over an already-merged report.
fn bench_merged_report_access(c: &mut Criterion) {
    let mut group = c.benchmark_group("merged_report_access");

    // Merge once up front; only the reads below are timed.
    let reports = create_all_tile_reports(0, 10, 5);
    let merged = ReportMerger::new(MergeStrategy::SimpleAverage)
        .merge(&reports)
        .unwrap();

    // Full scans over the merged node and edge collections.
    group.bench_function("iterate_nodes", |b| {
        b.iter(|| black_box(merged.super_nodes.values().map(|n| n.weight).sum::<f64>()))
    });
    group.bench_function("iterate_edges", |b| {
        b.iter(|| black_box(merged.boundary_edges.iter().map(|e| e.capacity).sum::<f64>()))
    });
    // Point lookup by node id.
    group.bench_function("lookup_node", |b| {
        b.iter(|| black_box(merged.super_nodes.get("node-128-5")))
    });
    group.finish();
}
// Register every merge benchmark in a single group and emit the harness
// entry point for this benchmark binary.
criterion_group!(
    benches,
    bench_merge_255_tiles,
    bench_merge_scaling,
    bench_node_merging,
    bench_edge_merging,
    bench_state_hash,
    bench_mincut_estimation,
    bench_confidence_aggregation,
    bench_epoch_validation,
    bench_merged_report_access,
);
criterion_main!(benches);

View File

@@ -0,0 +1,93 @@
//! Basic Coherence Gate Example
//!
//! This example demonstrates:
//! - Creating a TileZero arbiter
//! - Evaluating an action
//! - Verifying the permit token
//!
//! Run with: cargo run --example basic_gate
use cognitum_gate_tilezero::{
ActionContext, ActionMetadata, ActionTarget, GateDecision, GateThresholds, TileZero,
};
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("=== Cognitum Coherence Gate - Basic Example ===\n");
    // Create TileZero with default thresholds
    let thresholds = GateThresholds::default();
    let tilezero = TileZero::new(thresholds);
    // Echo the active thresholds so the reader can relate them to the outcome.
    println!("TileZero initialized with thresholds:");
    println!("  Min cut: {}", tilezero.thresholds().min_cut);
    println!("  Max shift: {}", tilezero.thresholds().max_shift);
    println!(
        "  Deny threshold (tau_deny): {}",
        tilezero.thresholds().tau_deny
    );
    println!(
        "  Permit threshold (tau_permit): {}",
        tilezero.thresholds().tau_permit
    );
    println!();
    // Create an action context describing what the agent wants to do and
    // on whose behalf (agent id, session, urgency).
    let action = ActionContext {
        action_id: "config-push-001".to_string(),
        action_type: "config_change".to_string(),
        target: ActionTarget {
            device: Some("router-west-03".to_string()),
            path: Some("/network/interfaces/eth0".to_string()),
            extra: HashMap::new(),
        },
        context: ActionMetadata {
            agent_id: "ops-agent-12".to_string(),
            session_id: Some("sess-abc123".to_string()),
            prior_actions: vec![],
            urgency: "normal".to_string(),
        },
    };
    println!("Evaluating action:");
    println!("  ID: {}", action.action_id);
    println!("  Type: {}", action.action_type);
    println!("  Agent: {}", action.context.agent_id);
    println!("  Target: {:?}", action.target.device);
    println!();
    // Evaluate the action — the arbiter returns a signed token carrying
    // the decision.
    let token = tilezero.decide(&action).await;
    // Display result, one arm per gate decision.
    match token.decision {
        GateDecision::Permit => {
            println!("Decision: PERMIT");
            println!("  The action is allowed to proceed.");
        }
        GateDecision::Defer => {
            println!("Decision: DEFER");
            println!("  Human review required.");
        }
        GateDecision::Deny => {
            println!("Decision: DENY");
            println!("  Action blocked due to safety concerns.");
        }
    }
    // Token metadata: sequence number, expiry, and a prefix of the witness hash.
    println!("\nToken details:");
    println!("  Sequence: {}", token.sequence);
    println!("  Valid until: {} ns", token.timestamp + token.ttl_ns);
    println!("  Witness hash: {:02x?}", &token.witness_hash[..8]);
    // Verify the token's signature with the arbiter's own verifier.
    let verifier = tilezero.verifier();
    match verifier.verify(&token) {
        Ok(()) => println!("\nToken signature: VALID"),
        Err(e) => println!("\nToken signature: INVALID - {:?}", e),
    }
    println!("\n=== Example Complete ===");
    Ok(())
}

View File

@@ -0,0 +1,114 @@
//! Human Escalation Example
//!
//! This example demonstrates the hybrid agent/human workflow:
//! - Detecting when human review is needed (DEFER)
//! - Presenting the escalation context
//!
//! Run with: cargo run --example human_escalation
use cognitum_gate_tilezero::{
ActionContext, ActionMetadata, ActionTarget, GateDecision, GateThresholds, TileZero,
};
use std::collections::HashMap;
use std::io::{self, Write};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("=== Cognitum Coherence Gate - Human Escalation Example ===\n");
    // Create TileZero with conservative thresholds to trigger DEFER
    let thresholds = GateThresholds {
        min_cut: 15.0,   // Higher threshold
        max_shift: 0.3,  // Lower tolerance for shift
        tau_deny: 0.01,
        tau_permit: 100.0,
        permit_ttl_ns: 300_000_000_000, // 5 minutes
        theta_uncertainty: 10.0,
        theta_confidence: 3.0,
    };
    let tilezero = TileZero::new(thresholds);
    // Simulate a risky action: a schema migration against the primary
    // production database, marked high urgency.
    let action = ActionContext {
        action_id: "critical-update-042".to_string(),
        action_type: "database_migration".to_string(),
        target: ActionTarget {
            device: Some("production-db-primary".to_string()),
            path: Some("/data/schema".to_string()),
            extra: HashMap::new(),
        },
        context: ActionMetadata {
            agent_id: "migration-agent".to_string(),
            session_id: Some("migration-session".to_string()),
            prior_actions: vec![],
            urgency: "high".to_string(),
        },
    };
    println!("Evaluating high-risk action:");
    println!("  Type: {}", action.action_type);
    println!("  Target: {:?}", action.target.device);
    println!();
    // Evaluate - this may trigger DEFER due to conservative thresholds
    let token = tilezero.decide(&action).await;
    if token.decision == GateDecision::Defer {
        println!("Decision: DEFER - Human review required\n");
        // Display escalation context as a box-drawn prompt for the operator.
        println!("┌─────────────────────────────────────────────────────┐");
        println!("│           HUMAN DECISION REQUIRED                    │");
        println!("├─────────────────────────────────────────────────────┤");
        println!("│ Action: {}", action.action_id);
        println!("│ Target: {:?}", action.target.device);
        println!("│                                                      │");
        println!("│ Why deferred:                                        │");
        println!("│  • High-risk target (production database)            │");
        println!("│  • Action type: database_migration                   │");
        println!("│                                                      │");
        println!("│ Options:                                             │");
        println!("│  [1] APPROVE - Allow the action                      │");
        println!("│  [2] DENY - Block the action                         │");
        println!("│  [3] ESCALATE - Need more review                     │");
        println!("└─────────────────────────────────────────────────────┘");
        println!();
        // Get human input from stdin; flush so the prompt appears first.
        print!("Enter your decision (1/2/3): ");
        io::stdout().flush()?;
        let mut input = String::new();
        io::stdin().read_line(&mut input)?;
        // Anything other than "1" or "2" falls through to ESCALATE.
        match input.trim() {
            "1" => {
                println!("\nYou chose: APPROVE");
                println!("In production, this would:");
                println!("  - Record the approval with your identity");
                println!("  - Generate a new PERMIT token");
                println!("  - Log the decision to the audit trail");
            }
            "2" => {
                println!("\nYou chose: DENY");
                println!("In production, this would:");
                println!("  - Record the denial with your identity");
                println!("  - Block the action permanently");
                println!("  - Alert the requesting agent");
            }
            _ => {
                println!("\nYou chose: ESCALATE");
                println!("In production, this would:");
                println!("  - Forward to Tier 3 (policy team)");
                println!("  - Extend the timeout");
                println!("  - Provide additional context");
            }
        }
    } else {
        // Thresholds did not force a deferral; report the automatic outcome.
        println!("Decision: {:?}", token.decision);
        println!("(Automatic - no human review needed)");
    }
    println!("\n=== Example Complete ===");
    Ok(())
}

View File

@@ -0,0 +1,103 @@
//! Receipt Audit Trail Example
//!
//! This example demonstrates:
//! - Generating multiple decisions
//! - Accessing the receipt log
//! - Verifying hash chain integrity
//!
//! Run with: cargo run --example receipt_audit
use cognitum_gate_tilezero::{
ActionContext, ActionMetadata, ActionTarget, GateThresholds, TileZero,
};
use std::collections::HashMap;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("=== Cognitum Coherence Gate - Receipt Audit Example ===\n");
    // Default thresholds; every decision below is recorded as a witness receipt.
    let tilezero = TileZero::new(GateThresholds::default());
    // Generate several decisions
    // Tuples are (action id, action type, requesting agent, target device).
    let actions = vec![
        ("action-001", "config_read", "agent-1", "router-1"),
        ("action-002", "config_write", "agent-1", "router-1"),
        ("action-003", "restart", "agent-2", "service-a"),
        ("action-004", "deploy", "agent-3", "cluster-prod"),
        ("action-005", "rollback", "agent-3", "cluster-prod"),
    ];
    println!("Generating decisions...\n");
    for (id, action_type, agent, target) in &actions {
        let action = ActionContext {
            action_id: id.to_string(),
            action_type: action_type.to_string(),
            target: ActionTarget {
                device: Some(target.to_string()),
                path: None,
                extra: HashMap::new(),
            },
            context: ActionMetadata {
                agent_id: agent.to_string(),
                session_id: None,
                prior_actions: vec![],
                urgency: "normal".to_string(),
            },
        };
        let token = tilezero.decide(&action).await;
        println!("  {} -> {:?}", id, token.decision);
    }
    println!("\n--- Audit Trail ---\n");
    // Verify the hash chain over all recorded receipts.
    match tilezero.verify_receipt_chain().await {
        Ok(()) => println!("Hash chain: VERIFIED"),
        Err(e) => println!("Hash chain: BROKEN - {:?}", e),
    }
    // Display receipt summary
    println!("\nReceipts:");
    println!("{:-<60}", "");
    println!(
        "{:<10} {:<15} {:<12} {:<20}",
        "Seq", "Action", "Decision", "Hash (first 8)"
    );
    println!("{:-<60}", "");
    // Walk receipts by sequence number: one per decision made above.
    for seq in 0..actions.len() as u64 {
        if let Some(receipt) = tilezero.get_receipt(seq).await {
            // First 4 bytes of the receipt hash -> 8 hex characters.
            let hash = receipt.hash();
            let hash_hex = hex::encode(&hash[..4]);
            println!(
                "{:<10} {:<15} {:<12} {}...",
                receipt.sequence,
                receipt.token.action_id,
                format!("{:?}", receipt.token.decision),
                hash_hex
            );
        }
    }
    println!("{:-<60}", "");
    // Export for compliance
    println!("\nExporting audit log...");
    let audit_json = tilezero.export_receipts_json().await?;
    // Timestamped filename (seconds since the Unix epoch).
    let filename = format!(
        "audit_log_{}.json",
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs()
    );
    println!("  Would write {} bytes to {}", audit_json.len(), filename);
    println!("  (Skipping actual file write in example)");
    println!("\n=== Example Complete ===");
    Ok(())
}

View File

@@ -0,0 +1,538 @@
//! Gate decision types, thresholds, and three-filter decision logic
//!
//! This module implements the three-filter decision process:
//! 1. Structural filter - based on min-cut analysis
//! 2. Shift filter - drift detection from expected patterns
//! 3. Evidence filter - confidence score threshold
//!
//! ## Performance Optimizations
//!
//! - VecDeque for O(1) history rotation (instead of Vec::remove(0))
//! - Inline score calculation functions
//! - Pre-computed threshold reciprocals for division optimization
//! - Early-exit evaluation order (most likely failures first)
use std::collections::VecDeque;
use serde::{Deserialize, Serialize};
use crate::supergraph::ReducedGraph;
/// Gate decision: Permit, Defer, or Deny
///
/// Serde serializes variants in lowercase ("permit" / "defer" / "deny"),
/// matching the `Display` implementation below.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum GateDecision {
    /// Action is permitted - stable enough to proceed
    Permit,
    /// Action is deferred - uncertain, escalate to human/stronger model
    Defer,
    /// Action is denied - unstable or policy-violating
    Deny,
}
impl std::fmt::Display for GateDecision {
    /// Render the decision as its lowercase wire name (same text as the
    /// serde representation).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            GateDecision::Permit => "permit",
            GateDecision::Defer => "defer",
            GateDecision::Deny => "deny",
        };
        f.write_str(label)
    }
}
/// Evidence filter decision
///
/// Three-way verdict of the sequential e-process test: accept, keep
/// collecting evidence, or reject.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EvidenceDecision {
    /// Sufficient evidence of coherence
    Accept,
    /// Insufficient evidence either way
    Continue,
    /// Strong evidence of incoherence
    Reject,
}
/// Filter type in the decision process
///
/// Identifies which of the three stacked filters produced a rejection
/// (see `DecisionOutcome::rejected_by`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum DecisionFilter {
    /// Min-cut based structural analysis
    Structural,
    /// Drift detection from patterns
    Shift,
    /// Confidence/evidence threshold
    Evidence,
}
impl std::fmt::Display for DecisionFilter {
    /// Render the filter name in capitalized form (matches the variant name).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            DecisionFilter::Structural => "Structural",
            DecisionFilter::Shift => "Shift",
            DecisionFilter::Evidence => "Evidence",
        };
        f.write_str(label)
    }
}
/// Outcome of the three-filter decision process
///
/// `rejected_by` / `rejection_reason` are `Some` exactly when the decision
/// is Defer or Deny. As produced by `ThreeFilterDecision::evaluate`,
/// filters that were never reached (due to an earlier early-exit) report
/// a score of 0.0.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DecisionOutcome {
    /// The gate decision
    pub decision: GateDecision,
    /// Overall confidence score (0.0 - 1.0)
    pub confidence: f64,
    /// Which filter rejected (if any)
    pub rejected_by: Option<DecisionFilter>,
    /// Reason for rejection (if rejected)
    pub rejection_reason: Option<String>,
    /// Structural filter score
    pub structural_score: f64,
    /// Shift filter score
    pub shift_score: f64,
    /// Evidence filter score
    pub evidence_score: f64,
    /// Min-cut value from structural analysis
    pub mincut_value: f64,
}
impl DecisionOutcome {
/// Create a permit outcome
#[inline]
pub fn permit(
confidence: f64,
structural: f64,
shift: f64,
evidence: f64,
mincut: f64,
) -> Self {
Self {
decision: GateDecision::Permit,
confidence,
rejected_by: None,
rejection_reason: None,
structural_score: structural,
shift_score: shift,
evidence_score: evidence,
mincut_value: mincut,
}
}
/// Create a deferred outcome
#[inline]
pub fn defer(
filter: DecisionFilter,
reason: String,
structural: f64,
shift: f64,
evidence: f64,
mincut: f64,
) -> Self {
// OPTIMIZATION: Multiply by reciprocal instead of divide
let confidence = (structural + shift + evidence) * (1.0 / 3.0);
Self {
decision: GateDecision::Defer,
confidence,
rejected_by: Some(filter),
rejection_reason: Some(reason),
structural_score: structural,
shift_score: shift,
evidence_score: evidence,
mincut_value: mincut,
}
}
/// Create a denied outcome
#[inline]
pub fn deny(
filter: DecisionFilter,
reason: String,
structural: f64,
shift: f64,
evidence: f64,
mincut: f64,
) -> Self {
// OPTIMIZATION: Multiply by reciprocal instead of divide
let confidence = (structural + shift + evidence) * (1.0 / 3.0);
Self {
decision: GateDecision::Deny,
confidence,
rejected_by: Some(filter),
rejection_reason: Some(reason),
structural_score: structural,
shift_score: shift,
evidence_score: evidence,
mincut_value: mincut,
}
}
}
/// Threshold configuration for the gate
///
/// `tau_deny` and `tau_permit` are levels on the e-process (evidence)
/// scale; `min_cut` and `max_shift` gate the structural and drift filters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GateThresholds {
    /// E-process level indicating incoherence (default: 0.01)
    pub tau_deny: f64,
    /// E-process level indicating coherence (default: 100.0)
    pub tau_permit: f64,
    /// Minimum cut value for structural stability
    pub min_cut: f64,
    /// Maximum shift pressure before deferral
    pub max_shift: f64,
    /// Permit token TTL in nanoseconds
    pub permit_ttl_ns: u64,
    /// Conformal set size requiring deferral
    pub theta_uncertainty: f64,
    /// Conformal set size for confident permit
    pub theta_confidence: f64,
}
impl Default for GateThresholds {
fn default() -> Self {
Self {
tau_deny: 0.01,
tau_permit: 100.0,
min_cut: 5.0,
max_shift: 0.5,
permit_ttl_ns: 60_000_000_000, // 60 seconds
theta_uncertainty: 20.0,
theta_confidence: 5.0,
}
}
}
/// Three-filter decision evaluator
///
/// Implements the core decision logic for the coherence gate:
/// 1. Structural filter - checks min-cut stability
/// 2. Shift filter - detects drift from baseline
/// 3. Evidence filter - validates confidence threshold
///
/// OPTIMIZATION: Uses VecDeque for O(1) history rotation instead of Vec::remove(0)
///
/// NOTE(review): the cached reciprocals are infinite when `min_cut`,
/// `max_shift`, or `tau_permit - tau_deny` is 0 — confirm callers never
/// construct degenerate thresholds.
pub struct ThreeFilterDecision {
    /// Gate thresholds
    thresholds: GateThresholds,
    /// Pre-computed reciprocals for fast division
    /// OPTIMIZATION: Avoid division in hot path
    inv_min_cut: f64,
    inv_max_shift: f64,
    inv_tau_range: f64,
    /// Historical baseline for shift detection (mean of observed min-cuts)
    baseline_mincut: Option<f64>,
    /// Window of recent mincut values for drift detection
    /// OPTIMIZATION: VecDeque for O(1) push_back and pop_front
    mincut_history: VecDeque<f64>,
    /// Maximum history size (window length)
    history_size: usize,
}
impl ThreeFilterDecision {
    /// Create a new three-filter decision evaluator.
    ///
    /// Caches the reciprocals of the thresholds so the hot scoring path
    /// multiplies instead of divides. The history window is fixed at 100
    /// observations.
    pub fn new(thresholds: GateThresholds) -> Self {
        // OPTIMIZATION: Pre-compute reciprocals for fast division
        let inv_min_cut = 1.0 / thresholds.min_cut;
        let inv_max_shift = 1.0 / thresholds.max_shift;
        let inv_tau_range = 1.0 / (thresholds.tau_permit - thresholds.tau_deny);
        Self {
            thresholds,
            inv_min_cut,
            inv_max_shift,
            inv_tau_range,
            baseline_mincut: None,
            // OPTIMIZATION: Use VecDeque for O(1) rotation
            mincut_history: VecDeque::with_capacity(100),
            history_size: 100,
        }
    }

    /// Set baseline min-cut for shift detection (overrides any
    /// auto-computed baseline).
    #[inline]
    pub fn set_baseline(&mut self, baseline: f64) {
        self.baseline_mincut = Some(baseline);
    }

    /// Update history with a new min-cut observation.
    ///
    /// Keeps at most `history_size` samples, dropping the oldest first.
    /// The baseline is derived from history only the first time; later
    /// observations do not refresh it unless `set_baseline` is called.
    ///
    /// OPTIMIZATION: Uses VecDeque for O(1) push/pop instead of Vec::remove(0) which is O(n)
    #[inline]
    pub fn observe_mincut(&mut self, mincut: f64) {
        // OPTIMIZATION: VecDeque::push_back + pop_front is O(1)
        if self.mincut_history.len() >= self.history_size {
            self.mincut_history.pop_front();
        }
        self.mincut_history.push_back(mincut);
        // Update baseline if not set
        if self.baseline_mincut.is_none() && !self.mincut_history.is_empty() {
            self.baseline_mincut = Some(self.compute_baseline());
        }
    }

    /// Compute baseline (arithmetic mean) from history; 0.0 when empty.
    ///
    /// OPTIMIZATION: Uses iterator sum for cache-friendly access
    #[inline]
    fn compute_baseline(&self) -> f64 {
        let len = self.mincut_history.len();
        if len == 0 {
            return 0.0;
        }
        let sum: f64 = self.mincut_history.iter().sum();
        sum / len as f64
    }

    /// Evaluate a request against the three filters.
    ///
    /// Filters run in order (structural, shift, evidence) and exit early
    /// on the first failure; scores of filters that were never reached
    /// are reported as 0.0 in the outcome.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocals for division,
    /// inline score calculations, early-exit on failures
    #[inline]
    pub fn evaluate(&self, graph: &ReducedGraph) -> DecisionOutcome {
        let mincut_value = graph.global_cut();
        let shift_pressure = graph.aggregate_shift_pressure();
        let e_value = graph.aggregate_evidence();
        // 1. Structural Filter - Min-cut analysis
        // OPTIMIZATION: Use pre-computed reciprocal
        let structural_score = self.compute_structural_score(mincut_value);
        if mincut_value < self.thresholds.min_cut {
            // Structural failure is terminal: DENY.
            return DecisionOutcome::deny(
                DecisionFilter::Structural,
                format!(
                    "Min-cut {:.3} below threshold {:.3}",
                    mincut_value, self.thresholds.min_cut
                ),
                structural_score,
                0.0,
                0.0,
                mincut_value,
            );
        }
        // 2. Shift Filter - Drift detection
        // OPTIMIZATION: Use pre-computed reciprocal
        let shift_score = self.compute_shift_score(shift_pressure);
        if shift_pressure >= self.thresholds.max_shift {
            // Excessive drift is uncertainty, not proof of incoherence: DEFER.
            return DecisionOutcome::defer(
                DecisionFilter::Shift,
                format!(
                    "Shift pressure {:.3} exceeds threshold {:.3}",
                    shift_pressure, self.thresholds.max_shift
                ),
                structural_score,
                shift_score,
                0.0,
                mincut_value,
            );
        }
        // 3. Evidence Filter - E-value threshold
        // OPTIMIZATION: Use pre-computed reciprocal
        let evidence_score = self.compute_evidence_score(e_value);
        if e_value < self.thresholds.tau_deny {
            // Strong evidence of incoherence: DENY.
            return DecisionOutcome::deny(
                DecisionFilter::Evidence,
                format!(
                    "E-value {:.3} below denial threshold {:.3}",
                    e_value, self.thresholds.tau_deny
                ),
                structural_score,
                shift_score,
                evidence_score,
                mincut_value,
            );
        }
        if e_value < self.thresholds.tau_permit {
            // Inconclusive evidence (between tau_deny and tau_permit): DEFER.
            return DecisionOutcome::defer(
                DecisionFilter::Evidence,
                format!(
                    "E-value {:.3} below permit threshold {:.3}",
                    e_value, self.thresholds.tau_permit
                ),
                structural_score,
                shift_score,
                evidence_score,
                mincut_value,
            );
        }
        // All filters passed
        // OPTIMIZATION: Multiply by reciprocal
        let confidence = (structural_score + shift_score + evidence_score) * (1.0 / 3.0);
        DecisionOutcome::permit(
            confidence,
            structural_score,
            shift_score,
            evidence_score,
            mincut_value,
        )
    }

    /// Compute structural score from min-cut value.
    ///
    /// Saturates at 1.0 once the threshold is met; below it, scales
    /// linearly toward 0.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocal, marked inline(always)
    #[inline(always)]
    fn compute_structural_score(&self, mincut_value: f64) -> f64 {
        if mincut_value >= self.thresholds.min_cut {
            1.0
        } else {
            // OPTIMIZATION: Multiply by reciprocal instead of divide
            mincut_value * self.inv_min_cut
        }
    }

    /// Compute shift score from shift pressure.
    ///
    /// 1.0 at zero pressure, falling linearly to 0.0 at/above `max_shift`.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocal, marked inline(always)
    #[inline(always)]
    fn compute_shift_score(&self, shift_pressure: f64) -> f64 {
        // OPTIMIZATION: Multiply by reciprocal, use f64::min for branchless
        1.0 - (shift_pressure * self.inv_max_shift).min(1.0)
    }

    /// Compute evidence score from e-value.
    ///
    /// 0.0 at/below `tau_deny`, 1.0 at/above `tau_permit`, linear in between.
    ///
    /// OPTIMIZATION: Uses pre-computed reciprocal, marked inline(always)
    #[inline(always)]
    fn compute_evidence_score(&self, e_value: f64) -> f64 {
        if e_value >= self.thresholds.tau_permit {
            1.0
        } else if e_value <= self.thresholds.tau_deny {
            0.0
        } else {
            // OPTIMIZATION: Multiply by reciprocal
            (e_value - self.thresholds.tau_deny) * self.inv_tau_range
        }
    }

    /// Get current thresholds.
    #[inline]
    pub fn thresholds(&self) -> &GateThresholds {
        &self.thresholds
    }

    /// Get the number of buffered min-cut observations.
    #[inline(always)]
    pub fn history_len(&self) -> usize {
        self.mincut_history.len()
    }

    /// Get current baseline (None until the first observation or
    /// explicit `set_baseline`).
    #[inline(always)]
    pub fn baseline(&self) -> Option<f64> {
        self.baseline_mincut
    }

    /// Update thresholds and recompute reciprocals.
    ///
    /// OPTIMIZATION: Recomputes cached reciprocals when thresholds change
    pub fn update_thresholds(&mut self, thresholds: GateThresholds) {
        self.inv_min_cut = 1.0 / thresholds.min_cut;
        self.inv_max_shift = 1.0 / thresholds.max_shift;
        self.inv_tau_range = 1.0 / (thresholds.tau_permit - thresholds.tau_deny);
        self.thresholds = thresholds;
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Display must match the lowercase serde wire names.
    #[test]
    fn test_gate_decision_display() {
        assert_eq!(GateDecision::Permit.to_string(), "permit");
        assert_eq!(GateDecision::Defer.to_string(), "defer");
        assert_eq!(GateDecision::Deny.to_string(), "deny");
    }

    /// Spot-check the documented default threshold values.
    #[test]
    fn test_default_thresholds() {
        let thresholds = GateThresholds::default();
        assert_eq!(thresholds.tau_deny, 0.01);
        assert_eq!(thresholds.tau_permit, 100.0);
        assert_eq!(thresholds.min_cut, 5.0);
    }

    /// A fresh ReducedGraph passes all three filters.
    #[test]
    fn test_three_filter_decision() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        // Default graph should permit
        let graph = ReducedGraph::new();
        let outcome = decision.evaluate(&graph);
        // Default graph has high coherence, should permit
        assert_eq!(outcome.decision, GateDecision::Permit);
    }

    /// A cut below min_cut is a terminal structural denial.
    #[test]
    fn test_structural_denial() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        let mut graph = ReducedGraph::new();
        graph.set_global_cut(1.0); // Below min_cut of 5.0
        let outcome = decision.evaluate(&graph);
        assert_eq!(outcome.decision, GateDecision::Deny);
        assert_eq!(outcome.rejected_by, Some(DecisionFilter::Structural));
    }

    /// Excessive drift defers rather than denies.
    #[test]
    fn test_shift_deferral() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        let mut graph = ReducedGraph::new();
        graph.set_shift_pressure(0.8); // Above max_shift of 0.5
        let outcome = decision.evaluate(&graph);
        assert_eq!(outcome.decision, GateDecision::Defer);
        assert_eq!(outcome.rejected_by, Some(DecisionFilter::Shift));
    }

    /// An inconclusive e-value (between the two taus) defers.
    #[test]
    fn test_evidence_deferral() {
        let thresholds = GateThresholds::default();
        let decision = ThreeFilterDecision::new(thresholds);
        let mut graph = ReducedGraph::new();
        graph.set_evidence(50.0); // Between tau_deny (0.01) and tau_permit (100.0)
        let outcome = decision.evaluate(&graph);
        assert_eq!(outcome.decision, GateDecision::Defer);
        assert_eq!(outcome.rejected_by, Some(DecisionFilter::Evidence));
    }

    /// Permit outcomes carry no rejection information.
    #[test]
    fn test_decision_outcome_creation() {
        let outcome = DecisionOutcome::permit(0.95, 1.0, 0.9, 0.95, 10.0);
        assert_eq!(outcome.decision, GateDecision::Permit);
        assert!(outcome.confidence > 0.9);
        assert!(outcome.rejected_by.is_none());
    }

    /// Display for filters mirrors the variant names.
    #[test]
    fn test_decision_filter_display() {
        assert_eq!(DecisionFilter::Structural.to_string(), "Structural");
        assert_eq!(DecisionFilter::Shift.to_string(), "Shift");
        assert_eq!(DecisionFilter::Evidence.to_string(), "Evidence");
    }

    /// The baseline is auto-derived from the first observation onward.
    #[test]
    fn test_baseline_observation() {
        let thresholds = GateThresholds::default();
        let mut decision = ThreeFilterDecision::new(thresholds);
        assert!(decision.baseline().is_none());
        decision.observe_mincut(10.0);
        decision.observe_mincut(12.0);
        decision.observe_mincut(8.0);
        assert!(decision.baseline().is_some());
        assert_eq!(decision.history_len(), 3);
    }
}

View File

@@ -0,0 +1,247 @@
//! Evidence accumulation and filtering
use serde::{Deserialize, Serialize};
/// Aggregated evidence from all tiles
///
/// `e_value` is the running product of every added tile e-value;
/// `min`/`max` track the extremes seen so far.
///
/// NOTE(review): `empty()` initializes min/max to +/- infinity, which
/// serde_json cannot represent as JSON numbers — confirm an empty
/// aggregate is never serialized to JSON.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregatedEvidence {
    /// Total accumulated e-value (product; 1.0 when no tiles added)
    pub e_value: f64,
    /// Number of tiles contributing
    pub tile_count: usize,
    /// Minimum e-value across tiles (+inf before any add)
    pub min_e_value: f64,
    /// Maximum e-value across tiles (-inf before any add)
    pub max_e_value: f64,
}
impl AggregatedEvidence {
/// Create empty evidence
pub fn empty() -> Self {
Self {
e_value: 1.0,
tile_count: 0,
min_e_value: f64::INFINITY,
max_e_value: f64::NEG_INFINITY,
}
}
/// Add evidence from a tile
pub fn add(&mut self, e_value: f64) {
self.e_value *= e_value;
self.tile_count += 1;
self.min_e_value = self.min_e_value.min(e_value);
self.max_e_value = self.max_e_value.max(e_value);
}
}
/// Evidence filter for e-process evaluation
///
/// Maintains a bounded ring buffer of recent e-values and their running
/// product, tracked incrementally in log-space.
///
/// OPTIMIZATION: Uses multiplicative update for O(1) current value maintenance
/// instead of O(n) product computation.
pub struct EvidenceFilter {
    /// Rolling e-value history (ring buffer); stored values are clamped
    /// to [1e-10, 1e10] so their logarithms stay finite
    history: Vec<f64>,
    /// Next write position in the ring buffer
    position: usize,
    /// Capacity of ring buffer (always >= 1, see `new`)
    capacity: usize,
    /// Current accumulated value (maintained incrementally from `log_current`)
    current: f64,
    /// Log-space accumulator for numerical stability
    log_current: f64,
}

impl EvidenceFilter {
    /// Create a new evidence filter with given capacity.
    ///
    /// A capacity of 0 is clamped to 1: `update` indexes the ring buffer
    /// and computes `position % capacity`, both of which would panic with
    /// a zero capacity.
    pub fn new(capacity: usize) -> Self {
        let capacity = capacity.max(1);
        Self {
            history: Vec::with_capacity(capacity),
            position: 0,
            capacity,
            current: 1.0,
            log_current: 0.0,
        }
    }

    /// Update with a new e-value.
    ///
    /// The value is clamped to [1e-10, 1e10] to keep its logarithm finite,
    /// then folded into the running log-space product.
    ///
    /// OPTIMIZATION: Uses multiplicative update for O(1) complexity
    /// instead of O(n) product recomputation. Falls back to full
    /// recomputation periodically to prevent numerical drift.
    pub fn update(&mut self, e_value: f64) {
        // Bound to prevent overflow/underflow
        let bounded = e_value.clamp(1e-10, 1e10);
        let log_bounded = bounded.ln();
        if self.history.len() < self.capacity {
            // Growing phase: just accumulate
            self.history.push(bounded);
            self.log_current += log_bounded;
        } else {
            // Ring buffer phase: subtract the evicted value's log, add the new one
            let old_value = self.history[self.position];
            let old_log = old_value.ln();
            self.history[self.position] = bounded;
            self.log_current = self.log_current - old_log + log_bounded;
        }
        self.position = (self.position + 1) % self.capacity;
        // Convert from log-space
        self.current = self.log_current.exp();
        // Periodic full recomputation for numerical stability: every
        // `capacity` updates, when the ring wraps back to position 0.
        if self.position == 0 {
            self.recompute_current();
        }
    }

    /// Recompute current value from history (for stability)
    #[inline]
    fn recompute_current(&mut self) {
        self.log_current = self.history.iter().map(|x| x.ln()).sum();
        self.current = self.log_current.exp();
    }

    /// Get current accumulated e-value (product of the buffered values)
    #[inline]
    pub fn current(&self) -> f64 {
        self.current
    }

    /// Get the history of e-values (ring order, not chronological once full)
    pub fn history(&self) -> &[f64] {
        &self.history
    }

    /// Compute product using SIMD-friendly parallel lanes
    ///
    /// OPTIMIZATION: Uses log-space arithmetic with parallel accumulators
    /// for better numerical stability and vectorization.
    pub fn current_simd(&self) -> f64 {
        if self.history.is_empty() {
            return 1.0;
        }
        // Use 4 parallel lanes for potential SIMD vectorization
        let mut log_lanes = [0.0f64; 4];
        for (i, &val) in self.history.iter().enumerate() {
            log_lanes[i % 4] += val.ln();
        }
        let log_sum = log_lanes[0] + log_lanes[1] + log_lanes[2] + log_lanes[3];
        log_sum.exp()
    }
}
/// Aggregate 255 tile e-values using SIMD-friendly patterns
///
/// OPTIMIZATION: Accumulates logs over 8 parallel lanes (sized for
/// 256-bit AVX2) for numerical stability and vectorization when
/// combining many e-values.
///
/// # Arguments
/// * `tile_e_values` - Slice of e-values from worker tiles
///
/// # Returns
/// Aggregated e-value (product computed in log-space; 1.0 for an empty slice)
pub fn aggregate_tiles_simd(tile_e_values: &[f64]) -> f64 {
    if tile_e_values.is_empty() {
        return 1.0;
    }
    let mut lanes = [0.0f64; 8];
    // Full 8-wide chunks feed one lane per slot; the tail wraps modulo 8.
    let body = tile_e_values.chunks_exact(8);
    let tail = body.remainder();
    for chunk in body {
        for (lane, &value) in lanes.iter_mut().zip(chunk) {
            *lane += value.ln();
        }
    }
    for (i, &value) in tail.iter().enumerate() {
        lanes[i % 8] += value.ln();
    }
    // Tree reduction of the eight lane sums, then back out of log-space.
    let low = lanes[0] + lanes[1] + lanes[2] + lanes[3];
    let high = lanes[4] + lanes[5] + lanes[6] + lanes[7];
    (low + high).exp()
}
/// Compute mixture e-value with adaptive precision
///
/// Combines per-tile log e-values into a single e-value. With `weights`,
/// the result is `exp(sum_i w_i * log_e_i)`; without weights, it is the
/// unweighted geometric mean `exp(mean(log_e_i))`.
///
/// NOTE(review): the weighted branch does not divide by the weight total,
/// so it is a weighted geometric mean only when the weights sum to 1 —
/// confirm callers pass normalized weights.
///
/// # Arguments
/// * `log_e_values` - Log e-values from tiles
/// * `weights` - Optional tile weights (None = uniform)
///
/// # Returns
/// Weighted geometric mean of e-values (1.0 for an empty slice)
pub fn mixture_evalue_adaptive(log_e_values: &[f64], weights: Option<&[f64]>) -> f64 {
    if log_e_values.is_empty() {
        return 1.0;
    }
    let total = if let Some(w) = weights {
        // Weighted sum in log-space
        log_e_values
            .iter()
            .zip(w.iter())
            .map(|(&log_e, &weight)| log_e * weight)
            .sum::<f64>()
    } else {
        // Uniform weights - accumulate over four lanes, then average
        let mut lanes = [0.0f64; 4];
        for (i, &log_e) in log_e_values.iter().enumerate() {
            lanes[i % 4] += log_e;
        }
        (lanes[0] + lanes[1] + lanes[2] + lanes[3]) / log_e_values.len() as f64
    };
    total.exp()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Product, count, and extrema all update on add().
    #[test]
    fn test_aggregated_evidence() {
        let mut evidence = AggregatedEvidence::empty();
        evidence.add(2.0);
        evidence.add(3.0);
        assert_eq!(evidence.e_value, 6.0);
        assert_eq!(evidence.tile_count, 2);
        assert_eq!(evidence.min_e_value, 2.0);
        assert_eq!(evidence.max_e_value, 3.0);
    }

    /// The incremental log-space product matches the plain product.
    #[test]
    fn test_evidence_filter() {
        let mut filter = EvidenceFilter::new(10);
        filter.update(2.0);
        filter.update(2.0);
        assert_eq!(filter.current(), 4.0);
    }
}

View File

@@ -0,0 +1,393 @@
//! cognitum-gate-tilezero: TileZero arbiter for the Anytime-Valid Coherence Gate
//!
//! TileZero acts as the central arbiter in the 256-tile WASM fabric, responsible for:
//! - Merging worker tile reports into a supergraph
//! - Making global gate decisions (Permit/Defer/Deny)
//! - Issuing cryptographically signed permit tokens
//! - Maintaining a hash-chained witness receipt log
pub mod decision;
pub mod evidence;
pub mod merge;
pub mod permit;
pub mod receipt;
pub mod supergraph;
pub use decision::{
DecisionFilter, DecisionOutcome, EvidenceDecision, GateDecision, GateThresholds,
ThreeFilterDecision,
};
pub use evidence::{AggregatedEvidence, EvidenceFilter};
pub use merge::{MergeStrategy, MergedReport, ReportMerger, WorkerReport};
pub use permit::{PermitState, PermitToken, TokenDecodeError, Verifier, VerifyError};
pub use receipt::{ReceiptLog, TimestampProof, WitnessReceipt, WitnessSummary};
pub use supergraph::{ReducedGraph, ShiftPressure, StructuralFilter};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use tokio::sync::RwLock;
/// Action identifier (opaque string chosen by the requesting agent)
pub type ActionId = String;
/// Vertex identifier in the coherence graph
pub type VertexId = u64;
/// Edge identifier in the coherence graph
pub type EdgeId = u64;
/// Worker tile identifier (1-255, with 0 reserved for TileZero)
pub type TileId = u8;
/// Context for an action being evaluated by the gate
///
/// This is the input to `TileZero::decide`; the resulting `PermitToken`
/// echoes the `action_id`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionContext {
    /// Unique identifier for this action
    pub action_id: ActionId,
    /// Type of action (e.g., "config_change", "api_call")
    pub action_type: String,
    /// Target of the action
    pub target: ActionTarget,
    /// Additional context
    pub context: ActionMetadata,
}
/// Target of an action
///
/// `extra` is serde-flattened, so any target keys beyond `device`/`path`
/// round-trip through this map instead of being dropped.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionTarget {
    /// Target device/resource
    pub device: Option<String>,
    /// Target path
    pub path: Option<String>,
    /// Additional target properties
    #[serde(flatten)]
    pub extra: HashMap<String, serde_json::Value>,
}
/// Metadata about the action context
///
/// `prior_actions` and `urgency` have serde defaults (empty list and
/// "normal"), so they may be omitted from incoming JSON.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActionMetadata {
    /// Agent requesting the action
    pub agent_id: String,
    /// Session identifier
    pub session_id: Option<String>,
    /// Prior related actions
    #[serde(default)]
    pub prior_actions: Vec<ActionId>,
    /// Urgency level
    #[serde(default = "default_urgency")]
    pub urgency: String,
}
/// Serde default for `ActionMetadata::urgency`.
fn default_urgency() -> String {
    String::from("normal")
}
/// Report from a worker tile
///
/// NOTE(review): `#[repr(C, align(64))]` cache-line-aligns the struct
/// itself, but the `Vec`/`Option` fields still hold heap pointers —
/// confirm the C layout and alignment are intentional for this
/// serde-serialized type.
#[repr(C, align(64))]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TileReport {
    /// Tile identifier (1-255; 0 is reserved for TileZero itself)
    pub tile_id: TileId,
    /// Local coherence score
    pub coherence: f32,
    /// Whether boundary has moved since last report
    pub boundary_moved: bool,
    /// Top suspicious edges
    pub suspicious_edges: Vec<EdgeId>,
    /// Local e-value accumulator
    pub e_value: f32,
    /// Witness fragment for boundary changes (only consumed by
    /// `collect_reports` when `boundary_moved` is true)
    pub witness_fragment: Option<WitnessFragment>,
}
/// Fragment of witness data from a worker tile
///
/// Merged into the supergraph via `ReducedGraph::update_from_fragment`
/// when the originating tile reports a boundary move.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WitnessFragment {
    /// Tile that generated this fragment
    pub tile_id: TileId,
    /// Boundary edges in this shard
    pub boundary_edges: Vec<EdgeId>,
    /// Local cut value
    pub cut_value: f32,
}
/// Escalation information for DEFER decisions
///
/// `default_on_timeout` serde-defaults to "deny" — fail closed when the
/// escalation target does not respond in time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EscalationInfo {
    /// Who to escalate to
    pub to: String,
    /// URL for context
    pub context_url: String,
    /// Timeout in nanoseconds
    pub timeout_ns: u64,
    /// Default action on timeout
    #[serde(default = "default_timeout_action")]
    pub default_on_timeout: String,
}
/// Serde default for `EscalationInfo::default_on_timeout` (fail closed).
fn default_timeout_action() -> String {
    String::from("deny")
}
/// TileZero: The central arbiter of the coherence gate
///
/// Mutable state (supergraph, receipt log) sits behind tokio `RwLock`s;
/// the decision sequence counter is a lock-free `AtomicU64`.
pub struct TileZero {
    /// Reduced supergraph from worker summaries
    supergraph: RwLock<ReducedGraph>,
    /// Canonical permit token state
    permit_state: PermitState,
    /// Hash-chained witness receipt log
    receipt_log: RwLock<ReceiptLog>,
    /// Threshold configuration
    thresholds: GateThresholds,
    /// Sequence counter (monotonic, one per `decide` call)
    sequence: AtomicU64,
}
impl TileZero {
    /// Create a new TileZero arbiter.
    ///
    /// Starts with an empty supergraph, fresh permit-token state, an
    /// empty receipt log, and the sequence counter at 0.
    pub fn new(thresholds: GateThresholds) -> Self {
        Self {
            supergraph: RwLock::new(ReducedGraph::new()),
            permit_state: PermitState::new(),
            receipt_log: RwLock::new(ReceiptLog::new()),
            thresholds,
            sequence: AtomicU64::new(0),
        }
    }

    /// Collect reports from all worker tiles.
    ///
    /// Holds the supergraph write lock for the whole batch, so a
    /// concurrent `decide` observes either none or all of these reports.
    pub async fn collect_reports(&self, reports: &[TileReport]) {
        let mut graph = self.supergraph.write().await;
        for report in reports {
            // Witness fragments are only consumed when the tile's
            // boundary actually moved.
            if report.boundary_moved {
                if let Some(ref fragment) = report.witness_fragment {
                    graph.update_from_fragment(fragment);
                }
            }
            graph.update_coherence(report.tile_id, report.coherence);
        }
    }

    /// Make a gate decision for an action.
    ///
    /// Runs the three stacked filters against the current supergraph,
    /// signs the resulting token, and appends a witness receipt.
    ///
    /// Panics if the system clock is set before the Unix epoch (the
    /// `duration_since` unwrap).
    ///
    /// NOTE(review): the sequence number is claimed before any lock is
    /// taken and the receipt is appended under a separate write lock, so
    /// concurrent `decide` calls can append receipts out of sequence
    /// order — confirm callers serialize decisions or the log tolerates
    /// out-of-order sequence numbers.
    pub async fn decide(&self, action_ctx: &ActionContext) -> PermitToken {
        let seq = self.sequence.fetch_add(1, Ordering::SeqCst);
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos() as u64;
        let graph = self.supergraph.read().await;
        // Three stacked filters:
        // 1. Structural filter (global cut on reduced graph)
        let structural_ok = graph.global_cut() >= self.thresholds.min_cut;
        // 2. Shift filter (aggregated shift pressure)
        let shift_pressure = graph.aggregate_shift_pressure();
        let shift_ok = shift_pressure < self.thresholds.max_shift;
        // 3. Evidence filter
        let e_aggregate = graph.aggregate_evidence();
        let evidence_decision = self.evidence_decision(e_aggregate);
        // Combined decision: structural failure denies outright, shift
        // failure defers, then the evidence verdict settles the rest.
        let decision = match (structural_ok, shift_ok, evidence_decision) {
            (false, _, _) => GateDecision::Deny,
            (_, false, _) => GateDecision::Defer,
            (_, _, EvidenceDecision::Reject) => GateDecision::Deny,
            (_, _, EvidenceDecision::Continue) => GateDecision::Defer,
            (true, true, EvidenceDecision::Accept) => GateDecision::Permit,
        };
        // Compute witness hash
        let witness_summary = graph.witness_summary();
        let witness_hash = witness_summary.hash();
        // Release the graph read lock before signing/appending the receipt.
        drop(graph);
        // Create token
        let token = PermitToken {
            decision,
            action_id: action_ctx.action_id.clone(),
            timestamp: now,
            ttl_ns: self.thresholds.permit_ttl_ns,
            witness_hash,
            sequence: seq,
            signature: [0u8; 64], // Will be filled by sign
        };
        // Sign the token
        let signed_token = self.permit_state.sign_token(token);
        // Emit receipt
        self.emit_receipt(&signed_token, &witness_summary).await;
        signed_token
    }

    /// Get evidence decision based on accumulated e-value.
    ///
    /// Below `tau_deny` -> Reject; at or above `tau_permit` -> Accept;
    /// anything in between -> Continue (insufficient evidence).
    fn evidence_decision(&self, e_aggregate: f64) -> EvidenceDecision {
        if e_aggregate < self.thresholds.tau_deny {
            EvidenceDecision::Reject
        } else if e_aggregate >= self.thresholds.tau_permit {
            EvidenceDecision::Accept
        } else {
            EvidenceDecision::Continue
        }
    }

    /// Emit a witness receipt, chaining it to the previous receipt's hash.
    async fn emit_receipt(&self, token: &PermitToken, summary: &WitnessSummary) {
        let mut log = self.receipt_log.write().await;
        let previous_hash = log.last_hash();
        let receipt = WitnessReceipt {
            sequence: token.sequence,
            token: token.clone(),
            previous_hash,
            witness_summary: summary.clone(),
            timestamp_proof: TimestampProof {
                timestamp: token.timestamp,
                previous_receipt_hash: previous_hash,
                merkle_root: [0u8; 32], // Simplified for v0
            },
        };
        log.append(receipt);
    }

    /// Get a receipt by sequence number (None if absent).
    pub async fn get_receipt(&self, sequence: u64) -> Option<WitnessReceipt> {
        let log = self.receipt_log.read().await;
        log.get(sequence).cloned()
    }

    /// Verify the hash chain up to a sequence number.
    pub async fn verify_chain_to(&self, sequence: u64) -> Result<(), ChainVerifyError> {
        let log = self.receipt_log.read().await;
        log.verify_chain_to(sequence)
    }

    /// Replay a decision for audit purposes.
    ///
    /// NOTE(review): this does not re-run the filters — it echoes the
    /// recorded decision and snapshot. A full implementation would
    /// reconstruct state from checkpoints.
    pub async fn replay(&self, receipt: &WitnessReceipt) -> ReplayResult {
        // In a full implementation, this would reconstruct state from checkpoints
        // For now, return the original decision
        ReplayResult {
            decision: receipt.token.decision,
            state_snapshot: receipt.witness_summary.clone(),
        }
    }

    /// Get the verifier for token validation.
    pub fn verifier(&self) -> Verifier {
        self.permit_state.verifier()
    }

    /// Get the thresholds configuration.
    pub fn thresholds(&self) -> &GateThresholds {
        &self.thresholds
    }

    /// Verify the entire receipt chain (trivially Ok for an empty log).
    pub async fn verify_receipt_chain(&self) -> Result<(), ChainVerifyError> {
        let log = self.receipt_log.read().await;
        let len = log.len();
        if len == 0 {
            return Ok(());
        }
        log.verify_chain_to(len as u64 - 1)
    }

    /// Export all receipts as a pretty-printed JSON array.
    pub async fn export_receipts_json(&self) -> Result<String, serde_json::Error> {
        let log = self.receipt_log.read().await;
        let receipts: Vec<&WitnessReceipt> = log.iter().collect();
        serde_json::to_string_pretty(&receipts)
    }
}
/// Result of replaying a decision
///
/// v0 replay does not recompute state from checkpoints; both fields echo
/// data captured in the original receipt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayResult {
    /// The replayed decision
    pub decision: GateDecision,
    /// State snapshot at decision time
    pub state_snapshot: WitnessSummary,
}
/// Error during chain verification
///
/// Returned by the receipt-log chain walks (`verify_chain_to` and the
/// gate-level chain checks).
#[derive(Debug, thiserror::Error)]
pub enum ChainVerifyError {
    /// No receipt stored under the given sequence (gap or chain too short).
    #[error("Receipt {sequence} not found")]
    ReceiptNotFound { sequence: u64 },
    /// A receipt's `previous_hash` does not match its predecessor's hash.
    #[error("Hash mismatch at sequence {sequence}")]
    HashMismatch { sequence: u64 },
    /// A receipt's token signature failed verification.
    // NOTE(review): not produced by the chain-walk code visible here —
    // confirm a caller actually emits this variant.
    #[error("Signature verification failed at sequence {sequence}")]
    SignatureInvalid { sequence: u64 },
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: a fresh gate issues a token with sequence 0 and a
    // non-empty action id for a plain config-change action.
    #[tokio::test]
    async fn test_tilezero_basic_permit() {
        let thresholds = GateThresholds::default();
        let tilezero = TileZero::new(thresholds);
        let ctx = ActionContext {
            action_id: "test-action-1".to_string(),
            action_type: "config_change".to_string(),
            target: ActionTarget {
                device: Some("router-1".to_string()),
                path: Some("/config".to_string()),
                extra: HashMap::new(),
            },
            context: ActionMetadata {
                agent_id: "agent-1".to_string(),
                session_id: Some("session-1".to_string()),
                prior_actions: vec![],
                urgency: "normal".to_string(),
            },
        };
        let token = tilezero.decide(&ctx).await;
        assert_eq!(token.sequence, 0);
        assert!(!token.action_id.is_empty());
    }
    // Every decision should append a receipt; two decisions must yield
    // receipts at sequences 0 and 1.
    #[tokio::test]
    async fn test_receipt_chain() {
        let thresholds = GateThresholds::default();
        let tilezero = TileZero::new(thresholds);
        let ctx = ActionContext {
            action_id: "test-action-1".to_string(),
            action_type: "config_change".to_string(),
            target: ActionTarget {
                device: None,
                path: None,
                extra: HashMap::new(),
            },
            context: ActionMetadata {
                agent_id: "agent-1".to_string(),
                session_id: None,
                prior_actions: vec![],
                urgency: "normal".to_string(),
            },
        };
        // Generate multiple decisions
        let _token1 = tilezero.decide(&ctx).await;
        let _token2 = tilezero.decide(&ctx).await;
        // Verify receipts exist
        let receipt0 = tilezero.get_receipt(0).await;
        assert!(receipt0.is_some());
        let receipt1 = tilezero.get_receipt(1).await;
        assert!(receipt1.is_some());
    }
}

View File

@@ -0,0 +1,609 @@
//! Report merging from 255 worker tiles
//!
//! This module handles aggregating partial graph reports from worker tiles
//! into a unified view for supergraph construction.
//!
//! ## Performance Optimizations
//!
//! - Pre-allocated HashMaps with expected capacity (255 workers)
//! - Inline functions for merge strategies
//! - Iterator-based processing to avoid allocations
//! - Sorted slices with binary search for median calculation
//! - Capacity hints for all collections
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use crate::TileId;
/// Expected number of worker tiles for capacity pre-allocation
/// (a sizing hint only — merging works for any actual worker count)
const EXPECTED_WORKERS: usize = 255;
/// Expected nodes per worker for capacity hints
const EXPECTED_NODES_PER_WORKER: usize = 16;
/// Expected boundary edges per worker
const EXPECTED_EDGES_PER_WORKER: usize = 32;
/// Epoch identifier for report sequencing
pub type Epoch = u64;
/// Transaction identifier (32-byte hash)
pub type TxId = [u8; 32];
/// Errors during report merging
///
/// Produced by `ReportMerger::merge` and its helpers.
#[derive(Debug, Clone)]
pub enum MergeError {
    /// Empty report set (nothing to merge)
    EmptyReports,
    /// Conflicting epochs in reports — all reports must share one epoch
    ConflictingEpochs,
    /// Invalid edge weight
    // NOTE(review): not constructed anywhere in the visible merge code.
    InvalidWeight(String),
    /// Node not found
    // NOTE(review): not constructed anywhere in the visible merge code.
    NodeNotFound(String),
}
impl std::fmt::Display for MergeError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
MergeError::EmptyReports => write!(f, "Empty report set"),
MergeError::ConflictingEpochs => write!(f, "Conflicting epochs in reports"),
MergeError::InvalidWeight(msg) => write!(f, "Invalid edge weight: {}", msg),
MergeError::NodeNotFound(id) => write!(f, "Node not found: {}", id),
}
}
}
impl std::error::Error for MergeError {}
/// Strategy for merging overlapping data from multiple workers
///
/// Applies to node weights, edge capacities, and confidence aggregation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum MergeStrategy {
    /// Simple average of all values
    SimpleAverage,
    /// Weighted average by tile confidence (coherence-weighted for nodes;
    /// falls back to plain average for edge capacities)
    WeightedAverage,
    /// Take the median value
    Median,
    /// Take the maximum value (conservative)
    Maximum,
    /// Byzantine fault tolerant: mean of the lowest 2/3 of reported values,
    /// discarding the highest third as potentially adversarial
    ByzantineFaultTolerant,
}
/// A node summary from a worker tile
///
/// One worker's local view of a super-node; multiple workers may report
/// the same node id, which the merger reconciles.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeSummary {
    /// Node identifier
    pub id: String,
    /// Aggregated weight/importance
    pub weight: f64,
    /// Number of edges in worker's partition
    pub edge_count: usize,
    /// Local coherence score (used as the weighting factor by
    /// `MergeStrategy::WeightedAverage`)
    pub coherence: f64,
}
/// An edge summary from a worker tile (for boundary edges)
///
/// Edges are treated as undirected by the merger, which normalizes the
/// (source, target) key order.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EdgeSummary {
    /// Source node ID
    pub source: String,
    /// Target node ID
    pub target: String,
    /// Edge capacity/weight
    pub capacity: f64,
    /// Is this a boundary edge (crosses tile partitions)?
    /// Only boundary edges participate in merging.
    pub is_boundary: bool,
}
/// Report from a worker tile containing partition summary
// NOTE(review): `Default` derives confidence = 0.0 while `WorkerReport::new`
// sets 1.0 — confirm which default callers rely on.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct WorkerReport {
    /// Tile identifier (1-255)
    pub tile_id: TileId,
    /// Epoch this report belongs to
    pub epoch: Epoch,
    /// Timestamp when report was generated (unix millis)
    pub timestamp_ms: u64,
    /// Transactions processed in this partition
    pub transactions: Vec<TxId>,
    /// Node summaries for super-nodes
    pub nodes: Vec<NodeSummary>,
    /// Boundary edge summaries
    pub boundary_edges: Vec<EdgeSummary>,
    /// Local min-cut value (within partition)
    pub local_mincut: f64,
    /// Worker's confidence in this report (0.0-1.0)
    pub confidence: f64,
    /// Hash of the worker's local state (see `compute_state_hash`)
    pub state_hash: [u8; 32],
}
impl WorkerReport {
    /// Create a new, empty worker report for `tile_id` at `epoch`.
    ///
    /// Starts with full confidence (1.0), a zeroed state hash/timestamp, and
    /// no transactions, nodes, or boundary edges.
    pub fn new(tile_id: TileId, epoch: Epoch) -> Self {
        Self {
            tile_id,
            epoch,
            timestamp_ms: 0,
            transactions: Vec::new(),
            nodes: Vec::new(),
            boundary_edges: Vec::new(),
            local_mincut: 0.0,
            confidence: 1.0,
            state_hash: [0u8; 32],
        }
    }
    /// Add a node summary to this report.
    pub fn add_node(&mut self, node: NodeSummary) {
        self.nodes.push(node);
    }
    /// Add a boundary edge summary to this report.
    pub fn add_boundary_edge(&mut self, edge: EdgeSummary) {
        self.boundary_edges.push(edge);
    }
    /// Compute state hash using blake3 and store it in `state_hash`.
    ///
    /// The hash covers tile_id, epoch, node ids/weights, and boundary edge
    /// endpoints/capacities — in insertion order, so it is order-sensitive.
    /// It does NOT cover timestamp_ms, transactions, local_mincut, or
    /// confidence.
    pub fn compute_state_hash(&mut self) {
        let mut hasher = blake3::Hasher::new();
        hasher.update(&self.tile_id.to_le_bytes());
        hasher.update(&self.epoch.to_le_bytes());
        for node in &self.nodes {
            hasher.update(node.id.as_bytes());
            hasher.update(&node.weight.to_le_bytes());
        }
        for edge in &self.boundary_edges {
            hasher.update(edge.source.as_bytes());
            hasher.update(edge.target.as_bytes());
            hasher.update(&edge.capacity.to_le_bytes());
        }
        self.state_hash = *hasher.finalize().as_bytes();
    }
}
/// Merged report combining data from multiple workers
///
/// Output of `ReportMerger::merge`; all inputs share `epoch`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergedReport {
    /// Epoch of the merged report
    pub epoch: Epoch,
    /// Number of worker reports merged
    pub worker_count: usize,
    /// Merged super-nodes (aggregated from all workers), keyed by node id
    pub super_nodes: HashMap<String, MergedNode>,
    /// Merged boundary edges
    pub boundary_edges: Vec<MergedEdge>,
    /// Global min-cut estimate
    pub global_mincut_estimate: f64,
    /// Overall confidence (aggregated per the strategy)
    pub confidence: f64,
    /// Merge strategy used
    pub strategy: MergeStrategy,
}
/// A merged super-node aggregated from multiple workers
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergedNode {
    /// Node identifier
    pub id: String,
    /// Aggregated weight (per the merger's strategy)
    pub weight: f64,
    /// Total edge count across workers (summed, so shared edges may be
    /// counted once per reporting worker)
    pub total_edge_count: usize,
    /// Average coherence across contributing reports
    pub avg_coherence: f64,
    /// Contributing worker tiles
    pub contributors: Vec<TileId>,
}
/// A merged edge aggregated from boundary reports
///
/// Undirected: `source`/`target` are normalized so `source <= target`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MergedEdge {
    /// Source node
    pub source: String,
    /// Target node
    pub target: String,
    /// Aggregated capacity (per the merger's strategy)
    pub capacity: f64,
    /// Number of workers reporting this edge
    pub report_count: usize,
}
/// Report merger that combines worker reports
///
/// OPTIMIZATION: Uses capacity hints and inline functions for better performance
pub struct ReportMerger {
    // Strategy applied to node weights, edge capacities, and confidence.
    strategy: MergeStrategy,
    /// Pre-allocated scratch buffer for weight calculations
    /// OPTIMIZATION: Reuse allocation across merge operations
    // NOTE(review): this buffer is never read or written by the visible impl;
    // either wire it into the merge paths or remove it.
    scratch_weights: Vec<f64>,
}
impl ReportMerger {
    /// Create a new report merger with given strategy
    #[inline]
    pub fn new(strategy: MergeStrategy) -> Self {
        Self {
            strategy,
            // Pre-allocate scratch buffer with expected capacity
            // NOTE(review): currently unused by any merge path (speculative).
            scratch_weights: Vec::with_capacity(EXPECTED_WORKERS),
        }
    }
    /// Merge multiple worker reports into a unified view
    ///
    /// All reports must belong to the same epoch.
    ///
    /// # Errors
    /// - `MergeError::EmptyReports` when `reports` is empty
    /// - `MergeError::ConflictingEpochs` when epochs differ
    ///
    /// OPTIMIZATION: Pre-allocates all collections with expected capacity
    pub fn merge(&self, reports: &[WorkerReport]) -> Result<MergedReport, MergeError> {
        if reports.is_empty() {
            return Err(MergeError::EmptyReports);
        }
        // Verify all reports are from the same epoch.
        // Plain loop short-circuits on the first mismatch.
        let epoch = reports[0].epoch;
        for r in reports.iter().skip(1) {
            if r.epoch != epoch {
                return Err(MergeError::ConflictingEpochs);
            }
        }
        // Merge nodes - pre-allocate based on expected size
        let super_nodes = self.merge_nodes(reports)?;
        // Merge boundary edges
        let boundary_edges = self.merge_edges(reports)?;
        // Compute global min-cut estimate
        let global_mincut_estimate = self.estimate_global_mincut(reports);
        // Compute aggregated confidence
        let confidence = self.aggregate_confidence(reports);
        Ok(MergedReport {
            epoch,
            worker_count: reports.len(),
            super_nodes,
            boundary_edges,
            global_mincut_estimate,
            confidence,
            strategy: self.strategy,
        })
    }
    /// Merge node summaries from all workers, grouped by node id.
    ///
    /// OPTIMIZATION: Pre-allocates HashMap with expected capacity
    #[inline]
    fn merge_nodes(
        &self,
        reports: &[WorkerReport],
    ) -> Result<HashMap<String, MergedNode>, MergeError> {
        // OPTIMIZATION: Estimate total nodes across all reports
        // (an upper-bound guess; only affects initial capacity)
        let estimated_nodes = reports.len() * EXPECTED_NODES_PER_WORKER;
        let mut node_data: HashMap<String, Vec<(TileId, &NodeSummary)>> =
            HashMap::with_capacity(estimated_nodes);
        // Collect all node data, grouping observations of the same id
        for report in reports {
            for node in &report.nodes {
                node_data
                    .entry(node.id.clone())
                    .or_insert_with(|| Vec::with_capacity(reports.len()))
                    .push((report.tile_id, node));
            }
        }
        // Merge each node
        // OPTIMIZATION: Pre-allocate result HashMap
        let mut merged = HashMap::with_capacity(node_data.len());
        for (id, data) in node_data {
            let merged_node = self.merge_single_node(&id, &data)?;
            merged.insert(id, merged_node);
        }
        Ok(merged)
    }
    /// Merge a single node's data from multiple workers.
    ///
    /// `data` is non-empty by construction (ids only enter the map when at
    /// least one observation is pushed).
    ///
    /// OPTIMIZATION: Uses inline strategy functions and avoids repeated allocations
    #[inline]
    fn merge_single_node(
        &self,
        id: &str,
        data: &[(TileId, &NodeSummary)],
    ) -> Result<MergedNode, MergeError> {
        // OPTIMIZATION: Pre-allocate with exact capacity
        let mut contributors: Vec<TileId> = Vec::with_capacity(data.len());
        contributors.extend(data.iter().map(|(tile, _)| *tile));
        let total_edge_count: usize = data.iter().map(|(_, n)| n.edge_count).sum();
        let len = data.len();
        let len_f64 = len as f64;
        let weight = match self.strategy {
            MergeStrategy::SimpleAverage => {
                // OPTIMIZATION: Single pass sum
                let sum: f64 = data.iter().map(|(_, n)| n.weight).sum();
                sum / len_f64
            }
            MergeStrategy::WeightedAverage => {
                // Coherence-weighted mean; falls back to 0.0 when all
                // coherence values sum to zero.
                // OPTIMIZATION: Single pass for both sums
                let (weighted_sum, coherence_sum) =
                    data.iter().fold((0.0, 0.0), |(ws, cs), (_, n)| {
                        (ws + n.weight * n.coherence, cs + n.coherence)
                    });
                if coherence_sum > 0.0 {
                    weighted_sum / coherence_sum
                } else {
                    0.0
                }
            }
            MergeStrategy::Median => {
                // OPTIMIZATION: Inline median calculation
                Self::compute_median(data.iter().map(|(_, n)| n.weight))
            }
            MergeStrategy::Maximum => {
                // OPTIMIZATION: Use fold without intermediate iterator
                data.iter()
                    .map(|(_, n)| n.weight)
                    .fold(f64::NEG_INFINITY, f64::max)
            }
            MergeStrategy::ByzantineFaultTolerant => {
                // BFT: conservative mean of the lowest 2/3 of reported weights
                Self::compute_bft_weight(data.iter().map(|(_, n)| n.weight), len)
            }
        };
        // OPTIMIZATION: Single pass for coherence average
        let avg_coherence = data.iter().map(|(_, n)| n.coherence).sum::<f64>() / len_f64;
        Ok(MergedNode {
            id: id.to_string(),
            weight,
            total_edge_count,
            avg_coherence,
            contributors,
        })
    }
    /// Compute median of an iterator of f64 values (0.0 for an empty input).
    ///
    /// NaN values compare as equal here, so their placement is unspecified.
    ///
    /// OPTIMIZATION: Inline function to avoid heap allocation overhead
    #[inline]
    fn compute_median<I: Iterator<Item = f64>>(iter: I) -> f64 {
        let mut weights: Vec<f64> = iter.collect();
        let len = weights.len();
        if len == 0 {
            return 0.0;
        }
        // OPTIMIZATION: Use unstable sort for f64 (faster, no stability needed)
        weights.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        let mid = len / 2;
        if len % 2 == 0 {
            // SAFETY: mid > 0 when len >= 2 and even
            (weights[mid - 1] + weights[mid]) * 0.5
        } else {
            weights[mid]
        }
    }
    /// Compute the Byzantine-fault-tolerant weight: the arithmetic mean of
    /// the lowest `2*len/3` values after an ascending sort, discarding the
    /// highest third as potentially adversarial. Falls back to the single
    /// smallest value when the 2/3 threshold rounds to zero (len < 2).
    ///
    /// OPTIMIZATION: Inline function with optimized threshold calculation
    #[inline]
    fn compute_bft_weight<I: Iterator<Item = f64>>(iter: I, len: usize) -> f64 {
        let mut weights: Vec<f64> = iter.collect();
        if weights.is_empty() {
            return 0.0;
        }
        weights.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
        // 2/3 threshold (integer division truncates)
        let threshold = (len * 2) / 3;
        if threshold > 0 {
            let sum: f64 = weights.iter().take(threshold).sum();
            sum / threshold as f64
        } else {
            weights[0]
        }
    }
    /// Merge boundary edges from all workers.
    ///
    /// Only edges flagged `is_boundary` participate; keys are normalized so
    /// the merge treats edges as undirected.
    ///
    /// OPTIMIZATION: Pre-allocates collections, uses inline merge strategies
    #[inline]
    fn merge_edges(&self, reports: &[WorkerReport]) -> Result<Vec<MergedEdge>, MergeError> {
        // OPTIMIZATION: Pre-allocate with expected capacity
        let estimated_edges = reports.len() * EXPECTED_EDGES_PER_WORKER;
        let mut edge_data: HashMap<(String, String), Vec<f64>> =
            HashMap::with_capacity(estimated_edges);
        // Collect all edge data
        for report in reports {
            for edge in &report.boundary_edges {
                if edge.is_boundary {
                    // Normalize edge key (smaller first for undirected)
                    // OPTIMIZATION: Avoid unnecessary clones by checking order first
                    let key = if edge.source <= edge.target {
                        (edge.source.clone(), edge.target.clone())
                    } else {
                        (edge.target.clone(), edge.source.clone())
                    };
                    edge_data
                        .entry(key)
                        .or_insert_with(|| Vec::with_capacity(reports.len()))
                        .push(edge.capacity);
                }
            }
        }
        // Merge each edge
        // OPTIMIZATION: Pre-allocate result vector
        let mut merged = Vec::with_capacity(edge_data.len());
        for ((source, target), capacities) in edge_data {
            let len = capacities.len();
            let capacity = self.merge_capacities(&capacities, len);
            merged.push(MergedEdge {
                source,
                target,
                capacity,
                report_count: len,
            });
        }
        Ok(merged)
    }
    /// Merge capacities according to strategy.
    ///
    /// `WeightedAverage` degrades to a plain average here because edges carry
    /// no per-report coherence weight.
    ///
    /// OPTIMIZATION: Inline function to avoid match overhead in loop
    #[inline(always)]
    fn merge_capacities(&self, capacities: &[f64], len: usize) -> f64 {
        match self.strategy {
            MergeStrategy::SimpleAverage | MergeStrategy::WeightedAverage => {
                capacities.iter().sum::<f64>() / len as f64
            }
            MergeStrategy::Median => Self::compute_median(capacities.iter().copied()),
            MergeStrategy::Maximum => capacities.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b)),
            MergeStrategy::ByzantineFaultTolerant => {
                Self::compute_bft_weight(capacities.iter().copied(), len)
            }
        }
    }
    /// Estimate global min-cut from local values.
    ///
    /// Heuristic: sum of local min-cuts, damped by 1% per reported boundary
    /// edge — more boundary crossings mean the naive sum overestimates.
    ///
    /// OPTIMIZATION: Single-pass computation
    #[inline]
    fn estimate_global_mincut(&self, reports: &[WorkerReport]) -> f64 {
        // OPTIMIZATION: Single pass for both local_sum and boundary_count
        let (local_sum, boundary_count) = reports.iter().fold((0.0, 0usize), |(sum, count), r| {
            let bc = r.boundary_edges.iter().filter(|e| e.is_boundary).count();
            (sum + r.local_mincut, count + bc)
        });
        // Simple estimate: local sum adjusted by boundary factor
        // OPTIMIZATION: Pre-compute constant multiplier
        let boundary_factor = 1.0 / (1.0 + (boundary_count as f64 * 0.01));
        local_sum * boundary_factor
    }
    /// Aggregate confidence from all workers.
    ///
    /// BFT: the smallest confidence within the top 2/3 (descending sort,
    /// element at `threshold - 1`). Otherwise: geometric mean — note a
    /// single 0.0 confidence drives ln to -inf and the result to 0.0.
    ///
    /// OPTIMIZATION: Inline, uses fold for single-pass computation
    #[inline]
    fn aggregate_confidence(&self, reports: &[WorkerReport]) -> f64 {
        let len = reports.len();
        if len == 0 {
            return 0.0;
        }
        match self.strategy {
            MergeStrategy::ByzantineFaultTolerant => {
                // Conservative: use minimum of top 2/3
                let mut confidences: Vec<f64> = Vec::with_capacity(len);
                confidences.extend(reports.iter().map(|r| r.confidence));
                // Sort descending
                confidences
                    .sort_unstable_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));
                let threshold = (len * 2) / 3;
                confidences
                    .get(threshold.saturating_sub(1))
                    .copied()
                    .unwrap_or(0.0)
            }
            _ => {
                // Geometric mean using log-sum for numerical stability
                // OPTIMIZATION: Use log-sum-exp pattern to avoid overflow
                let log_sum: f64 = reports.iter().map(|r| r.confidence.ln()).sum();
                (log_sum / len as f64).exp()
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Build a minimal one-node report whose weight scales with tile_id,
    // so averages across tiles are easy to predict.
    fn create_test_report(tile_id: TileId, epoch: Epoch) -> WorkerReport {
        let mut report = WorkerReport::new(tile_id, epoch);
        report.add_node(NodeSummary {
            id: "node1".to_string(),
            weight: tile_id as f64 * 0.1,
            edge_count: 5,
            coherence: 0.9,
        });
        report.confidence = 0.95;
        report.local_mincut = 1.0;
        report
    }
    // SimpleAverage over weights 0.1, 0.2, 0.3 should yield 0.2.
    #[test]
    fn test_merge_simple_average() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports = vec![
            create_test_report(1, 0),
            create_test_report(2, 0),
            create_test_report(3, 0),
        ];
        let merged = merger.merge(&reports).unwrap();
        assert_eq!(merged.worker_count, 3);
        assert_eq!(merged.epoch, 0);
        let node = merged.super_nodes.get("node1").unwrap();
        // Average of 0.1, 0.2, 0.3 = 0.2
        assert!((node.weight - 0.2).abs() < 0.001);
    }
    // Merging nothing is an explicit error, not an empty result.
    #[test]
    fn test_merge_empty_reports() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let result = merger.merge(&[]);
        assert!(matches!(result, Err(MergeError::EmptyReports)));
    }
    // Reports from different epochs must be rejected.
    #[test]
    fn test_merge_conflicting_epochs() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports = vec![create_test_report(1, 0), create_test_report(2, 1)];
        let result = merger.merge(&reports);
        assert!(matches!(result, Err(MergeError::ConflictingEpochs)));
    }
    // compute_state_hash must replace the zeroed placeholder hash.
    #[test]
    fn test_state_hash_computation() {
        let mut report = create_test_report(1, 0);
        report.compute_state_hash();
        assert_ne!(report.state_hash, [0u8; 32]);
    }
}

View File

@@ -0,0 +1,303 @@
//! Permit token issuance and verification
use crate::{ActionId, GateDecision};
use ed25519_dalek::{Signature, Signer, SigningKey, Verifier as Ed25519Verifier, VerifyingKey};
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
/// Permit token: a signed capability that agents must present
// NOTE(review): #[repr(C)] has no practical effect here — the struct holds a
// String (heap data) and is transported as JSON, never as raw bytes; confirm
// whether the attribute can be dropped.
#[repr(C)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PermitToken {
    /// Gate decision
    pub decision: GateDecision,
    /// Action being permitted
    pub action_id: ActionId,
    /// Timestamp (nanoseconds since epoch)
    pub timestamp: u64,
    /// Time-to-live in nanoseconds
    pub ttl_ns: u64,
    /// Hash of the witness data
    #[serde(with = "hex::serde")]
    pub witness_hash: [u8; 32],
    /// Sequence number
    pub sequence: u64,
    /// Full Ed25519 signature (64 bytes) over the blake3 hash of
    /// `signable_content()`
    #[serde(with = "hex::serde")]
    pub signature: [u8; 64],
}
impl PermitToken {
    /// Check if the token is still valid (not expired) at `now_ns`.
    ///
    /// Uses saturating arithmetic so a token with `ttl_ns` near `u64::MAX`
    /// cannot overflow the expiry computation (the previous unchecked `+`
    /// panicked in debug builds and wrapped — spuriously expiring the token —
    /// in release builds). This matches `Verifier::verify_full`.
    pub fn is_valid_time(&self, now_ns: u64) -> bool {
        now_ns <= self.timestamp.saturating_add(self.ttl_ns)
    }
    /// Encode the token as standard base64 over its JSON form for transport.
    pub fn encode_base64(&self) -> String {
        let json = serde_json::to_vec(self).unwrap_or_default();
        base64::Engine::encode(&base64::engine::general_purpose::STANDARD, &json)
    }
    /// Decode a token previously produced by [`Self::encode_base64`].
    ///
    /// # Errors
    /// `InvalidBase64` for malformed base64; `InvalidJson` when the decoded
    /// bytes are not a valid `PermitToken` JSON document.
    pub fn decode_base64(encoded: &str) -> Result<Self, TokenDecodeError> {
        let bytes = base64::Engine::decode(&base64::engine::general_purpose::STANDARD, encoded)
            .map_err(|_| TokenDecodeError::InvalidBase64)?;
        serde_json::from_slice(&bytes).map_err(|_| TokenDecodeError::InvalidJson)
    }
    /// Get the canonical byte string that is signed — every field except
    /// `signature` itself, in fixed little-endian order.
    pub fn signable_content(&self) -> Vec<u8> {
        let mut content = Vec::with_capacity(128);
        content.extend_from_slice(&self.sequence.to_le_bytes());
        content.extend_from_slice(&self.timestamp.to_le_bytes());
        content.extend_from_slice(&self.ttl_ns.to_le_bytes());
        content.extend_from_slice(&self.witness_hash);
        content.extend_from_slice(self.action_id.as_bytes());
        content.push(self.decision as u8);
        content
    }
}
/// Error decoding a token
///
/// Returned by `PermitToken::decode_base64`.
#[derive(Debug, thiserror::Error)]
pub enum TokenDecodeError {
    /// The transport string was not valid standard base64.
    #[error("Invalid base64 encoding")]
    InvalidBase64,
    /// The decoded bytes were not a valid `PermitToken` JSON document.
    #[error("Invalid JSON structure")]
    InvalidJson,
}
/// Permit state: manages signing keys and token issuance
pub struct PermitState {
    /// Signing key for tokens (Ed25519 private half; never exposed)
    signing_key: SigningKey,
    /// Next sequence number (atomic so issuance is safe across threads)
    next_sequence: std::sync::atomic::AtomicU64,
}
impl PermitState {
    /// Create new permit state with a freshly generated Ed25519 signing key.
    pub fn new() -> Self {
        Self::with_key(SigningKey::generate(&mut OsRng))
    }
    /// Create permit state wrapping a caller-supplied signing key.
    pub fn with_key(signing_key: SigningKey) -> Self {
        Self {
            signing_key,
            next_sequence: std::sync::atomic::AtomicU64::new(0),
        }
    }
    /// Atomically fetch the next sequence number (monotonically increasing,
    /// starting at 0).
    pub fn next_sequence(&self) -> u64 {
        self.next_sequence
            .fetch_add(1, std::sync::atomic::Ordering::SeqCst)
    }
    /// Sign a token: blake3-hash its signable content, Ed25519-sign the
    /// digest, and store the full 64-byte signature in the token.
    pub fn sign_token(&self, mut token: PermitToken) -> PermitToken {
        let digest = blake3::hash(&token.signable_content());
        let sig = self.signing_key.sign(digest.as_bytes());
        token.signature.copy_from_slice(&sig.to_bytes());
        token
    }
    /// Derive a verifier holding only the public half of the key pair.
    pub fn verifier(&self) -> Verifier {
        Verifier {
            verifying_key: self.signing_key.verifying_key(),
        }
    }
}
impl Default for PermitState {
    /// Equivalent to `PermitState::new()`: generates a fresh signing key.
    fn default() -> Self {
        Self::new()
    }
}
/// Token verifier with actual Ed25519 signature verification
///
/// Holds only the public key, so it can be cloned and shared freely.
#[derive(Clone)]
pub struct Verifier {
    /// Ed25519 verifying key
    verifying_key: VerifyingKey,
}
impl Verifier {
    /// Create a new verifier from a verifying key.
    pub fn new(verifying_key: VerifyingKey) -> Self {
        Self { verifying_key }
    }
    /// Verify a token's Ed25519 signature over the blake3 hash of its
    /// signable content.
    ///
    /// # Errors
    /// `VerifyError::SignatureFailed` when the signature does not match.
    pub fn verify(&self, token: &PermitToken) -> Result<(), VerifyError> {
        let digest = blake3::hash(&token.signable_content());
        let sig = Signature::from_bytes(&token.signature);
        self.verifying_key
            .verify(digest.as_bytes(), &sig)
            .map_err(|_| VerifyError::SignatureFailed)
    }
    /// Verify the signature and the TTL window against the current clock.
    ///
    /// # Errors
    /// Signature errors from [`Self::verify`], or `VerifyError::Expired`
    /// once the token's TTL has elapsed.
    pub fn verify_full(&self, token: &PermitToken) -> Result<(), VerifyError> {
        self.verify(token)?;
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;
        // Saturating add prevents overflow for very large TTLs.
        if now > token.timestamp.saturating_add(token.ttl_ns) {
            return Err(VerifyError::Expired);
        }
        Ok(())
    }
}
/// Verification error
///
/// Returned by `Verifier::verify` and `Verifier::verify_full`.
#[derive(Debug, thiserror::Error)]
pub enum VerifyError {
    /// The Ed25519 signature did not validate against the public key.
    #[error("Signature verification failed")]
    SignatureFailed,
    /// Hash mismatch
    // NOTE(review): not produced by the visible verification paths.
    #[error("Hash mismatch")]
    HashMismatch,
    /// The token's timestamp + TTL window has elapsed.
    #[error("Token has expired")]
    Expired,
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round trip: a token signed by a state verifies under that state's key.
    #[test]
    fn test_token_sign_verify() {
        let state = PermitState::new();
        let verifier = state.verifier();
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test-action".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60_000_000_000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed = state.sign_token(token);
        assert!(verifier.verify(&signed).is_ok());
    }
    // Mutating any signed field (here: action_id) must break verification.
    #[test]
    fn test_token_tamper_detection() {
        let state = PermitState::new();
        let verifier = state.verifier();
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test-action".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60_000_000_000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let mut signed = state.sign_token(token);
        // Tamper with the action_id
        signed.action_id = "malicious-action".to_string();
        // Verification should fail
        assert!(verifier.verify(&signed).is_err());
    }
    // A token signed by one key must not verify under a different key.
    #[test]
    fn test_token_wrong_key_rejection() {
        let state1 = PermitState::new();
        let state2 = PermitState::new();
        let verifier2 = state2.verifier();
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test-action".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60_000_000_000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        // Sign with state1's key
        let signed = state1.sign_token(token);
        // Verify with state2's key should fail
        assert!(verifier2.verify(&signed).is_err());
    }
    // base64 transport encoding must round-trip the token fields.
    #[test]
    fn test_token_base64_roundtrip() {
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test-action".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60_000_000_000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let encoded = token.encode_base64();
        let decoded = PermitToken::decode_base64(&encoded).unwrap();
        assert_eq!(token.action_id, decoded.action_id);
        assert_eq!(token.sequence, decoded.sequence);
    }
    // Signature validity and freshness are independent checks: an expired
    // token still has a good signature but fails verify_full.
    #[test]
    fn test_token_expiry() {
        let state = PermitState::new();
        let verifier = state.verifier();
        // Create a token that expired in the past
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test-action".to_string(),
            timestamp: 1000000000, // Long ago
            ttl_ns: 1,             // 1 nanosecond TTL
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed = state.sign_token(token);
        // Signature should be valid
        assert!(verifier.verify(&signed).is_ok());
        // But full verification (including TTL) should fail
        assert!(matches!(
            verifier.verify_full(&signed),
            Err(VerifyError::Expired)
        ));
    }
}

View File

@@ -0,0 +1,271 @@
//! Witness receipt and hash-chained log
use crate::{ChainVerifyError, PermitToken};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Witness receipt: cryptographic proof of a gate decision
///
/// Receipts form a hash chain: each one records the hash of its predecessor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WitnessReceipt {
    /// Sequence number
    pub sequence: u64,
    /// The permit token issued
    pub token: PermitToken,
    /// Hash of the previous receipt (all-zero for the genesis receipt)
    #[serde(with = "hex::serde")]
    pub previous_hash: [u8; 32],
    /// Summary of witness data
    pub witness_summary: WitnessSummary,
    /// Timestamp proof (not covered by `hash()` — see impl)
    pub timestamp_proof: TimestampProof,
}
impl WitnessReceipt {
    /// Compute the blake3 hash of this receipt.
    ///
    /// Covers sequence, the token's signable content, previous_hash, and the
    /// witness summary hash — in that order. Note: `timestamp_proof` and the
    /// token's signature bytes are NOT included in the hash.
    pub fn hash(&self) -> [u8; 32] {
        let mut hasher = blake3::Hasher::new();
        hasher.update(&self.sequence.to_le_bytes());
        hasher.update(&self.token.signable_content());
        hasher.update(&self.previous_hash);
        hasher.update(&self.witness_summary.hash());
        *hasher.finalize().as_bytes()
    }
}
/// Timestamp proof for receipts
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimestampProof {
    /// Timestamp (same clock/units as the token's timestamp)
    pub timestamp: u64,
    /// Hash of previous receipt
    #[serde(with = "hex::serde")]
    pub previous_receipt_hash: [u8; 32],
    /// Merkle root (for batch anchoring; zeroed in v0)
    #[serde(with = "hex::serde")]
    pub merkle_root: [u8; 32],
}
/// Summary of witness data from the three filters
/// (structural min-cut, conformal prediction, e-process evidence)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WitnessSummary {
    /// Structural witness
    pub structural: StructuralWitness,
    /// Predictive witness
    pub predictive: PredictiveWitness,
    /// Evidential witness
    pub evidential: EvidentialWitness,
}
impl WitnessSummary {
    /// Create an empty witness summary: zeroed scores, "unknown" verdicts,
    /// and a neutral e-value of 1.0.
    pub fn empty() -> Self {
        Self {
            structural: StructuralWitness {
                cut_value: 0.0,
                partition: "unknown".to_string(),
                critical_edges: 0,
                boundary: vec![],
            },
            predictive: PredictiveWitness {
                set_size: 0,
                coverage: 0.0,
            },
            evidential: EvidentialWitness {
                e_value: 1.0,
                verdict: "unknown".to_string(),
            },
        }
    }
    /// Compute blake3 hash of the summary's JSON serialization.
    ///
    /// Stable as long as the struct's serde field order is unchanged; a
    /// serialization failure silently hashes the empty byte string.
    pub fn hash(&self) -> [u8; 32] {
        let json = serde_json::to_vec(self).unwrap_or_default();
        *blake3::hash(&json).as_bytes()
    }
    /// Convert to a JSON value (`Null` on serialization failure).
    pub fn to_json(&self) -> serde_json::Value {
        serde_json::to_value(self).unwrap_or(serde_json::Value::Null)
    }
}
/// Structural witness from min-cut analysis
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StructuralWitness {
    /// Cut value
    pub cut_value: f64,
    /// Partition status
    pub partition: String,
    /// Number of critical edges
    pub critical_edges: usize,
    /// Boundary edge IDs (defaults to empty when absent in serialized input)
    #[serde(default)]
    pub boundary: Vec<String>,
}
/// Predictive witness from conformal prediction
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PredictiveWitness {
    /// Prediction set size
    pub set_size: usize,
    /// Coverage target
    pub coverage: f64,
}
/// Evidential witness from e-process
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EvidentialWitness {
    /// Accumulated e-value (1.0 is the neutral starting point)
    pub e_value: f64,
    /// Verdict (accept/continue/reject)
    pub verdict: String,
}
/// Hash-chained receipt log
///
/// Stores receipts keyed by sequence number and tracks the chain head hash.
pub struct ReceiptLog {
    /// Receipts by sequence number
    pub(self) receipts: HashMap<u64, WitnessReceipt>,
    /// Latest sequence number (None until the first append)
    latest_sequence: Option<u64>,
    /// Hash of the latest receipt (all-zero genesis hash when empty)
    latest_hash: [u8; 32],
}
impl ReceiptLog {
    /// Create an empty log whose chain starts at the all-zero genesis hash.
    pub fn new() -> Self {
        Self {
            receipts: HashMap::new(),
            latest_sequence: None,
            latest_hash: [0u8; 32], // Genesis hash
        }
    }
    /// Hash of the most recently appended receipt (genesis hash when empty).
    pub fn last_hash(&self) -> [u8; 32] {
        self.latest_hash
    }
    /// Append a receipt, advancing the chain head to its hash.
    ///
    /// The caller is responsible for setting `previous_hash` to `last_hash()`
    /// and keeping sequence numbers contiguous; no validation happens here.
    pub fn append(&mut self, receipt: WitnessReceipt) {
        let head = receipt.hash();
        let sequence = receipt.sequence;
        self.receipts.insert(sequence, receipt);
        self.latest_sequence = Some(sequence);
        self.latest_hash = head;
    }
    /// Borrow a receipt by sequence number.
    pub fn get(&self, sequence: u64) -> Option<&WitnessReceipt> {
        self.receipts.get(&sequence)
    }
    /// Sequence number of the most recently appended receipt, if any.
    pub fn latest_sequence(&self) -> Option<u64> {
        self.latest_sequence
    }
    /// Walk the chain from genesis through `sequence`, checking every
    /// `previous_hash` link against the recomputed predecessor hash.
    pub fn verify_chain_to(&self, sequence: u64) -> Result<(), ChainVerifyError> {
        (0..=sequence).try_fold([0u8; 32], |expected_previous, seq| {
            let receipt = self
                .receipts
                .get(&seq)
                .ok_or(ChainVerifyError::ReceiptNotFound { sequence: seq })?;
            if receipt.previous_hash != expected_previous {
                Err(ChainVerifyError::HashMismatch { sequence: seq })
            } else {
                Ok(receipt.hash())
            }
        })?;
        Ok(())
    }
    /// Number of receipts stored.
    pub fn len(&self) -> usize {
        self.receipts.len()
    }
    /// True when no receipts have been appended.
    pub fn is_empty(&self) -> bool {
        self.receipts.is_empty()
    }
    /// Iterate over receipts in arbitrary (HashMap) order.
    pub fn iter(&self) -> impl Iterator<Item = &WitnessReceipt> {
        self.receipts.values()
    }
}
impl Default for ReceiptLog {
    /// Equivalent to `ReceiptLog::new()`: empty log at the genesis hash.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::GateDecision;
    // A receipt's hash must not be the zero genesis value.
    #[test]
    fn test_receipt_hash() {
        let receipt = WitnessReceipt {
            sequence: 0,
            token: PermitToken {
                decision: GateDecision::Permit,
                action_id: "test".to_string(),
                timestamp: 1000,
                ttl_ns: 60000,
                witness_hash: [0u8; 32],
                sequence: 0,
                signature: [0u8; 64],
            },
            previous_hash: [0u8; 32],
            witness_summary: WitnessSummary::empty(),
            timestamp_proof: TimestampProof {
                timestamp: 1000,
                previous_receipt_hash: [0u8; 32],
                merkle_root: [0u8; 32],
            },
        };
        let hash = receipt.hash();
        assert_ne!(hash, [0u8; 32]);
    }
    // Appending receipts whose previous_hash is last_hash() at each step
    // must produce a chain that verifies end to end.
    #[test]
    fn test_receipt_log_chain() {
        let mut log = ReceiptLog::new();
        for i in 0..3 {
            let receipt = WitnessReceipt {
                sequence: i,
                token: PermitToken {
                    decision: GateDecision::Permit,
                    action_id: format!("action-{}", i),
                    timestamp: 1000 + i,
                    ttl_ns: 60000,
                    witness_hash: [0u8; 32],
                    sequence: i,
                    signature: [0u8; 64],
                },
                previous_hash: log.last_hash(),
                witness_summary: WitnessSummary::empty(),
                timestamp_proof: TimestampProof {
                    timestamp: 1000 + i,
                    previous_receipt_hash: log.last_hash(),
                    merkle_root: [0u8; 32],
                },
            };
            log.append(receipt);
        }
        assert_eq!(log.len(), 3);
        assert!(log.verify_chain_to(2).is_ok());
    }
}

View File

@@ -0,0 +1,392 @@
//! Deterministic replay for auditing and debugging
//!
//! This module provides the ability to replay gate decisions for audit purposes,
//! ensuring that the same inputs produce the same outputs deterministically.
use crate::{GateDecision, WitnessReceipt, WitnessSummary};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Result of replaying a decision
///
/// Compares a re-derived decision against the one recorded in the receipt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayResult {
    /// The replayed decision
    pub decision: GateDecision,
    /// Whether the replay matched the original
    pub matched: bool,
    /// Original decision from receipt
    pub original_decision: GateDecision,
    /// State snapshot at decision time
    pub state_snapshot: WitnessSummary,
    /// Differences if any (empty when `matched` is true)
    pub differences: Vec<ReplayDifference>,
}
/// A single field-level discrepancy found during replay.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayDifference {
    /// Name of the field that differs (e.g. "decision")
    pub field: String,
    /// Value recorded in the original receipt (debug-formatted)
    pub original: String,
    /// Value produced by the replay (debug-formatted)
    pub replayed: String,
}
/// Serializable snapshot of arbiter state, used as a replay checkpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StateSnapshot {
    /// Sequence number the snapshot was taken at
    pub sequence: u64,
    /// Timestamp of the snapshot
    pub timestamp: u64,
    /// Global min-cut value at snapshot time
    pub global_min_cut: f64,
    /// Aggregate e-value at snapshot time
    pub aggregate_e_value: f64,
    /// Minimum coherence across tiles
    pub min_coherence: i16,
    /// Per-tile state, keyed by tile ID
    pub tile_states: HashMap<u8, TileSnapshot>,
}
/// Snapshot of a single tile's state at checkpoint time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TileSnapshot {
    /// Tile ID this snapshot belongs to
    pub tile_id: u8,
    /// Tile coherence at snapshot time
    pub coherence: i16,
    /// Tile e-value at snapshot time
    pub e_value: f32,
    /// Number of boundary edges reported by the tile
    pub boundary_edges: usize,
}
/// Engine for replaying decisions and managing replay checkpoints.
pub struct ReplayEngine {
    /// Snapshots keyed by the sequence number they were taken at
    checkpoints: HashMap<u64, StateSnapshot>,
    /// Only sequences that are multiples of this interval are checkpointed
    checkpoint_interval: u64,
}
impl ReplayEngine {
    /// Create a replay engine that checkpoints every `checkpoint_interval` sequences.
    pub fn new(checkpoint_interval: u64) -> Self {
        Self {
            checkpoints: HashMap::new(),
            checkpoint_interval,
        }
    }

    /// Store `snapshot` if `sequence` falls on a checkpoint boundary;
    /// snapshots at non-boundary sequences are silently dropped.
    ///
    /// NOTE(review): panics if the engine was constructed with a zero
    /// interval (modulo by zero) — confirm callers never pass 0.
    pub fn save_checkpoint(&mut self, sequence: u64, snapshot: StateSnapshot) {
        let on_boundary = sequence % self.checkpoint_interval == 0;
        if on_boundary {
            self.checkpoints.insert(sequence, snapshot);
        }
    }

    /// Latest checkpoint taken at or before `sequence`, if any.
    pub fn find_nearest_checkpoint(&self, sequence: u64) -> Option<(u64, &StateSnapshot)> {
        let mut best: Option<(u64, &StateSnapshot)> = None;
        for (&seq, snap) in &self.checkpoints {
            if seq <= sequence && best.map_or(true, |(b, _)| seq > b) {
                best = Some((seq, snap));
            }
        }
        best
    }

    /// Re-run the decision logic against the receipt's witness summary and
    /// compare the outcome with the recorded decision.
    pub fn replay(&self, receipt: &WitnessReceipt) -> ReplayResult {
        let summary = &receipt.witness_summary;
        let recorded = receipt.token.decision;
        let recomputed = self.reconstruct_decision(summary);
        let matched = recomputed == recorded;

        // Only the decision field itself is compared today.
        let differences = if matched {
            Vec::new()
        } else {
            vec![ReplayDifference {
                field: "decision".to_string(),
                original: format!("{:?}", recorded),
                replayed: format!("{:?}", recomputed),
            }]
        };

        ReplayResult {
            decision: recomputed,
            matched,
            original_decision: recorded,
            state_snapshot: summary.clone(),
            differences,
        }
    }

    /// Apply the same three-filter pipeline as TileZero:
    /// structural, then evidential, then predictive.
    fn reconstruct_decision(&self, summary: &WitnessSummary) -> GateDecision {
        // Structural filter: a fragile partition is an immediate deny.
        if summary.structural.partition == "fragile" {
            return GateDecision::Deny;
        }
        // Evidence filter: the recorded verdict drives deny/defer.
        match summary.evidential.verdict.as_str() {
            "reject" => return GateDecision::Deny,
            "continue" => return GateDecision::Defer,
            _ => {}
        }
        // Prediction filter: oversized conformal sets defer.
        if summary.predictive.set_size > 20 {
            return GateDecision::Defer;
        }
        GateDecision::Permit
    }

    /// Replay every receipt in order and collect per-receipt results.
    pub fn verify_sequence(&self, receipts: &[WitnessReceipt]) -> SequenceVerification {
        let results: Vec<(u64, ReplayResult)> = receipts
            .iter()
            .map(|receipt| (receipt.sequence, self.replay(receipt)))
            .collect();
        let all_matched = results.iter().all(|(_, r)| r.matched);
        SequenceVerification {
            total_receipts: receipts.len(),
            all_matched,
            results,
        }
    }

    /// Serialize a stored checkpoint to JSON bytes, if present.
    pub fn export_checkpoint(&self, sequence: u64) -> Option<Vec<u8>> {
        let snap = self.checkpoints.get(&sequence)?;
        serde_json::to_vec(snap).ok()
    }

    /// Restore a checkpoint from JSON bytes previously produced by
    /// [`Self::export_checkpoint`].
    pub fn import_checkpoint(&mut self, sequence: u64, data: &[u8]) -> Result<(), ReplayError> {
        match serde_json::from_slice::<StateSnapshot>(data) {
            Ok(snapshot) => {
                self.checkpoints.insert(sequence, snapshot);
                Ok(())
            }
            Err(_) => Err(ReplayError::InvalidCheckpoint),
        }
    }

    /// Drop every checkpoint taken strictly before `sequence`.
    pub fn prune_before(&mut self, sequence: u64) {
        self.checkpoints.retain(|&seq, _| seq >= sequence);
    }

    /// Number of checkpoints currently held.
    pub fn checkpoint_count(&self) -> usize {
        self.checkpoints.len()
    }
}
impl Default for ReplayEngine {
    /// Default engine checkpoints every 100 sequences.
    fn default() -> Self {
        Self::new(100)
    }
}
/// Result of verifying a sequence of receipts via replay.
#[derive(Debug)]
pub struct SequenceVerification {
    /// Total number of receipts verified
    pub total_receipts: usize,
    /// Whether every replay matched its recorded decision
    pub all_matched: bool,
    /// Per-receipt results as `(sequence, result)` pairs
    pub results: Vec<(u64, ReplayResult)>,
}
impl SequenceVerification {
    /// Iterate over the results whose replay did not match the original.
    pub fn mismatches(&self) -> impl Iterator<Item = &(u64, ReplayResult)> {
        self.results.iter().filter(|(_, r)| !r.matched)
    }
    /// Number of mismatching replays.
    pub fn mismatch_count(&self) -> usize {
        // Delegate to `mismatches` so the two methods share one predicate
        // and cannot drift apart.
        self.mismatches().count()
    }
}
/// Error during replay or checkpoint handling.
#[derive(Debug, thiserror::Error)]
pub enum ReplayError {
    /// No receipt is stored for the requested sequence.
    #[error("Receipt not found for sequence {sequence}")]
    ReceiptNotFound { sequence: u64 },
    /// No checkpoint is stored for the requested sequence.
    #[error("Checkpoint not found for sequence {sequence}")]
    CheckpointNotFound { sequence: u64 },
    /// Checkpoint bytes could not be deserialized.
    #[error("Invalid checkpoint data")]
    InvalidCheckpoint,
    /// State could not be rebuilt for replay.
    #[error("State reconstruction failed: {reason}")]
    ReconstructionFailed { reason: String },
    /// The receipt hash chain broke at the given sequence.
    #[error("Hash chain verification failed at sequence {sequence}")]
    ChainVerificationFailed { sequence: u64 },
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        EvidentialWitness, PermitToken, PredictiveWitness, StructuralWitness, TimestampProof,
    };

    /// A receipt whose witness data passes every filter, carrying `decision`.
    fn create_test_receipt(sequence: u64, decision: GateDecision) -> WitnessReceipt {
        let token = PermitToken {
            decision,
            action_id: format!("action-{}", sequence),
            timestamp: 1000 + sequence,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence,
            signature: [0u8; 64],
        };
        let witness_summary = WitnessSummary {
            structural: StructuralWitness {
                cut_value: 10.0,
                partition: "stable".to_string(),
                critical_edges: 0,
                boundary: vec![],
            },
            predictive: PredictiveWitness {
                set_size: 5,
                coverage: 0.9,
            },
            evidential: EvidentialWitness {
                e_value: 100.0,
                verdict: "accept".to_string(),
            },
        };
        let timestamp_proof = TimestampProof {
            timestamp: 1000 + sequence,
            previous_receipt_hash: [0u8; 32],
            merkle_root: [0u8; 32],
        };
        WitnessReceipt {
            sequence,
            token,
            previous_hash: [0u8; 32],
            witness_summary,
            timestamp_proof,
        }
    }

    /// A snapshot with fixed healthy values at the given sequence.
    fn snapshot_at(sequence: u64) -> StateSnapshot {
        StateSnapshot {
            sequence,
            timestamp: 1000 + sequence,
            global_min_cut: 10.0,
            aggregate_e_value: 100.0,
            min_coherence: 256,
            tile_states: HashMap::new(),
        }
    }

    #[test]
    fn test_replay_matching() {
        let engine = ReplayEngine::new(100);
        let result = engine.replay(&create_test_receipt(0, GateDecision::Permit));
        assert!(result.matched);
        assert_eq!(result.decision, GateDecision::Permit);
    }

    #[test]
    fn test_replay_mismatch() {
        let engine = ReplayEngine::new(100);
        let mut receipt = create_test_receipt(0, GateDecision::Permit);
        // A fragile partition forces the structural filter to deny on replay.
        receipt.witness_summary.structural.partition = "fragile".to_string();
        let result = engine.replay(&receipt);
        assert!(!result.matched);
        assert_eq!(result.decision, GateDecision::Deny);
        assert!(!result.differences.is_empty());
    }

    #[test]
    fn test_checkpoint_save_load() {
        let mut engine = ReplayEngine::new(10);
        engine.save_checkpoint(0, snapshot_at(0));
        assert_eq!(engine.checkpoint_count(), 1);
        let (seq, found) = engine.find_nearest_checkpoint(5).unwrap();
        assert_eq!(seq, 0);
        assert_eq!(found.global_min_cut, 10.0);
    }

    #[test]
    fn test_sequence_verification() {
        let engine = ReplayEngine::new(100);
        let receipts: Vec<_> = (0..3)
            .map(|i| create_test_receipt(i, GateDecision::Permit))
            .collect();
        let verification = engine.verify_sequence(&receipts);
        assert_eq!(verification.total_receipts, 3);
        assert!(verification.all_matched);
        assert_eq!(verification.mismatch_count(), 0);
    }

    #[test]
    fn test_prune_checkpoints() {
        let mut engine = ReplayEngine::new(10);
        for seq in (0..100u64).step_by(10) {
            engine.save_checkpoint(seq, snapshot_at(seq));
        }
        assert_eq!(engine.checkpoint_count(), 10);
        engine.prune_before(50);
        assert_eq!(engine.checkpoint_count(), 5);
    }

    #[test]
    fn test_checkpoint_export_import() {
        let mut engine = ReplayEngine::new(10);
        engine.save_checkpoint(0, snapshot_at(0));
        let exported = engine.export_checkpoint(0).unwrap();
        let mut engine2 = ReplayEngine::new(10);
        engine2.import_checkpoint(0, &exported).unwrap();
        assert_eq!(engine2.checkpoint_count(), 1);
    }
}

View File

@@ -0,0 +1,218 @@
//! Reduced supergraph from worker tile summaries
use crate::receipt::WitnessSummary;
use crate::{TileId, WitnessFragment};
use std::collections::HashMap;
/// Reduced supergraph maintained by TileZero, aggregated from worker tiles.
pub struct ReducedGraph {
    /// Latest coherence score reported per tile
    tile_coherence: HashMap<TileId, f32>,
    /// Global cut value, tightened by fragment updates
    global_cut_value: f64,
    /// Aggregated e-value derived from average tile coherence
    aggregated_e_value: f64,
    /// Current shift pressure
    shift_pressure: f64,
    /// Boundary edge count from the most recent fragment
    boundary_edges: usize,
}
impl ReducedGraph {
    /// Create a reduced graph with optimistic initial values.
    pub fn new() -> Self {
        Self {
            tile_coherence: HashMap::new(),
            // Both start high: the graph is assumed coherent until
            // fragments provide evidence otherwise.
            global_cut_value: 100.0,
            aggregated_e_value: 100.0,
            shift_pressure: 0.0,
            boundary_edges: 0,
        }
    }

    /// Fold a worker's witness fragment into the global view.
    pub fn update_from_fragment(&mut self, fragment: &WitnessFragment) {
        self.boundary_edges = fragment.boundary_edges.len();
        // The global cut only tightens: keep the minimum of the current
        // value and the fragment's local cut.
        self.global_cut_value = self.global_cut_value.min(fragment.cut_value as f64);
    }

    /// Record a tile's coherence and refresh the aggregate e-value.
    pub fn update_coherence(&mut self, tile_id: TileId, coherence: f32) {
        self.tile_coherence.insert(tile_id, coherence);
        let n = self.tile_coherence.len();
        if n > 0 {
            // Mean coherence across tiles, scaled into e-value space.
            let total: f32 = self.tile_coherence.values().sum();
            let mean = total / n as f32;
            self.aggregated_e_value = (mean as f64) * 100.0;
        }
    }

    /// Current global min-cut value.
    pub fn global_cut(&self) -> f64 {
        self.global_cut_value
    }

    /// Current aggregate shift pressure.
    pub fn aggregate_shift_pressure(&self) -> f64 {
        self.shift_pressure
    }

    /// Current aggregated e-value across tiles.
    pub fn aggregate_evidence(&self) -> f64 {
        self.aggregated_e_value
    }

    /// Summarize the graph into the three gate witnesses.
    pub fn witness_summary(&self) -> WitnessSummary {
        use crate::receipt::{EvidentialWitness, PredictiveWitness, StructuralWitness};

        // Bucket the cut value into a qualitative partition label.
        let partition = match self.global_cut_value {
            c if c >= 10.0 => "stable",
            c if c >= 5.0 => "marginal",
            _ => "fragile",
        };
        // Bucket the e-value into the e-process verdict.
        let verdict = match self.aggregated_e_value {
            e if e >= 100.0 => "accept",
            e if e >= 0.01 => "continue",
            _ => "reject",
        };
        WitnessSummary {
            structural: StructuralWitness {
                cut_value: self.global_cut_value,
                partition: partition.to_string(),
                critical_edges: self.boundary_edges,
                boundary: vec![],
            },
            predictive: PredictiveWitness {
                // Simplified placeholder prediction witness.
                set_size: 1,
                coverage: 0.95,
            },
            evidential: EvidentialWitness {
                e_value: self.aggregated_e_value,
                verdict: verdict.to_string(),
            },
        }
    }

    /// Override the shift pressure (test/external hook).
    pub fn set_shift_pressure(&mut self, pressure: f64) {
        self.shift_pressure = pressure;
    }

    /// Override the global cut value (test/external hook).
    pub fn set_global_cut(&mut self, cut: f64) {
        self.global_cut_value = cut;
    }

    /// Override the aggregated e-value (test/external hook).
    pub fn set_evidence(&mut self, evidence: f64) {
        self.aggregated_e_value = evidence;
    }
}
impl Default for ReducedGraph {
    /// Equivalent to [`ReducedGraph::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Structural filter: passes only graphs whose global min-cut clears a threshold.
pub struct StructuralFilter {
    /// Smallest acceptable global cut value
    min_cut: f64,
}
impl StructuralFilter {
    /// Build a filter with the given minimum-cut threshold.
    pub fn new(min_cut: f64) -> Self {
        Self { min_cut }
    }
    /// `true` when the graph's global cut is at or above the threshold.
    pub fn is_stable(&self, graph: &ReducedGraph) -> bool {
        let cut = graph.global_cut();
        cut >= self.min_cut
    }
}
/// Exponentially-smoothed tracker for distribution-shift pressure.
pub struct ShiftPressure {
    /// Smoothed pressure value
    current: f64,
    /// Pressure at or above which a shift is flagged
    threshold: f64,
}
impl ShiftPressure {
    /// Create a tracker that flags a shift once pressure reaches `threshold`.
    pub fn new(threshold: f64) -> Self {
        Self {
            current: 0.0,
            threshold,
        }
    }
    /// Blend a new observation into the pressure estimate using an
    /// exponential moving average with a 0.1 learning rate.
    pub fn update(&mut self, value: f64) {
        let decayed = 0.9 * self.current;
        self.current = decayed + 0.1 * value;
    }
    /// Whether the smoothed pressure has reached the threshold.
    pub fn is_shifting(&self) -> bool {
        self.current >= self.threshold
    }
    /// Current smoothed pressure.
    pub fn current(&self) -> f64 {
        self.current
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_reduced_graph() {
        let mut graph = ReducedGraph::new();
        // A fresh graph starts at the optimistic default cut value.
        assert!(graph.global_cut() >= 100.0);
        graph.update_coherence(1, 0.9);
        graph.update_coherence(2, 0.8);
        assert_eq!(graph.witness_summary().structural.partition, "stable");
    }

    #[test]
    fn test_structural_filter() {
        let filter = StructuralFilter::new(5.0);
        let mut graph = ReducedGraph::new();
        assert!(filter.is_stable(&graph));
        // Dropping the cut below the threshold flips the verdict.
        graph.set_global_cut(3.0);
        assert!(!filter.is_stable(&graph));
    }

    #[test]
    fn test_shift_pressure() {
        // Repeated high observations push the EMA past the threshold.
        let mut pressure = ShiftPressure::new(0.5);
        (0..20).for_each(|_| pressure.update(0.8));
        assert!(pressure.is_shifting());
    }
}

View File

@@ -0,0 +1,506 @@
//! Comprehensive tests for PERMIT/DEFER/DENY decision logic
//!
//! Tests cover:
//! - Three-filter decision pipeline
//! - Threshold configurations
//! - Edge cases and boundary conditions
//! - Security scenarios (policy violations, replay detection)
use cognitum_gate_tilezero::decision::{EvidenceDecision, GateDecision, GateThresholds};
#[cfg(test)]
mod gate_decision {
    use super::*;

    #[test]
    fn test_decision_display() {
        // Display is the lowercase wire form of each variant.
        for (decision, text) in [
            (GateDecision::Permit, "permit"),
            (GateDecision::Defer, "defer"),
            (GateDecision::Deny, "deny"),
        ] {
            assert_eq!(decision.to_string(), text);
        }
    }

    #[test]
    fn test_decision_equality() {
        // Variants equal themselves and differ from each other.
        let all = [GateDecision::Permit, GateDecision::Defer, GateDecision::Deny];
        for (i, a) in all.iter().enumerate() {
            for (j, b) in all.iter().enumerate() {
                if i == j {
                    assert_eq!(a, b);
                } else {
                    assert_ne!(a, b);
                }
            }
        }
    }
}
#[cfg(test)]
mod evidence_decision {
    use super::*;

    #[test]
    fn test_evidence_values() {
        // Each variant compares equal to itself.
        assert_eq!(EvidenceDecision::Accept, EvidenceDecision::Accept);
        assert_eq!(EvidenceDecision::Continue, EvidenceDecision::Continue);
        assert_eq!(EvidenceDecision::Reject, EvidenceDecision::Reject);
    }
}
#[cfg(test)]
mod threshold_configuration {
    use super::*;

    #[test]
    fn test_default_thresholds() {
        let t = GateThresholds::default();
        assert_eq!(t.tau_deny, 0.01);
        assert_eq!(t.tau_permit, 100.0);
        assert_eq!(t.min_cut, 5.0);
        assert_eq!(t.max_shift, 0.5);
        assert_eq!(t.permit_ttl_ns, 60_000_000_000);
    }

    #[test]
    fn test_custom_thresholds() {
        let t = GateThresholds {
            tau_deny: 0.05,
            tau_permit: 50.0,
            min_cut: 10.0,
            max_shift: 0.3,
            permit_ttl_ns: 30_000_000_000,
            theta_uncertainty: 15.0,
            theta_confidence: 3.0,
        };
        assert_eq!(t.tau_deny, 0.05);
        assert_eq!(t.tau_permit, 50.0);
        assert_eq!(t.min_cut, 10.0);
    }

    #[test]
    fn test_threshold_ordering() {
        // Typical e-process thresholds straddle 1: tau_deny < 1 < tau_permit.
        let t = GateThresholds::default();
        assert!(t.tau_deny < 1.0);
        assert!(t.tau_permit > 1.0);
        assert!(t.tau_deny < t.tau_permit);
    }

    #[test]
    fn test_conformal_thresholds() {
        // A smaller conformal set means more confidence, so the confident
        // threshold must sit below the uncertain one.
        let t = GateThresholds::default();
        assert!(t.theta_confidence < t.theta_uncertainty);
    }
}
#[cfg(test)]
mod three_filter_logic {
    use super::*;

    /// Mirror of the gate's three-filter pipeline:
    /// structural (min-cut) -> shift -> evidence (e-value).
    fn apply_three_filters(
        min_cut: f64,
        shift_pressure: f64,
        e_aggregate: f64,
        thresholds: &GateThresholds,
    ) -> GateDecision {
        if min_cut < thresholds.min_cut {
            GateDecision::Deny
        } else if shift_pressure >= thresholds.max_shift {
            GateDecision::Defer
        } else if e_aggregate < thresholds.tau_deny {
            GateDecision::Deny
        } else if e_aggregate < thresholds.tau_permit {
            GateDecision::Defer
        } else {
            GateDecision::Permit
        }
    }

    #[test]
    fn test_structural_filter_deny() {
        // A min-cut below the 5.0 threshold denies, whatever the evidence.
        let thresholds = GateThresholds::default();
        let decision = apply_three_filters(3.0, 0.1, 150.0, &thresholds);
        assert_eq!(decision, GateDecision::Deny);
    }

    #[test]
    fn test_shift_filter_defer() {
        // Shift pressure above 0.5 defers even with good cut and evidence.
        let thresholds = GateThresholds::default();
        let decision = apply_three_filters(10.0, 0.8, 150.0, &thresholds);
        assert_eq!(decision, GateDecision::Defer);
    }

    #[test]
    fn test_evidence_filter_deny() {
        // An e-value below tau_deny (0.01) is evidence against coherence.
        let thresholds = GateThresholds::default();
        let decision = apply_three_filters(10.0, 0.1, 0.005, &thresholds);
        assert_eq!(decision, GateDecision::Deny);
    }

    #[test]
    fn test_evidence_filter_defer() {
        // An e-value between tau_deny and tau_permit is inconclusive.
        let thresholds = GateThresholds::default();
        let decision = apply_three_filters(10.0, 0.1, 50.0, &thresholds);
        assert_eq!(decision, GateDecision::Defer);
    }

    #[test]
    fn test_all_filters_pass_permit() {
        // All three filters passing yields a permit.
        let thresholds = GateThresholds::default();
        let decision = apply_three_filters(10.0, 0.1, 150.0, &thresholds);
        assert_eq!(decision, GateDecision::Permit);
    }
}
#[cfg(test)]
mod boundary_conditions {
    use super::*;

    /// Structural filter in isolation: at/above threshold permits.
    fn decide_structural(min_cut: f64, thresholds: &GateThresholds) -> GateDecision {
        let passes = min_cut >= thresholds.min_cut;
        if passes {
            GateDecision::Permit
        } else {
            GateDecision::Deny
        }
    }

    /// Evidence filter in isolation, mapping an e-value to a verdict.
    fn decide_evidence(e_aggregate: f64, thresholds: &GateThresholds) -> EvidenceDecision {
        if e_aggregate < thresholds.tau_deny {
            EvidenceDecision::Reject
        } else if e_aggregate >= thresholds.tau_permit {
            EvidenceDecision::Accept
        } else {
            EvidenceDecision::Continue
        }
    }

    #[test]
    fn test_min_cut_at_threshold() {
        // Exactly at the threshold is acceptable (>= semantics).
        let thresholds = GateThresholds::default();
        assert_eq!(decide_structural(5.0, &thresholds), GateDecision::Permit);
    }

    #[test]
    fn test_min_cut_just_below() {
        let thresholds = GateThresholds::default();
        assert_eq!(decide_structural(4.999, &thresholds), GateDecision::Deny);
    }

    #[test]
    fn test_e_value_at_deny_threshold() {
        // Exactly at tau_deny keeps sampling rather than rejecting.
        let thresholds = GateThresholds::default();
        assert_eq!(decide_evidence(0.01, &thresholds), EvidenceDecision::Continue);
    }

    #[test]
    fn test_e_value_at_permit_threshold() {
        // Exactly at tau_permit accepts (>= semantics).
        let thresholds = GateThresholds::default();
        assert_eq!(decide_evidence(100.0, &thresholds), EvidenceDecision::Accept);
    }

    #[test]
    fn test_zero_values() {
        let thresholds = GateThresholds::default();
        assert_eq!(decide_structural(0.0, &thresholds), GateDecision::Deny);
        assert_eq!(decide_evidence(0.0, &thresholds), EvidenceDecision::Reject);
    }
}
#[cfg(test)]
mod filter_priority {
    use super::*;

    /// Structural failure wins even when evidence alone would permit.
    #[test]
    fn test_structural_overrides_evidence() {
        let thresholds = GateThresholds::default();
        let min_cut = 1.0; // fails the structural filter
        let e_aggregate = 1000.0; // would pass the evidence filter
        let decision = match () {
            _ if min_cut < thresholds.min_cut => GateDecision::Deny,
            _ if e_aggregate >= thresholds.tau_permit => GateDecision::Permit,
            _ => GateDecision::Defer,
        };
        assert_eq!(decision, GateDecision::Deny);
    }

    /// The shift filter is evaluated before the evidence filter.
    #[test]
    fn test_shift_overrides_evidence() {
        let thresholds = GateThresholds::default();
        let min_cut = 10.0; // passes structural
        let shift_pressure = 0.9; // fails shift
        let e_aggregate = 1000.0; // would pass evidence
        let decision = match () {
            _ if min_cut < thresholds.min_cut => GateDecision::Deny,
            _ if shift_pressure >= thresholds.max_shift => GateDecision::Defer,
            _ if e_aggregate >= thresholds.tau_permit => GateDecision::Permit,
            _ => GateDecision::Defer,
        };
        assert_eq!(decision, GateDecision::Defer);
    }
}
#[cfg(test)]
mod ttl_scenarios {
    use super::*;

    #[test]
    fn test_permit_ttl() {
        // Default permit TTL is 60 seconds, expressed in nanoseconds.
        assert_eq!(GateThresholds::default().permit_ttl_ns, 60_000_000_000);
    }

    #[test]
    fn test_custom_short_ttl() {
        // A 1-second TTL via struct-update syntax.
        let thresholds = GateThresholds {
            permit_ttl_ns: 1_000_000_000,
            ..Default::default()
        };
        assert_eq!(thresholds.permit_ttl_ns, 1_000_000_000);
    }

    #[test]
    fn test_custom_long_ttl() {
        // A 1-hour TTL via struct-update syntax.
        let thresholds = GateThresholds {
            permit_ttl_ns: 3600_000_000_000,
            ..Default::default()
        };
        assert_eq!(thresholds.permit_ttl_ns, 3600_000_000_000);
    }
}
#[cfg(test)]
mod extreme_values {
    use super::*;

    /// Evidence filter in isolation (same logic as the gate's e-process check).
    fn decide_evidence_full(e_aggregate: f64, thresholds: &GateThresholds) -> EvidenceDecision {
        if e_aggregate < thresholds.tau_deny {
            EvidenceDecision::Reject
        } else if e_aggregate >= thresholds.tau_permit {
            EvidenceDecision::Accept
        } else {
            EvidenceDecision::Continue
        }
    }

    /// Structural filter in isolation.
    fn decide_structural_full(min_cut: f64, thresholds: &GateThresholds) -> GateDecision {
        let passes = min_cut >= thresholds.min_cut;
        if passes {
            GateDecision::Permit
        } else {
            GateDecision::Deny
        }
    }

    #[test]
    fn test_very_high_e_value() {
        let thresholds = GateThresholds::default();
        assert_eq!(decide_evidence_full(1e10, &thresholds), EvidenceDecision::Accept);
    }

    #[test]
    fn test_very_low_e_value() {
        let thresholds = GateThresholds::default();
        assert_eq!(decide_evidence_full(1e-10, &thresholds), EvidenceDecision::Reject);
    }

    #[test]
    fn test_very_high_min_cut() {
        let thresholds = GateThresholds::default();
        assert_eq!(decide_structural_full(1000.0, &thresholds), GateDecision::Permit);
    }
}
#[cfg(test)]
mod serialization {
    use super::*;

    #[test]
    fn test_decision_serialization() {
        // Every variant must survive a JSON round-trip unchanged.
        for decision in [
            GateDecision::Permit,
            GateDecision::Defer,
            GateDecision::Deny,
        ] {
            let json = serde_json::to_string(&decision).unwrap();
            let restored: GateDecision = serde_json::from_str(&json).unwrap();
            assert_eq!(decision, restored);
        }
    }

    #[test]
    fn test_decision_json_values() {
        // Wire format is the lowercase variant name.
        let cases = [
            (GateDecision::Permit, "\"permit\""),
            (GateDecision::Defer, "\"defer\""),
            (GateDecision::Deny, "\"deny\""),
        ];
        for (decision, expected) in cases {
            assert_eq!(serde_json::to_string(&decision).unwrap(), expected);
        }
    }

    #[test]
    fn test_thresholds_serialization() {
        let original = GateThresholds::default();
        let json = serde_json::to_string(&original).unwrap();
        let restored: GateThresholds = serde_json::from_str(&json).unwrap();
        assert_eq!(original.tau_deny, restored.tau_deny);
        assert_eq!(original.tau_permit, restored.tau_permit);
        assert_eq!(original.min_cut, restored.min_cut);
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    /// The same three-filter pipeline exercised by the unit tests above.
    fn apply_filters(
        min_cut: f64,
        shift_pressure: f64,
        e_aggregate: f64,
        thresholds: &GateThresholds,
    ) -> GateDecision {
        if min_cut < thresholds.min_cut {
            return GateDecision::Deny;
        }
        if shift_pressure >= thresholds.max_shift {
            return GateDecision::Defer;
        }
        if e_aggregate < thresholds.tau_deny {
            return GateDecision::Deny;
        }
        if e_aggregate < thresholds.tau_permit {
            return GateDecision::Defer;
        }
        GateDecision::Permit
    }

    proptest! {
        /// A permit implies every individual filter passed.
        #[test]
        fn prop_permit_requires_all_pass(
            min_cut in 0.0f64..100.0,
            shift in 0.0f64..1.0,
            e_val in 0.001f64..1000.0
        ) {
            let thresholds = GateThresholds::default();
            let structural_ok = min_cut >= thresholds.min_cut;
            let shift_ok = shift < thresholds.max_shift;
            let evidence_ok = e_val >= thresholds.tau_permit;
            let decision = apply_filters(min_cut, shift, e_val, &thresholds);
            if decision == GateDecision::Permit {
                assert!(structural_ok && shift_ok && evidence_ok);
            }
        }

        /// Structural failure denies no matter how strong the evidence is.
        #[test]
        fn prop_structural_fail_is_deny(min_cut in 0.0f64..4.9) {
            let thresholds = GateThresholds::default();
            let decision = apply_filters(min_cut, 0.0, 1000.0, &thresholds);
            assert_eq!(decision, GateDecision::Deny);
        }

        /// Sub-tau_deny evidence denies once the structural filter passes.
        #[test]
        fn prop_evidence_deny_threshold(e_val in 0.0f64..0.009) {
            let thresholds = GateThresholds::default();
            let decision = apply_filters(100.0, 0.0, e_val, &thresholds);
            assert_eq!(decision, GateDecision::Deny);
        }
    }
}

View File

@@ -0,0 +1,578 @@
//! Comprehensive tests for report merging from multiple tiles
//!
//! Tests cover:
//! - Merging strategies (SimpleAverage, WeightedAverage, Median, Maximum, BFT)
//! - Edge cases (empty reports, conflicting epochs)
//! - Node and edge aggregation
//! - Property-based tests for merge invariants
use cognitum_gate_tilezero::merge::{
EdgeSummary, MergeError, MergeStrategy, MergedReport, NodeSummary, ReportMerger, WorkerReport,
};
/// A worker report for `tile_id` at `epoch` with fixed confidence (0.9)
/// and local min-cut (1.0).
fn create_test_report(tile_id: u8, epoch: u64) -> WorkerReport {
    let mut report = WorkerReport::new(tile_id, epoch);
    report.confidence = 0.9;
    report.local_mincut = 1.0;
    report
}
/// Add a node summary with a fixed edge count (5) to `report`.
fn add_test_node(report: &mut WorkerReport, id: &str, weight: f64, coherence: f64) {
    report.add_node(NodeSummary {
        id: id.to_string(),
        weight,
        edge_count: 5,
        coherence,
    });
}
/// Add a boundary edge from `source` to `target` to `report`.
fn add_test_boundary_edge(report: &mut WorkerReport, source: &str, target: &str, capacity: f64) {
    report.add_boundary_edge(EdgeSummary {
        source: source.to_string(),
        target: target.to_string(),
        capacity,
        is_boundary: true,
    });
}
#[cfg(test)]
mod basic_merging {
    use super::*;

    #[test]
    fn test_merge_single_report() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut report = create_test_report(1, 0);
        add_test_node(&mut report, "node1", 1.0, 0.9);
        let merged = merger.merge(&[report]).unwrap();
        assert_eq!(merged.worker_count, 1);
        assert_eq!(merged.epoch, 0);
        assert!(merged.super_nodes.contains_key("node1"));
    }

    #[test]
    fn test_merge_multiple_reports() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut reports = Vec::new();
        for i in 1..=3u8 {
            let mut report = create_test_report(i, 0);
            add_test_node(&mut report, "node1", f64::from(i) * 0.1, 0.9);
            reports.push(report);
        }
        let merged = merger.merge(&reports).unwrap();
        assert_eq!(merged.worker_count, 3);
        // Simple average of 0.1, 0.2 and 0.3.
        let node = merged.super_nodes.get("node1").unwrap();
        assert!((node.weight - 0.2).abs() < 0.001);
    }

    #[test]
    fn test_merge_empty_reports() {
        // Merging nothing is an explicit error, not an empty result.
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        assert!(matches!(merger.merge(&[]), Err(MergeError::EmptyReports)));
    }

    #[test]
    fn test_merge_conflicting_epochs() {
        // Reports from different epochs must not be merged together.
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports = vec![create_test_report(1, 0), create_test_report(2, 1)];
        assert!(matches!(merger.merge(&reports), Err(MergeError::ConflictingEpochs)));
    }
}
#[cfg(test)]
mod merge_strategies {
    use super::*;

    /// One report per weight, all for node "node" with coherence 0.9,
    /// tile IDs assigned sequentially from 0.
    fn reports_with_weights(weights: &[f64]) -> Vec<WorkerReport> {
        weights
            .iter()
            .enumerate()
            .map(|(i, &w)| {
                let mut r = create_test_report(i as u8, 0);
                add_test_node(&mut r, "node", w, 0.9);
                r
            })
            .collect()
    }

    /// Merge `weights` under `strategy` and return the merged weight of "node".
    fn merged_weight(strategy: MergeStrategy, weights: &[f64]) -> f64 {
        let merger = ReportMerger::new(strategy);
        let merged = merger.merge(&reports_with_weights(weights)).unwrap();
        merged.super_nodes.get("node").unwrap().weight
    }

    #[test]
    fn test_simple_average() {
        // (1 + 2 + 3) / 3 = 2
        let w = merged_weight(MergeStrategy::SimpleAverage, &[1.0, 2.0, 3.0]);
        assert!((w - 2.0).abs() < 0.001);
    }

    #[test]
    fn test_weighted_average() {
        // Coherence acts as the weight:
        // (1.0 * 0.9 + 3.0 * 0.3) / (0.9 + 0.3) = 1.8 / 1.2 = 1.5
        let merger = ReportMerger::new(MergeStrategy::WeightedAverage);
        let mut r1 = create_test_report(1, 0);
        add_test_node(&mut r1, "node", 1.0, 0.9);
        let mut r2 = create_test_report(2, 0);
        add_test_node(&mut r2, "node", 3.0, 0.3);
        let merged = merger.merge(&[r1, r2]).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!((node.weight - 1.5).abs() < 0.001);
    }

    #[test]
    fn test_median() {
        // Median of {1, 5, 2, 8, 3} is 3.
        let w = merged_weight(MergeStrategy::Median, &[1.0, 5.0, 2.0, 8.0, 3.0]);
        assert!((w - 3.0).abs() < 0.001);
    }

    #[test]
    fn test_median_even_count() {
        // Even count: median is the mean of the two middle values, 2.5.
        let w = merged_weight(MergeStrategy::Median, &[1.0, 2.0, 3.0, 4.0]);
        assert!((w - 2.5).abs() < 0.001);
    }

    #[test]
    fn test_maximum() {
        let w = merged_weight(MergeStrategy::Maximum, &[1.0, 5.0, 2.0, 8.0, 3.0]);
        assert!((w - 8.0).abs() < 0.001);
    }

    #[test]
    fn test_byzantine_fault_tolerant() {
        // 4 honest reports (2.0) and 2 Byzantine outliers (100.0): the BFT
        // merge keeps the lower two-thirds of sorted values, excluding the
        // outliers.
        let w = merged_weight(
            MergeStrategy::ByzantineFaultTolerant,
            &[2.0, 2.0, 2.0, 2.0, 100.0, 100.0],
        );
        assert!(w < 50.0); // Must not be dragged toward 100.0
    }
}
#[cfg(test)]
mod edge_merging {
    use super::*;

    #[test]
    fn test_merge_boundary_edges() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut r1 = create_test_report(1, 0);
        add_test_boundary_edge(&mut r1, "A", "B", 1.0);
        add_test_boundary_edge(&mut r1, "B", "C", 2.0);
        let mut r2 = create_test_report(2, 0);
        // Same A-B edge with a different capacity, plus a new edge.
        add_test_boundary_edge(&mut r2, "A", "B", 3.0);
        add_test_boundary_edge(&mut r2, "C", "D", 4.0);

        let merged = merger.merge(&[r1, r2]).unwrap();
        // A-B, B-C and C-D: three unique edges in total.
        assert_eq!(merged.boundary_edges.len(), 3);

        let ab_edge = merged
            .boundary_edges
            .iter()
            .find(|e| {
                let forward = e.source == "A" && e.target == "B";
                let reverse = e.source == "B" && e.target == "A";
                forward || reverse
            })
            .unwrap();
        // Capacities 1.0 and 3.0 average to 2.0 across the two reports.
        assert!((ab_edge.capacity - 2.0).abs() < 0.001);
        assert_eq!(ab_edge.report_count, 2);
    }

    #[test]
    fn test_edge_normalization() {
        // A->B and B->A must normalize to a single undirected edge.
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut r1 = create_test_report(1, 0);
        add_test_boundary_edge(&mut r1, "A", "B", 1.0);
        let mut r2 = create_test_report(2, 0);
        add_test_boundary_edge(&mut r2, "B", "A", 1.0);
        let merged = merger.merge(&[r1, r2]).unwrap();
        assert_eq!(merged.boundary_edges.len(), 1);
        assert_eq!(merged.boundary_edges[0].report_count, 2);
    }
}
#[cfg(test)]
mod node_aggregation {
    use super::*;

    // Every worker that reported a node is listed among its contributors.
    #[test]
    fn test_contributors_tracked() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        add_test_node(&mut first, "node", 1.0, 0.9);
        let mut second = create_test_report(2, 0);
        add_test_node(&mut second, "node", 2.0, 0.9);
        let merged = merger.merge(&[first, second]).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        for worker in [1, 2] {
            assert!(node.contributors.contains(&worker));
        }
    }

    // Edge counts from individual reports accumulate by summation.
    #[test]
    fn test_edge_count_summed() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        first.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 10,
            coherence: 0.9,
        });
        let mut second = create_test_report(2, 0);
        second.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 20,
            coherence: 0.9,
        });
        let merged = merger.merge(&[first, second]).unwrap();
        // 10 + 20 edges across the two reports.
        assert_eq!(merged.super_nodes.get("node").unwrap().total_edge_count, 30);
    }

    // Coherence values are combined by arithmetic mean.
    #[test]
    fn test_coherence_averaged() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        first.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 5,
            coherence: 0.8,
        });
        let mut second = create_test_report(2, 0);
        second.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 5,
            coherence: 0.6,
        });
        let merged = merger.merge(&[first, second]).unwrap();
        // mean(0.8, 0.6) == 0.7
        let node = merged.super_nodes.get("node").unwrap();
        assert!((node.avg_coherence - 0.7).abs() < 0.001);
    }
}
#[cfg(test)]
mod global_mincut_estimate {
    use super::*;

    // Workers' local min-cut values feed a positive global estimate.
    #[test]
    fn test_mincut_from_local_values() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports: Vec<_> = (0u8..3)
            .map(|i| {
                let mut report = create_test_report(i, 0);
                report.local_mincut = 1.0 + f64::from(i);
                report
            })
            .collect();
        let merged = merger.merge(&reports).unwrap();
        assert!(merged.global_mincut_estimate > 0.0);
    }

    // Boundary edges contribute to the estimate as well.
    #[test]
    fn test_mincut_with_boundaries() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut report = create_test_report(1, 0);
        report.local_mincut = 5.0;
        add_test_boundary_edge(&mut report, "A", "B", 1.0);
        let merged = merger.merge(&[report]).unwrap();
        assert!(merged.global_mincut_estimate > 0.0);
    }
}
#[cfg(test)]
mod confidence_aggregation {
    use super::*;

    // Identical confidences: the geometric mean equals the common value.
    #[test]
    fn test_geometric_mean_confidence() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports: Vec<_> = (0u8..3)
            .map(|i| {
                let mut report = create_test_report(i, 0);
                report.confidence = 0.8;
                report
            })
            .collect();
        let merged = merger.merge(&reports).unwrap();
        // geometric_mean(0.8, 0.8, 0.8) == 0.8
        assert!((merged.confidence - 0.8).abs() < 0.001);
    }

    // BFT aggregation discounts low-confidence outliers.
    #[test]
    fn test_bft_confidence() {
        let merger = ReportMerger::new(MergeStrategy::ByzantineFaultTolerant);
        // Three consistent workers plus two low-confidence outliers.
        let confidences = [0.9, 0.85, 0.88, 0.2, 0.1];
        let reports: Vec<_> = confidences
            .iter()
            .enumerate()
            .map(|(i, &c)| {
                let mut report = create_test_report(i as u8, 0);
                report.confidence = c;
                report
            })
            .collect();
        let merged = merger.merge(&reports).unwrap();
        // The 0.1 / 0.2 outliers must not drag the estimate down.
        assert!(merged.confidence > 0.5);
    }
}
#[cfg(test)]
mod state_hash {
    use super::*;

    // Hashing a populated report yields a non-zero digest.
    #[test]
    fn test_state_hash_computed() {
        let mut report = create_test_report(1, 0);
        add_test_node(&mut report, "node1", 1.0, 0.9);
        report.compute_state_hash();
        assert_ne!(report.state_hash, [0u8; 32]);
    }

    // Identical report contents hash identically.
    #[test]
    fn test_state_hash_deterministic() {
        let build = || {
            let mut report = create_test_report(1, 0);
            add_test_node(&mut report, "node1", 1.0, 0.9);
            report.compute_state_hash();
            report
        };
        assert_eq!(build().state_hash, build().state_hash);
    }

    // Changing report data (here: a node weight) changes the digest.
    #[test]
    fn test_state_hash_changes_with_data() {
        let build = |weight: f64| {
            let mut report = create_test_report(1, 0);
            add_test_node(&mut report, "node1", weight, 0.9);
            report.compute_state_hash();
            report
        };
        assert_ne!(build(1.0).state_hash, build(2.0).state_hash);
    }
}
#[cfg(test)]
mod multiple_nodes {
    use super::*;

    // Non-overlapping node sets merge into their union.
    #[test]
    fn test_merge_disjoint_nodes() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        add_test_node(&mut first, "node_a", 1.0, 0.9);
        let mut second = create_test_report(2, 0);
        add_test_node(&mut second, "node_b", 2.0, 0.9);
        let merged = merger.merge(&[first, second]).unwrap();
        assert_eq!(merged.super_nodes.len(), 2);
        for key in ["node_a", "node_b"] {
            assert!(merged.super_nodes.contains_key(key));
        }
    }

    // Overlapping ids average; unique ids pass through untouched.
    #[test]
    fn test_merge_overlapping_nodes() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        add_test_node(&mut first, "shared", 1.0, 0.9);
        add_test_node(&mut first, "only_r1", 2.0, 0.9);
        let mut second = create_test_report(2, 0);
        add_test_node(&mut second, "shared", 3.0, 0.9);
        add_test_node(&mut second, "only_r2", 4.0, 0.9);
        let merged = merger.merge(&[first, second]).unwrap();
        assert_eq!(merged.super_nodes.len(), 3);
        // mean(1.0, 3.0) == 2.0 for the shared node.
        let shared = merged.super_nodes.get("shared").unwrap();
        assert!((shared.weight - 2.0).abs() < 0.001);
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        // The merged report carries the workers' common epoch through.
        #[test]
        fn prop_merge_preserves_epoch(epoch in 0u64..1000) {
            let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
            let merged = merger
                .merge(&[create_test_report(1, epoch), create_test_report(2, epoch)])
                .unwrap();
            assert_eq!(merged.epoch, epoch);
        }

        // worker_count reflects how many reports were merged.
        #[test]
        fn prop_merge_counts_workers(n in 1usize..10) {
            let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
            let reports: Vec<_> = (0..n).map(|i| create_test_report(i as u8, 0)).collect();
            assert_eq!(merger.merge(&reports).unwrap().worker_count, n);
        }

        // An average always lies within [min, max] of its inputs.
        #[test]
        fn prop_average_in_range(weights in proptest::collection::vec(0.1f64..100.0, 2..10)) {
            let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
            let reports: Vec<_> = weights
                .iter()
                .enumerate()
                .map(|(i, &w)| {
                    let mut report = create_test_report(i as u8, 0);
                    add_test_node(&mut report, "node", w, 0.9);
                    report
                })
                .collect();
            let merged = merger.merge(&reports).unwrap();
            let node = merged.super_nodes.get("node").unwrap();
            let lo = weights.iter().copied().fold(f64::INFINITY, f64::min);
            let hi = weights.iter().copied().fold(f64::NEG_INFINITY, f64::max);
            assert!(lo <= node.weight && node.weight <= hi);
        }

        // Maximum strategy returns exactly the largest input weight.
        #[test]
        fn prop_maximum_is_largest(weights in proptest::collection::vec(0.1f64..100.0, 2..10)) {
            let merger = ReportMerger::new(MergeStrategy::Maximum);
            let reports: Vec<_> = weights
                .iter()
                .enumerate()
                .map(|(i, &w)| {
                    let mut report = create_test_report(i as u8, 0);
                    add_test_node(&mut report, "node", w, 0.9);
                    report
                })
                .collect();
            let merged = merger.merge(&reports).unwrap();
            let hi = weights.iter().copied().fold(f64::NEG_INFINITY, f64::max);
            assert!((merged.super_nodes.get("node").unwrap().weight - hi).abs() < 0.001);
        }
    }
}

View File

@@ -0,0 +1,608 @@
//! Comprehensive tests for permit token signing and verification
//!
//! Tests cover:
//! - Token creation and signing
//! - Signature verification
//! - TTL validation
//! - Security tests (invalid signatures, replay attacks, tamper detection)
use cognitum_gate_tilezero::permit::{
PermitState, PermitToken, TokenDecodeError, Verifier, VerifyError,
};
use cognitum_gate_tilezero::GateDecision;
/// Build an unsigned permit token for tests: zeroed witness hash and
/// signature, 60-second TTL, timestamped "now".
fn create_test_token(action_id: &str, sequence: u64) -> PermitToken {
    let now_ns = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos() as u64;
    PermitToken {
        decision: GateDecision::Permit,
        action_id: action_id.to_owned(),
        timestamp: now_ns,
        ttl_ns: 60_000_000_000, // 60 seconds
        witness_hash: [0u8; 32],
        sequence,
        signature: [0u8; 64],
    }
}
#[cfg(test)]
mod token_creation {
    use super::*;

    // A freshly built token carries the requested fields plus the defaults.
    #[test]
    fn test_token_fields() {
        let token = create_test_token("test-action", 42);
        assert_eq!(token.action_id, "test-action");
        assert_eq!(token.sequence, 42);
        assert_eq!(token.decision, GateDecision::Permit);
        assert!(token.timestamp > 0);
        assert_eq!(token.ttl_ns, 60_000_000_000);
    }

    // All three gate decisions can be carried by a token.
    #[test]
    fn test_token_with_different_decisions() {
        let base = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let deferred = PermitToken {
            decision: GateDecision::Defer,
            ..base.clone()
        };
        let denied = PermitToken {
            decision: GateDecision::Deny,
            ..base.clone()
        };
        assert_eq!(base.decision, GateDecision::Permit);
        assert_eq!(deferred.decision, GateDecision::Defer);
        assert_eq!(denied.decision, GateDecision::Deny);
    }
}
#[cfg(test)]
mod ttl_validation {
    use super::*;

    // Token pinned at a specific timestamp and TTL, everything else zeroed.
    fn token_at(timestamp: u64, ttl_ns: u64) -> PermitToken {
        PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp,
            ttl_ns,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        }
    }

    // A fresh 60s token is valid now and halfway through its lifetime.
    #[test]
    fn test_token_valid_within_ttl() {
        let now_ns = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos() as u64;
        let token = token_at(now_ns, 60_000_000_000);
        assert!(token.is_valid_time(now_ns));
        assert!(token.is_valid_time(now_ns + 30_000_000_000));
    }

    // One nanosecond past timestamp + ttl the token is dead.
    #[test]
    fn test_token_invalid_after_ttl() {
        let (start, ttl) = (1000000000u64, 60_000_000_000u64);
        assert!(!token_at(start, ttl).is_valid_time(start + ttl + 1));
    }

    // The expiry instant itself still counts as valid (inclusive bound).
    #[test]
    fn test_token_valid_at_exactly_expiry() {
        let (start, ttl) = (1000000000u64, 60_000_000_000u64);
        assert!(token_at(start, ttl).is_valid_time(start + ttl));
    }

    // ttl_ns == 0: valid only at the exact issue timestamp.
    #[test]
    fn test_zero_ttl() {
        let start = 1000000000u64;
        let token = token_at(start, 0);
        assert!(token.is_valid_time(start));
        assert!(!token.is_valid_time(start + 1));
    }
}
#[cfg(test)]
mod signing {
    use super::*;

    // Constructing state and obtaining a verifier must not panic.
    #[test]
    fn test_permit_state_creation() {
        let state = PermitState::new();
        let _verifier = state.verifier();
    }

    // Signing writes a non-zero signature into the token.
    #[test]
    fn test_sign_token() {
        let state = PermitState::new();
        let signed = state.sign_token(create_test_token("test-action", 0));
        assert_ne!(signed.signature, [0u8; 64]);
    }

    // Distinct tokens yield distinct signatures under one key.
    #[test]
    fn test_sign_different_tokens_different_macs() {
        let state = PermitState::new();
        let first = state.sign_token(create_test_token("action-1", 0));
        let second = state.sign_token(create_test_token("action-2", 1));
        assert_ne!(first.signature, second.signature);
    }

    // Signing is deterministic: same key + same content => same bytes.
    #[test]
    fn test_sign_deterministic() {
        let state = PermitState::new();
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let first = state.sign_token(token.clone());
        let second = state.sign_token(token);
        assert_eq!(first.signature, second.signature);
    }

    // next_sequence() counts monotonically from zero.
    #[test]
    fn test_sequence_incrementing() {
        let state = PermitState::new();
        for expected in 0..3u64 {
            assert_eq!(state.next_sequence(), expected);
        }
    }
}
#[cfg(test)]
mod verification {
    use super::*;

    // Round trip: a token signed by the state passes its own verifier.
    #[test]
    fn test_verify_signed_token() {
        let state = PermitState::new();
        let signed = state.sign_token(create_test_token("test-action", 0));
        assert!(state.verifier().verify(&signed).is_ok());
    }

    // An all-zero (never-signed) signature must be rejected.
    #[test]
    fn test_verify_unsigned_token_fails() {
        let state = PermitState::new();
        let unsigned = create_test_token("test-action", 0);
        assert!(
            state.verifier().verify(&unsigned).is_err(),
            "Unsigned token should fail verification"
        );
    }

    // verify_full also enforces the TTL window.
    #[test]
    fn test_verify_full_checks_ttl() {
        let state = PermitState::new();
        // Issued at t=1ns with a 1ns TTL: long expired by now.
        let stale = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1,
            ttl_ns: 1,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed = state.sign_token(stale);
        assert!(matches!(
            state.verifier().verify_full(&signed),
            Err(VerifyError::Expired)
        ));
    }
}
#[cfg(test)]
mod signable_content {
    use super::*;

    // Same token => identical signable bytes on repeated calls.
    #[test]
    fn test_signable_content_deterministic() {
        let token = create_test_token("test", 42);
        assert_eq!(token.signable_content(), token.signable_content());
    }

    // Any signed field (here: sequence) changes the signable bytes.
    #[test]
    fn test_signable_content_changes_with_fields() {
        let base = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let mut bumped = base.clone();
        bumped.sequence = 1;
        assert_ne!(base.signable_content(), bumped.signable_content());
    }

    // The signature itself is excluded from the signed content.
    #[test]
    fn test_signable_content_excludes_mac() {
        let mut left = create_test_token("test", 0);
        let mut right = left.clone();
        left.signature = [1u8; 64];
        right.signature = [2u8; 64];
        assert_eq!(left.signable_content(), right.signable_content());
    }

    // The decision variant is part of the signed content.
    #[test]
    fn test_signable_content_includes_decision() {
        let permit = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let deny = PermitToken {
            decision: GateDecision::Deny,
            ..permit.clone()
        };
        assert_ne!(permit.signable_content(), deny.signable_content());
    }
}
#[cfg(test)]
mod base64_encoding {
    use super::*;

    // Encode/decode round trip preserves the token's fields.
    #[test]
    fn test_encode_decode_roundtrip() {
        let original = create_test_token("test-action", 42);
        let decoded = PermitToken::decode_base64(&original.encode_base64()).unwrap();
        assert_eq!(original.action_id, decoded.action_id);
        assert_eq!(original.sequence, decoded.sequence);
        assert_eq!(original.decision, decoded.decision);
    }

    // Input that is not base64 at all is rejected up front.
    #[test]
    fn test_decode_invalid_base64() {
        assert!(matches!(
            PermitToken::decode_base64("not valid base64!!!"),
            Err(TokenDecodeError::InvalidBase64)
        ));
    }

    // Well-formed base64 wrapping non-JSON bytes fails at the JSON stage.
    #[test]
    fn test_decode_invalid_json() {
        let encoded =
            base64::Engine::encode(&base64::engine::general_purpose::STANDARD, b"not json");
        assert!(matches!(
            PermitToken::decode_base64(&encoded),
            Err(TokenDecodeError::InvalidJson)
        ));
    }

    // The signature survives the encode/decode round trip untouched.
    #[test]
    fn test_signed_token_encode_decode() {
        let state = PermitState::new();
        let signed = state.sign_token(create_test_token("test", 0));
        let decoded = PermitToken::decode_base64(&signed.encode_base64()).unwrap();
        assert_eq!(signed.signature, decoded.signature);
    }
}
#[cfg(test)]
mod security_tests {
    use super::*;

    /// Two independent keys must never produce the same signature.
    #[test]
    fn test_different_keys_different_signatures() {
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed_a = PermitState::new().sign_token(token.clone());
        let signed_b = PermitState::new().sign_token(token);
        assert_ne!(signed_a.signature, signed_b.signature);
    }

    /// A token signed under one key fails another key's verifier.
    #[test]
    fn test_cross_key_verification_fails() {
        let signer = PermitState::new();
        let other = PermitState::new();
        let signed = signer.sign_token(create_test_token("test", 0));
        assert!(
            other.verifier().verify(&signed).is_err(),
            "Cross-key verification should fail"
        );
    }

    /// Any post-signing mutation must invalidate the signature.
    #[test]
    fn test_tamper_detection() {
        let state = PermitState::new();
        let verifier = state.verifier();
        let mut signed = state.sign_token(create_test_token("test", 0));
        assert!(verifier.verify(&signed).is_ok(), "Original should verify");
        signed.action_id = "tampered".to_string();
        assert!(
            verifier.verify(&signed).is_err(),
            "Tampered token should fail verification"
        );
    }

    /// Monotonic sequences give each permit a unique signature, blocking replay.
    #[test]
    fn test_sequence_prevents_replay() {
        let state = PermitState::new();
        let first = state.sign_token(create_test_token("test", state.next_sequence()));
        let second = state.sign_token(create_test_token("test", state.next_sequence()));
        assert_ne!(first.sequence, second.sequence);
        assert_ne!(first.signature, second.signature);
    }

    /// The witness hash is bound into the signature.
    #[test]
    fn test_witness_hash_binding() {
        let state = PermitState::new();
        let mut with_one = create_test_token("test", 0);
        with_one.witness_hash = [1u8; 32];
        let mut with_two = create_test_token("test", 0);
        with_two.witness_hash = [2u8; 32];
        let signed_one = state.sign_token(with_one);
        let signed_two = state.sign_token(with_two);
        assert_ne!(signed_one.signature, signed_two.signature);
    }
}
#[cfg(test)]
mod custom_key {
    use super::*;
    use ed25519_dalek::SigningKey;
    use rand::rngs::OsRng;

    // A state built around a caller-supplied key signs and verifies normally.
    #[test]
    fn test_with_custom_key() {
        let state = PermitState::with_key(SigningKey::generate(&mut OsRng));
        let signed = state.sign_token(create_test_token("test", 0));
        assert!(state.verifier().verify(&signed).is_ok());
    }

    // Two states seeded from identical key bytes sign identically.
    #[test]
    fn test_same_key_same_signatures() {
        let seed: [u8; 32] = [42u8; 32];
        let state_a = PermitState::with_key(SigningKey::from_bytes(&seed));
        let state_b = PermitState::with_key(SigningKey::from_bytes(&seed));
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        assert_eq!(
            state_a.sign_token(token.clone()).signature,
            state_b.sign_token(token).signature
        );
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        // Arbitrary ids/sequences/TTLs survive the base64 round trip.
        #[test]
        fn prop_encode_decode_roundtrip(
            action_id in "[a-z]{1,20}",
            sequence in 0u64..1000,
            ttl in 1u64..1000000000
        ) {
            let token = PermitToken {
                decision: GateDecision::Permit,
                action_id,
                timestamp: 1000000000,
                ttl_ns: ttl,
                witness_hash: [0u8; 32],
                sequence,
                signature: [0u8; 64],
            };
            let decoded = PermitToken::decode_base64(&token.encode_base64()).unwrap();
            assert_eq!(token.action_id, decoded.action_id);
            assert_eq!(token.sequence, decoded.sequence);
        }

        // Validity holds on [timestamp, timestamp + ttl] and fails beyond it.
        #[test]
        fn prop_ttl_validity(timestamp in 1u64..1000000000000u64, ttl in 1u64..1000000000000u64) {
            let token = PermitToken {
                decision: GateDecision::Permit,
                action_id: "test".to_string(),
                timestamp,
                ttl_ns: ttl,
                witness_hash: [0u8; 32],
                sequence: 0,
                signature: [0u8; 64],
            };
            assert!(token.is_valid_time(timestamp));
            if ttl > 1 {
                assert!(token.is_valid_time(timestamp + ttl - 1));
            }
            assert!(!token.is_valid_time(timestamp + ttl + 1));
        }

        // Signing never leaves the signature all-zero.
        #[test]
        fn prop_signing_adds_mac(action_id in "[a-z]{1,10}") {
            let token = PermitToken {
                decision: GateDecision::Permit,
                action_id,
                timestamp: 1000000000,
                ttl_ns: 60000,
                witness_hash: [0u8; 32],
                sequence: 0,
                signature: [0u8; 64],
            };
            assert_ne!(PermitState::new().sign_token(token).signature, [0u8; 64]);
        }
    }
}

View File

@@ -0,0 +1,544 @@
//! Comprehensive tests for witness receipts and hash chain integrity
//!
//! Tests cover:
//! - Receipt creation and hashing
//! - Hash chain verification
//! - Tamper detection
//! - Security tests (chain manipulation, replay attacks)
use cognitum_gate_tilezero::permit::PermitToken;
use cognitum_gate_tilezero::receipt::{
EvidentialWitness, PredictiveWitness, ReceiptLog, StructuralWitness, TimestampProof,
WitnessReceipt, WitnessSummary,
};
use cognitum_gate_tilezero::GateDecision;
/// Unsigned permit token whose timestamp advances 1000ns per sequence step,
/// so chained receipts get strictly increasing timestamps.
fn create_test_token(sequence: u64, action_id: &str) -> PermitToken {
    PermitToken {
        decision: GateDecision::Permit,
        action_id: action_id.to_owned(),
        timestamp: 1000000000 + sequence * 1000,
        ttl_ns: 60_000_000_000,
        witness_hash: [0u8; 32],
        sequence,
        signature: [0u8; 64],
    }
}
/// Witness summary populated with fixed, representative values for each
/// of the three facets (structural / predictive / evidential).
fn create_test_summary() -> WitnessSummary {
    let structural = StructuralWitness {
        cut_value: 10.0,
        partition: "stable".to_string(),
        critical_edges: 5,
        boundary: vec!["edge1".to_string(), "edge2".to_string()],
    };
    let predictive = PredictiveWitness {
        set_size: 8,
        coverage: 0.9,
    };
    let evidential = EvidentialWitness {
        e_value: 150.0,
        verdict: "accept".to_string(),
    };
    WitnessSummary {
        structural,
        predictive,
        evidential,
    }
}
/// Receipt at `sequence` chained onto `previous_hash`, with a zeroed merkle
/// root and a timestamp derived from the sequence number.
fn create_test_receipt(sequence: u64, previous_hash: [u8; 32]) -> WitnessReceipt {
    let timestamp = 1000000000 + sequence * 1000;
    WitnessReceipt {
        sequence,
        token: create_test_token(sequence, &format!("action-{}", sequence)),
        previous_hash,
        witness_summary: create_test_summary(),
        timestamp_proof: TimestampProof {
            timestamp,
            previous_receipt_hash: previous_hash,
            merkle_root: [0u8; 32],
        },
    }
}
#[cfg(test)]
mod witness_summary {
    use super::*;

    // empty() yields the neutral value for every facet.
    #[test]
    fn test_empty_summary() {
        let empty = WitnessSummary::empty();
        assert_eq!(empty.structural.cut_value, 0.0);
        assert_eq!(empty.predictive.set_size, 0);
        assert_eq!(empty.evidential.e_value, 1.0);
    }

    // Hashing the same summary twice gives the same digest.
    #[test]
    fn test_summary_hash_deterministic() {
        let summary = create_test_summary();
        assert_eq!(summary.hash(), summary.hash());
    }

    // Altering a field (cut_value) alters the digest.
    #[test]
    fn test_summary_hash_unique() {
        let baseline = create_test_summary();
        let mut altered = create_test_summary();
        altered.structural.cut_value = 20.0;
        assert_ne!(baseline.hash(), altered.hash());
    }

    // The JSON projection exposes the numeric fields of each facet.
    #[test]
    fn test_summary_to_json() {
        let json = create_test_summary().to_json();
        assert!(json.is_object());
        assert!(json["structural"]["cut_value"].is_number());
        assert!(json["predictive"]["set_size"].is_number());
        assert!(json["evidential"]["e_value"].is_number());
    }
}
#[cfg(test)]
mod receipt_hashing {
    use super::*;

    /// A receipt's hash is never the all-zero digest.
    #[test]
    fn test_receipt_hash_nonzero() {
        let receipt = create_test_receipt(0, [0u8; 32]);
        assert_ne!(receipt.hash(), [0u8; 32]);
    }

    /// Hashing is a pure function of the receipt's contents.
    #[test]
    fn test_receipt_hash_deterministic() {
        let receipt = create_test_receipt(0, [0u8; 32]);
        assert_eq!(receipt.hash(), receipt.hash());
    }

    /// The sequence number is part of the hashed content.
    #[test]
    fn test_receipt_hash_changes_with_sequence() {
        let receipt1 = create_test_receipt(0, [0u8; 32]);
        let receipt2 = create_test_receipt(1, [0u8; 32]);
        assert_ne!(receipt1.hash(), receipt2.hash());
    }

    /// The previous-hash link is part of the hashed content.
    #[test]
    fn test_receipt_hash_changes_with_previous() {
        let receipt1 = create_test_receipt(0, [0u8; 32]);
        let receipt2 = create_test_receipt(0, [1u8; 32]);
        assert_ne!(receipt1.hash(), receipt2.hash());
    }

    /// The witness summary is part of the hashed content.
    /// Fix: `receipt1` was declared `mut` but never mutated, which raises
    /// an `unused_mut` warning under `cargo clippy -- -D warnings`.
    #[test]
    fn test_receipt_hash_includes_witness() {
        let receipt1 = create_test_receipt(0, [0u8; 32]);
        let mut receipt2 = create_test_receipt(0, [0u8; 32]);
        receipt2.witness_summary.structural.cut_value = 99.0;
        assert_ne!(receipt1.hash(), receipt2.hash());
    }
}
#[cfg(test)]
mod receipt_log {
    use super::*;

    // A fresh log has no receipts and no latest sequence.
    #[test]
    fn test_new_log_empty() {
        let log = ReceiptLog::new();
        assert!(log.is_empty());
        assert_eq!(log.len(), 0);
        assert_eq!(log.latest_sequence(), None);
    }

    // The chain is anchored on the all-zero genesis hash.
    #[test]
    fn test_genesis_hash() {
        assert_eq!(ReceiptLog::new().last_hash(), [0u8; 32]);
    }

    // Appending one receipt updates length, latest sequence, and head hash.
    #[test]
    fn test_append_single() {
        let mut log = ReceiptLog::new();
        log.append(create_test_receipt(0, log.last_hash()));
        assert_eq!(log.len(), 1);
        assert_eq!(log.latest_sequence(), Some(0));
        assert_ne!(log.last_hash(), [0u8; 32]);
    }

    // Appending five receipts grows the log accordingly.
    #[test]
    fn test_append_multiple() {
        let mut log = ReceiptLog::new();
        for seq in 0..5 {
            log.append(create_test_receipt(seq, log.last_hash()));
        }
        assert_eq!(log.len(), 5);
        assert_eq!(log.latest_sequence(), Some(4));
    }

    // get() returns the stored receipt for a known sequence.
    #[test]
    fn test_get_receipt() {
        let mut log = ReceiptLog::new();
        log.append(create_test_receipt(0, log.last_hash()));
        assert_eq!(log.get(0).map(|r| r.sequence), Some(0));
    }

    // get() on sequences never appended returns None.
    #[test]
    fn test_get_nonexistent() {
        let log = ReceiptLog::new();
        assert!(log.get(0).is_none());
        assert!(log.get(999).is_none());
    }
}
#[cfg(test)]
mod hash_chain_verification {
    use super::*;

    // With no receipts there is nothing at sequence 0 to verify.
    #[test]
    fn test_verify_empty_chain() {
        assert!(ReceiptLog::new().verify_chain_to(0).is_err());
    }

    // A single well-linked receipt verifies.
    #[test]
    fn test_verify_single_receipt() {
        let mut log = ReceiptLog::new();
        log.append(create_test_receipt(0, log.last_hash()));
        assert!(log.verify_chain_to(0).is_ok());
    }

    // Full and partial prefixes of a well-formed chain all verify.
    #[test]
    fn test_verify_chain_multiple() {
        let mut log = ReceiptLog::new();
        for seq in 0..10 {
            log.append(create_test_receipt(seq, log.last_hash()));
        }
        for upto in [9, 0, 5] {
            assert!(log.verify_chain_to(upto).is_ok());
        }
    }

    // Verification past the newest receipt is an error.
    #[test]
    fn test_verify_beyond_latest() {
        let mut log = ReceiptLog::new();
        log.append(create_test_receipt(0, log.last_hash()));
        assert!(log.verify_chain_to(1).is_err());
    }
}
#[cfg(test)]
mod tamper_detection {
    use super::*;

    // A chain built with correct links passes verification end to end.
    #[test]
    fn test_detect_modified_hash() {
        let mut log = ReceiptLog::new();
        for seq in 0..5 {
            log.append(create_test_receipt(seq, log.last_hash()));
        }
        assert!(log.verify_chain_to(4).is_ok());
    }

    // Skipping a sequence number leaves a hole the verifier must catch.
    #[test]
    fn test_chain_with_gap() {
        let mut log = ReceiptLog::new();
        log.append(create_test_receipt(0, log.last_hash()));
        // Jump straight to sequence 2, leaving 1 absent.
        log.append(create_test_receipt(2, log.last_hash()));
        assert!(log.verify_chain_to(2).is_err());
    }
}
#[cfg(test)]
mod timestamp_proof {
    use super::*;

    // The proof stores exactly what it was built from.
    #[test]
    fn test_timestamp_proof_structure() {
        let proof = TimestampProof {
            timestamp: 1000000000,
            previous_receipt_hash: [1u8; 32],
            merkle_root: [2u8; 32],
        };
        assert_eq!(proof.timestamp, 1000000000);
        assert_eq!(proof.previous_receipt_hash, [1u8; 32]);
        assert_eq!(proof.merkle_root, [2u8; 32]);
    }

    // Receipts embed a proof that points back at the previous hash.
    #[test]
    fn test_receipt_contains_timestamp_proof() {
        let receipt = create_test_receipt(5, [3u8; 32]);
        assert_eq!(receipt.timestamp_proof.previous_receipt_hash, [3u8; 32]);
        assert!(receipt.timestamp_proof.timestamp > 0);
    }

    // Timestamps grow strictly along the chain.
    #[test]
    fn test_timestamp_ordering() {
        let mut log = ReceiptLog::new();
        for seq in 0..5 {
            log.append(create_test_receipt(seq, log.last_hash()));
        }
        let mut last_seen = 0;
        for seq in 0..5 {
            let ts = log.get(seq).unwrap().timestamp_proof.timestamp;
            assert!(ts > last_seen);
            last_seen = ts;
        }
    }
}
#[cfg(test)]
mod structural_witness {
    use super::*;

    // Field-by-field construction sanity check.
    #[test]
    fn test_structural_witness_fields() {
        let witness = StructuralWitness {
            cut_value: 15.0,
            partition: "fragile".to_string(),
            critical_edges: 3,
            boundary: vec!["e1".to_string(), "e2".to_string(), "e3".to_string()],
        };
        assert_eq!(witness.cut_value, 15.0);
        assert_eq!(witness.partition, "fragile");
        assert_eq!(witness.critical_edges, 3);
        assert_eq!(witness.boundary.len(), 3);
    }

    // A serde JSON round trip keeps the values intact.
    #[test]
    fn test_structural_witness_serialization() {
        let original = StructuralWitness {
            cut_value: 10.0,
            partition: "stable".to_string(),
            critical_edges: 2,
            boundary: vec![],
        };
        let restored: StructuralWitness =
            serde_json::from_str(&serde_json::to_string(&original).unwrap()).unwrap();
        assert_eq!(original.cut_value, restored.cut_value);
        assert_eq!(original.partition, restored.partition);
    }
}
#[cfg(test)]
mod predictive_witness {
    use super::*;

    // Field construction sanity check.
    #[test]
    fn test_predictive_witness_fields() {
        let witness = PredictiveWitness {
            set_size: 12,
            coverage: 0.95,
        };
        assert_eq!(witness.set_size, 12);
        assert_eq!(witness.coverage, 0.95);
    }

    // A serde JSON round trip keeps the values (coverage within epsilon).
    #[test]
    fn test_predictive_witness_serialization() {
        let original = PredictiveWitness {
            set_size: 5,
            coverage: 0.9,
        };
        let restored: PredictiveWitness =
            serde_json::from_str(&serde_json::to_string(&original).unwrap()).unwrap();
        assert_eq!(original.set_size, restored.set_size);
        assert!((original.coverage - restored.coverage).abs() < 0.001);
    }
}
#[cfg(test)]
mod evidential_witness {
    use super::*;

    // Field construction sanity check.
    #[test]
    fn test_evidential_witness_fields() {
        let witness = EvidentialWitness {
            e_value: 250.0,
            verdict: "accept".to_string(),
        };
        assert_eq!(witness.e_value, 250.0);
        assert_eq!(witness.verdict, "accept");
    }

    // Each e-value regime carries its verdict string faithfully.
    #[test]
    fn test_evidential_witness_verdicts() {
        let cases = [(200.0, "accept"), (50.0, "continue"), (0.005, "reject")];
        for (e_value, verdict) in cases {
            let witness = EvidentialWitness {
                e_value,
                verdict: verdict.to_string(),
            };
            assert_eq!(witness.verdict, verdict);
        }
    }
}
#[cfg(test)]
mod security_tests {
    use super::*;

    /// A forged receipt (previous_hash that does not match the chain head)
    /// must break verification.
    ///
    /// Fix: the original test built a valid chain and asserted nothing —
    /// it never exercised forgery detection. It now appends a receipt with
    /// a bogus previous_hash and checks that the chain fails to verify.
    #[test]
    fn test_forged_receipt_detection() {
        let mut log = ReceiptLog::new();
        // Build legitimate chain
        for i in 0..3 {
            let receipt = create_test_receipt(i, log.last_hash());
            log.append(receipt);
        }
        assert!(log.verify_chain_to(2).is_ok(), "legitimate chain verifies");
        // Forge the next receipt: correct sequence, wrong previous hash.
        let forged = create_test_receipt(3, [0xAAu8; 32]);
        log.append(forged);
        assert!(
            log.verify_chain_to(3).is_err(),
            "forged previous_hash should be detected"
        );
    }

    /// Distinct receipts hash to distinct digests (no accidental collisions).
    #[test]
    fn test_hash_collision_resistance() {
        let mut hashes = std::collections::HashSet::new();
        // Generate many receipts and check for collisions
        for i in 0..100 {
            let receipt = create_test_receipt(i, [i as u8; 32]);
            let hash = receipt.hash();
            assert!(hashes.insert(hash), "Hash collision at sequence {}", i);
        }
    }

    /// Sequence, previous-hash, and witness fields all feed the hash.
    #[test]
    fn test_all_fields_affect_hash() {
        let base = create_test_receipt(0, [0u8; 32]);
        let base_hash = base.hash();
        // Modify sequence
        let mut modified = create_test_receipt(0, [0u8; 32]);
        modified.sequence = 1;
        assert_ne!(base_hash, modified.hash());
        // Modify previous_hash
        let modified2 = create_test_receipt(0, [1u8; 32]);
        assert_ne!(base_hash, modified2.hash());
        // Modify witness
        let mut modified3 = create_test_receipt(0, [0u8; 32]);
        modified3.witness_summary.evidential.e_value = 0.0;
        assert_ne!(base_hash, modified3.hash());
    }

    /// Sequences appended in order stay strictly increasing.
    #[test]
    fn test_sequence_monotonicity() {
        let mut log = ReceiptLog::new();
        let mut prev_seq = None;
        for i in 0..10 {
            let receipt = create_test_receipt(i, log.last_hash());
            log.append(receipt);
            if let Some(prev) = prev_seq {
                assert!(log.get(i).unwrap().sequence > prev);
            }
            prev_seq = Some(i);
        }
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;
    proptest! {
        // Hashing the same receipt twice must yield the same digest.
        // NOTE: the previous-hash strategy uses an inclusive range so the
        // full byte domain (including 0xFF) is exercised; the original
        // exclusive range `0u8..255` silently skipped 255.
        #[test]
        fn prop_hash_deterministic(seq in 0u64..1000, prev in proptest::array::uniform32(0u8..=255)) {
            let receipt = create_test_receipt(seq, prev);
            assert_eq!(receipt.hash(), receipt.hash());
        }
        // Receipts that differ only in sequence must hash differently.
        #[test]
        fn prop_different_sequences_different_hashes(seq1 in 0u64..1000, seq2 in 0u64..1000) {
            prop_assume!(seq1 != seq2);
            let r1 = create_test_receipt(seq1, [0u8; 32]);
            let r2 = create_test_receipt(seq2, [0u8; 32]);
            assert_ne!(r1.hash(), r2.hash());
        }
        // A chain of n linked receipts has length n and verifies end-to-end.
        #[test]
        fn prop_chain_grows_correctly(n in 1usize..20) {
            let mut log = ReceiptLog::new();
            for i in 0..n {
                let receipt = create_test_receipt(i as u64, log.last_hash());
                log.append(receipt);
            }
            assert_eq!(log.len(), n);
            assert!(log.verify_chain_to((n - 1) as u64).is_ok());
        }
    }
}

View File

@@ -0,0 +1,665 @@
//! Comprehensive tests for deterministic replay
//!
//! Tests cover:
//! - Replay engine creation and configuration
//! - Checkpoint management
//! - Decision replay and verification
//! - Security tests (ensuring determinism)
use cognitum_gate_tilezero::replay::{
ReplayDifference, ReplayEngine, ReplayError, ReplayResult, SequenceVerification,
StateSnapshot, TileSnapshot,
};
use cognitum_gate_tilezero::receipt::{
EvidentialWitness, PredictiveWitness, StructuralWitness, TimestampProof, WitnessReceipt,
WitnessSummary,
};
use cognitum_gate_tilezero::permit::PermitToken;
use cognitum_gate_tilezero::GateDecision;
use std::collections::HashMap;
/// Builds a `WitnessReceipt` test fixture: a permit token carrying
/// `decision`, zeroed hashes/signature, and a timestamp derived
/// deterministically from `sequence`.
fn create_test_receipt(
    sequence: u64,
    decision: GateDecision,
    witness: WitnessSummary,
) -> WitnessReceipt {
    // Deterministic timestamp: fixed base plus 1000 per sequence step,
    // shared by the token and the timestamp proof.
    let timestamp = 1000000000 + sequence * 1000;
    let zero_hash = [0u8; 32];
    let token = PermitToken {
        decision,
        action_id: format!("action-{}", sequence),
        timestamp,
        ttl_ns: 60_000_000_000,
        witness_hash: zero_hash,
        sequence,
        signature: [0u8; 64],
    };
    let timestamp_proof = TimestampProof {
        timestamp,
        previous_receipt_hash: zero_hash,
        merkle_root: zero_hash,
    };
    WitnessReceipt {
        sequence,
        token,
        previous_hash: zero_hash,
        witness_summary: witness,
        timestamp_proof,
    }
}
/// Witness fixture shaped so replay reconstructs a PERMIT: stable
/// partition, small prediction set, and a high e-value with verdict
/// "accept".
fn create_permit_witness() -> WitnessSummary {
    let structural = StructuralWitness {
        cut_value: 10.0,
        partition: "stable".to_string(),
        critical_edges: 2,
        boundary: Vec::new(),
    };
    let predictive = PredictiveWitness {
        set_size: 5,
        coverage: 0.9,
    };
    let evidential = EvidentialWitness {
        e_value: 150.0,
        verdict: "accept".to_string(),
    };
    WitnessSummary {
        structural,
        predictive,
        evidential,
    }
}
/// Witness fixture shaped so replay reconstructs a DEFER.
fn create_defer_witness() -> WitnessSummary {
    let structural = StructuralWitness {
        cut_value: 10.0,
        partition: "stable".to_string(),
        critical_edges: 5,
        boundary: Vec::new(),
    };
    // set_size of 25 is the large value that drives the DEFER outcome.
    let predictive = PredictiveWitness {
        set_size: 25,
        coverage: 0.9,
    };
    let evidential = EvidentialWitness {
        e_value: 50.0,
        verdict: "continue".to_string(),
    };
    WitnessSummary {
        structural,
        predictive,
        evidential,
    }
}
/// Witness fixture shaped so replay reconstructs a DENY.
fn create_deny_witness() -> WitnessSummary {
    // The "fragile" partition is what drives the DENY outcome.
    let structural = StructuralWitness {
        cut_value: 2.0,
        partition: "fragile".to_string(),
        critical_edges: 10,
        boundary: Vec::new(),
    };
    let predictive = PredictiveWitness {
        set_size: 5,
        coverage: 0.9,
    };
    let evidential = EvidentialWitness {
        e_value: 0.001,
        verdict: "reject".to_string(),
    };
    WitnessSummary {
        structural,
        predictive,
        evidential,
    }
}
#[cfg(test)]
mod engine_creation {
    use super::*;

    #[test]
    fn test_default_engine() {
        // A default engine starts with no checkpoints recorded.
        assert_eq!(ReplayEngine::default().checkpoint_count(), 0);
    }

    #[test]
    fn test_engine_with_interval() {
        // Configuring a checkpoint interval does not by itself create checkpoints.
        assert_eq!(ReplayEngine::new(50).checkpoint_count(), 0);
    }
}
#[cfg(test)]
mod checkpoint_management {
    use super::*;

    /// Builds a snapshot at `sequence` with the given global min-cut;
    /// the remaining fields use the fixed values shared by these tests.
    fn snapshot_at(sequence: u64, global_min_cut: f64) -> StateSnapshot {
        StateSnapshot {
            sequence,
            timestamp: 1000 + sequence,
            global_min_cut,
            aggregate_e_value: 100.0,
            min_coherence: 256,
            tile_states: HashMap::new(),
        }
    }

    #[test]
    fn test_save_checkpoint() {
        let mut engine = ReplayEngine::new(10);
        engine.save_checkpoint(0, snapshot_at(0, 10.0));
        assert_eq!(engine.checkpoint_count(), 1);
    }

    #[test]
    fn test_checkpoint_at_interval() {
        let mut engine = ReplayEngine::new(10);
        // Only sequences at multiples of the interval should be persisted.
        for seq in [0, 5, 10, 15, 20] {
            engine.save_checkpoint(seq, snapshot_at(seq, 10.0));
        }
        // 0, 10, 20 are kept; 5 and 15 are dropped.
        assert_eq!(engine.checkpoint_count(), 3);
    }

    #[test]
    fn test_find_nearest_checkpoint() {
        let mut engine = ReplayEngine::new(10);
        // Encode the sequence into global_min_cut so we can tell which
        // snapshot a lookup returned.
        for seq in [0, 10, 20] {
            engine.save_checkpoint(seq, snapshot_at(seq, seq as f64));
        }
        // Lookups resolve to the closest checkpoint at or below the target.
        let (found_seq, snapshot) = engine.find_nearest_checkpoint(15).unwrap();
        assert_eq!(found_seq, 10);
        assert_eq!(snapshot.global_min_cut, 10.0);
        let (found_seq, _) = engine.find_nearest_checkpoint(25).unwrap();
        assert_eq!(found_seq, 20);
        let (found_seq, _) = engine.find_nearest_checkpoint(5).unwrap();
        assert_eq!(found_seq, 0);
    }

    #[test]
    fn test_no_checkpoint_found() {
        // With nothing saved there is no checkpoint at or below any sequence.
        assert!(ReplayEngine::new(10).find_nearest_checkpoint(5).is_none());
    }

    #[test]
    fn test_prune_checkpoints() {
        let mut engine = ReplayEngine::new(10);
        for seq in [0, 10, 20, 30, 40, 50] {
            engine.save_checkpoint(seq, snapshot_at(seq, 10.0));
        }
        assert_eq!(engine.checkpoint_count(), 6);
        engine.prune_before(30);
        // 30, 40, 50 survive; everything earlier is gone.
        assert_eq!(engine.checkpoint_count(), 3);
        assert!(engine.find_nearest_checkpoint(20).is_none());
        assert!(engine.find_nearest_checkpoint(30).is_some());
    }
}
#[cfg(test)]
mod decision_replay {
    use super::*;

    /// Replays one freshly-built receipt carrying `decision` + `witness`
    /// through a new engine and returns the result.
    fn replay_one(decision: GateDecision, witness: WitnessSummary) -> ReplayResult {
        let engine = ReplayEngine::new(100);
        engine.replay(&create_test_receipt(0, decision, witness))
    }

    #[test]
    fn test_replay_permit() {
        // Decision and witness agree, so replay must match with no diffs.
        let result = replay_one(GateDecision::Permit, create_permit_witness());
        assert!(result.matched);
        assert_eq!(result.decision, GateDecision::Permit);
        assert_eq!(result.original_decision, GateDecision::Permit);
        assert!(result.differences.is_empty());
    }

    #[test]
    fn test_replay_defer() {
        let result = replay_one(GateDecision::Defer, create_defer_witness());
        assert!(result.matched);
        assert_eq!(result.decision, GateDecision::Defer);
    }

    #[test]
    fn test_replay_deny() {
        let result = replay_one(GateDecision::Deny, create_deny_witness());
        assert!(result.matched);
        assert_eq!(result.decision, GateDecision::Deny);
    }

    #[test]
    fn test_replay_mismatch() {
        // Token claims PERMIT but the witness supports DENY (fragile
        // partition): replay reconstructs DENY and flags the mismatch.
        let result = replay_one(GateDecision::Permit, create_deny_witness());
        assert!(!result.matched);
        assert_eq!(result.decision, GateDecision::Deny); // Reconstructed from witness
        assert_eq!(result.original_decision, GateDecision::Permit); // From token
        assert!(!result.differences.is_empty());
    }

    #[test]
    fn test_replay_preserves_snapshot() {
        // The replayed snapshot must carry witness values through unchanged.
        let witness = create_permit_witness();
        let result = replay_one(GateDecision::Permit, witness.clone());
        assert_eq!(result.state_snapshot.structural.cut_value, witness.structural.cut_value);
        assert_eq!(result.state_snapshot.evidential.e_value, witness.evidential.e_value);
    }
}
#[cfg(test)]
mod sequence_verification {
    use super::*;

    #[test]
    fn test_verify_empty_sequence() {
        // An empty batch verifies trivially with zero mismatches.
        let verification = ReplayEngine::new(100).verify_sequence(&[]);
        assert_eq!(verification.total_receipts, 0);
        assert!(verification.all_matched);
        assert_eq!(verification.mismatch_count(), 0);
    }

    #[test]
    fn test_verify_single_receipt() {
        let batch = vec![create_test_receipt(0, GateDecision::Permit, create_permit_witness())];
        let verification = ReplayEngine::new(100).verify_sequence(&batch);
        assert_eq!(verification.total_receipts, 1);
        assert!(verification.all_matched);
    }

    #[test]
    fn test_verify_multiple_receipts() {
        // One consistent receipt per decision kind.
        let batch = vec![
            create_test_receipt(0, GateDecision::Permit, create_permit_witness()),
            create_test_receipt(1, GateDecision::Defer, create_defer_witness()),
            create_test_receipt(2, GateDecision::Deny, create_deny_witness()),
        ];
        let verification = ReplayEngine::new(100).verify_sequence(&batch);
        assert_eq!(verification.total_receipts, 3);
        assert!(verification.all_matched);
        assert_eq!(verification.mismatch_count(), 0);
    }

    #[test]
    fn test_verify_with_mismatches() {
        // Receipt 1 claims PERMIT against a deny-shaped witness.
        let batch = vec![
            create_test_receipt(0, GateDecision::Permit, create_permit_witness()),
            create_test_receipt(1, GateDecision::Permit, create_deny_witness()),
            create_test_receipt(2, GateDecision::Deny, create_deny_witness()),
        ];
        let verification = ReplayEngine::new(100).verify_sequence(&batch);
        assert_eq!(verification.total_receipts, 3);
        assert!(!verification.all_matched);
        assert_eq!(verification.mismatch_count(), 1);
        let mismatches: Vec<_> = verification.mismatches().collect();
        assert_eq!(mismatches.len(), 1);
        // The offending entry is the one at sequence 1.
        assert_eq!(mismatches[0].0, 1);
    }

    #[test]
    fn test_mismatches_iterator() {
        // Two of the three receipts disagree with their witnesses.
        let batch = vec![
            create_test_receipt(0, GateDecision::Permit, create_deny_witness()),
            create_test_receipt(1, GateDecision::Permit, create_permit_witness()),
            create_test_receipt(2, GateDecision::Defer, create_deny_witness()),
        ];
        let verification = ReplayEngine::new(100).verify_sequence(&batch);
        assert_eq!(verification.mismatches().count(), 2);
    }
}
#[cfg(test)]
mod checkpoint_export_import {
    use super::*;

    #[test]
    fn test_export_checkpoint() {
        let mut engine = ReplayEngine::new(10);
        let checkpoint = StateSnapshot {
            sequence: 0,
            timestamp: 1000,
            global_min_cut: 15.0,
            aggregate_e_value: 200.0,
            min_coherence: 512,
            tile_states: HashMap::new(),
        };
        engine.save_checkpoint(0, checkpoint);
        // Exporting a saved checkpoint yields non-empty serialized bytes.
        let data = engine.export_checkpoint(0).expect("checkpoint was saved");
        assert!(!data.is_empty());
    }

    #[test]
    fn test_export_nonexistent() {
        // Nothing saved means nothing to export.
        assert!(ReplayEngine::new(10).export_checkpoint(0).is_none());
    }

    #[test]
    fn test_import_checkpoint() {
        // Round-trip a checkpoint from one engine into another and make
        // sure the restored snapshot carries the original values.
        let mut source = ReplayEngine::new(10);
        source.save_checkpoint(
            0,
            StateSnapshot {
                sequence: 0,
                timestamp: 1000,
                global_min_cut: 25.0,
                aggregate_e_value: 300.0,
                min_coherence: 768,
                tile_states: HashMap::new(),
            },
        );
        let bytes = source.export_checkpoint(0).unwrap();
        let mut target = ReplayEngine::new(10);
        assert!(target.import_checkpoint(0, &bytes).is_ok());
        assert_eq!(target.checkpoint_count(), 1);
        let (_, imported) = target.find_nearest_checkpoint(0).unwrap();
        assert_eq!(imported.global_min_cut, 25.0);
    }

    #[test]
    fn test_import_invalid_data() {
        // Garbage bytes must be rejected with InvalidCheckpoint.
        let mut engine = ReplayEngine::new(10);
        let result = engine.import_checkpoint(0, b"invalid json");
        assert!(matches!(result, Err(ReplayError::InvalidCheckpoint)));
    }
}
#[cfg(test)]
mod tile_snapshot {
    use super::*;

    #[test]
    fn test_tile_snapshot_in_state() {
        // Two tiles tracked under their ids inside one state snapshot.
        let tile_states = HashMap::from([
            (
                1,
                TileSnapshot {
                    tile_id: 1,
                    coherence: 256,
                    e_value: 10.0,
                    boundary_edges: 5,
                },
            ),
            (
                2,
                TileSnapshot {
                    tile_id: 2,
                    coherence: 512,
                    e_value: 20.0,
                    boundary_edges: 3,
                },
            ),
        ]);
        let snapshot = StateSnapshot {
            sequence: 0,
            timestamp: 1000,
            global_min_cut: 10.0,
            aggregate_e_value: 100.0,
            min_coherence: 256,
            tile_states,
        };
        // Both entries are retrievable by tile id with their stored values.
        assert_eq!(snapshot.tile_states.len(), 2);
        assert_eq!(snapshot.tile_states.get(&1).unwrap().coherence, 256);
        assert_eq!(snapshot.tile_states.get(&2).unwrap().e_value, 20.0);
    }
}
#[cfg(test)]
mod replay_difference {
    use super::*;

    #[test]
    fn test_difference_structure() {
        // A difference records which field diverged plus both the
        // original and the replayed value.
        let diff = ReplayDifference {
            field: String::from("decision"),
            original: String::from("permit"),
            replayed: String::from("deny"),
        };
        assert_eq!(diff.field, "decision");
        assert_eq!(diff.original, "permit");
        assert_eq!(diff.replayed, "deny");
    }
}
#[cfg(test)]
mod determinism {
    use super::*;

    /// Replaying the same receipt twice on one engine must agree exactly.
    #[test]
    fn test_replay_deterministic() {
        let engine = ReplayEngine::new(100);
        let receipt = create_test_receipt(0, GateDecision::Permit, create_permit_witness());
        let first = engine.replay(&receipt);
        let second = engine.replay(&receipt);
        assert_eq!(first.decision, second.decision);
        assert_eq!(first.matched, second.matched);
        assert_eq!(first.differences.len(), second.differences.len());
    }

    /// Differently-configured engines must produce the same replay result.
    #[test]
    fn test_cross_engine_determinism() {
        let receipt = create_test_receipt(0, GateDecision::Defer, create_defer_witness());
        // The checkpoint interval must not influence replay semantics.
        let first = ReplayEngine::new(100).replay(&receipt);
        let second = ReplayEngine::new(50).replay(&receipt);
        assert_eq!(first.decision, second.decision);
        assert_eq!(first.matched, second.matched);
    }

    /// Batch verification of the same receipts must be repeatable.
    #[test]
    fn test_sequence_verification_deterministic() {
        let engine = ReplayEngine::new(100);
        let receipts = vec![
            create_test_receipt(0, GateDecision::Permit, create_permit_witness()),
            create_test_receipt(1, GateDecision::Deny, create_deny_witness()),
        ];
        let first = engine.verify_sequence(&receipts);
        let second = engine.verify_sequence(&receipts);
        assert_eq!(first.total_receipts, second.total_receipts);
        assert_eq!(first.all_matched, second.all_matched);
        assert_eq!(first.mismatch_count(), second.mismatch_count());
    }
}
#[cfg(test)]
mod security_tests {
    use super::*;

    /// A tampered witness must make replay diverge from the recorded token.
    #[test]
    fn test_witness_tampering_detected() {
        let engine = ReplayEngine::new(100);
        // The untampered receipt replays cleanly.
        let original = create_test_receipt(0, GateDecision::Permit, create_permit_witness());
        assert!(engine.replay(&original).matched);
        // Flip the partition label to "fragile": the witness now supports
        // DENY while the token still claims PERMIT.
        let mut doctored = create_permit_witness();
        doctored.structural.partition = "fragile".to_string();
        let tampered = create_test_receipt(0, GateDecision::Permit, doctored);
        assert!(!engine.replay(&tampered).matched);
    }

    /// A consistently-built history of mixed decisions replays cleanly.
    #[test]
    fn test_audit_trail() {
        let engine = ReplayEngine::new(100);
        // Cycle PERMIT/DEFER/DENY, pairing each decision with a witness
        // that supports it, so every replay should match.
        let receipts: Vec<_> = (0..10)
            .map(|i| {
                let (decision, witness) = match i % 3 {
                    0 => (GateDecision::Permit, create_permit_witness()),
                    1 => (GateDecision::Defer, create_defer_witness()),
                    _ => (GateDecision::Deny, create_deny_witness()),
                };
                create_test_receipt(i, decision, witness)
            })
            .collect();
        let verification = engine.verify_sequence(&receipts);
        assert!(verification.all_matched);
        assert_eq!(verification.total_receipts, 10);
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;
    proptest! {
        // Whatever the sequence number, replay must settle on one of the
        // three valid gate decisions — never something unclassifiable.
        #[test]
        fn prop_replay_always_produces_result(sequence in 0u64..1000) {
            let engine = ReplayEngine::new(100);
            let receipt = create_test_receipt(
                sequence,
                GateDecision::Permit,
                create_permit_witness()
            );
            let result = engine.replay(&receipt);
            // Should always produce a valid result
            assert!(result.decision == GateDecision::Permit ||
                result.decision == GateDecision::Defer ||
                result.decision == GateDecision::Deny);
        }
        // Saving snapshots for 0..3*interval sequences should persist the
        // ones landing on the interval (0, interval, 2*interval at minimum).
        #[test]
        fn prop_checkpoint_interval_works(interval in 1u64..100) {
            let mut engine = ReplayEngine::new(interval);
            for seq in 0..interval * 3 {
                let snapshot = StateSnapshot {
                    sequence: seq,
                    timestamp: 1000 + seq,
                    global_min_cut: 10.0,
                    aggregate_e_value: 100.0,
                    min_coherence: 256,
                    tile_states: HashMap::new(),
                };
                engine.save_checkpoint(seq, snapshot);
            }
            // Should have saved at least 3 checkpoints
            assert!(engine.checkpoint_count() >= 3);
        }
        // When the recorded decision agrees with its witness, a matched
        // replay must report an empty difference list.
        #[test]
        fn prop_matching_decisions_have_empty_differences(seq in 0u64..100) {
            let engine = ReplayEngine::new(100);
            // Create receipts where decision matches witness
            let receipts = vec![
                (GateDecision::Permit, create_permit_witness()),
                (GateDecision::Defer, create_defer_witness()),
                (GateDecision::Deny, create_deny_witness()),
            ];
            for (decision, witness) in receipts {
                let receipt = create_test_receipt(seq, decision, witness);
                let result = engine.replay(&receipt);
                if result.matched {
                    assert!(result.differences.is_empty());
                }
            }
        }
    }
}