Squashed 'vendor/ruvector/' content from commit b64c2172

git-subtree-dir: vendor/ruvector
git-subtree-split: b64c21726f2bb37286d9ee36a7869fef60cc6900
This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
commit d803bfe2b1
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,506 @@
//! Comprehensive tests for PERMIT/DEFER/DENY decision logic
//!
//! Tests cover:
//! - Three-filter decision pipeline
//! - Threshold configurations
//! - Edge cases and boundary conditions
//! - Security scenarios (policy violations, replay detection)
use cognitum_gate_tilezero::decision::{EvidenceDecision, GateDecision, GateThresholds};
#[cfg(test)]
mod gate_decision {
    use super::*;

    /// `Display` renders each variant as its lowercase wire name.
    #[test]
    fn test_decision_display() {
        let cases = [
            (GateDecision::Permit, "permit"),
            (GateDecision::Defer, "defer"),
            (GateDecision::Deny, "deny"),
        ];
        for (decision, expected) in cases {
            assert_eq!(decision.to_string(), expected);
        }
    }

    /// Each variant equals itself and differs from the other two.
    #[test]
    fn test_decision_equality() {
        assert_eq!(GateDecision::Permit, GateDecision::Permit);
        assert_eq!(GateDecision::Defer, GateDecision::Defer);
        assert_eq!(GateDecision::Deny, GateDecision::Deny);
        assert_ne!(GateDecision::Permit, GateDecision::Defer);
        assert_ne!(GateDecision::Permit, GateDecision::Deny);
        assert_ne!(GateDecision::Defer, GateDecision::Deny);
    }
}
#[cfg(test)]
mod evidence_decision {
    use super::*;

    /// The three evidence outcomes are constructible and self-equal.
    #[test]
    fn test_evidence_values() {
        let (accepted, continuing, rejected) = (
            EvidenceDecision::Accept,
            EvidenceDecision::Continue,
            EvidenceDecision::Reject,
        );
        assert_eq!(accepted, EvidenceDecision::Accept);
        assert_eq!(continuing, EvidenceDecision::Continue);
        assert_eq!(rejected, EvidenceDecision::Reject);
    }
}
#[cfg(test)]
mod threshold_configuration {
    use super::*;

    /// Default thresholds match the documented production values.
    #[test]
    fn test_default_thresholds() {
        let t = GateThresholds::default();
        assert_eq!(t.tau_deny, 0.01);
        assert_eq!(t.tau_permit, 100.0);
        assert_eq!(t.min_cut, 5.0);
        assert_eq!(t.max_shift, 0.5);
        assert_eq!(t.permit_ttl_ns, 60_000_000_000);
    }

    /// Every field can be overridden with a struct literal.
    #[test]
    fn test_custom_thresholds() {
        let custom = GateThresholds {
            tau_deny: 0.05,
            tau_permit: 50.0,
            min_cut: 10.0,
            max_shift: 0.3,
            permit_ttl_ns: 30_000_000_000,
            theta_uncertainty: 15.0,
            theta_confidence: 3.0,
        };
        assert_eq!(custom.tau_deny, 0.05);
        assert_eq!(custom.tau_permit, 50.0);
        assert_eq!(custom.min_cut, 10.0);
    }

    /// Defaults straddle 1.0: deny below it, permit above it
    /// (the usual e-process convention).
    #[test]
    fn test_threshold_ordering() {
        let t = GateThresholds::default();
        assert!(t.tau_deny < 1.0);
        assert!(t.tau_permit > 1.0);
        assert!(t.tau_deny < t.tau_permit);
    }

    /// Conformal defaults: a smaller prediction set means more confidence,
    /// so the confidence threshold sits below the uncertainty one.
    #[test]
    fn test_conformal_thresholds() {
        let t = GateThresholds::default();
        assert!(t.theta_confidence < t.theta_uncertainty);
    }
}
#[cfg(test)]
mod three_filter_logic {
    use super::*;

    /// Mirrors the gate's three-filter pipeline: structure, then shift,
    /// then evidence. The first failing filter decides the outcome.
    fn apply_three_filters(
        min_cut: f64,
        shift_pressure: f64,
        e_aggregate: f64,
        thresholds: &GateThresholds,
    ) -> GateDecision {
        if min_cut < thresholds.min_cut {
            // 1. Structural filter: graph too weakly connected.
            GateDecision::Deny
        } else if shift_pressure >= thresholds.max_shift {
            // 2. Shift filter: coherence is drifting; wait.
            GateDecision::Defer
        } else if e_aggregate < thresholds.tau_deny {
            // 3a. Evidence filter: strong evidence against coherence.
            GateDecision::Deny
        } else if e_aggregate < thresholds.tau_permit {
            // 3b. Evidence inconclusive.
            GateDecision::Defer
        } else {
            GateDecision::Permit
        }
    }

    /// A min-cut below the floor denies even when everything else is healthy.
    #[test]
    fn test_structural_filter_deny() {
        let t = GateThresholds::default();
        // min_cut 3.0 < 5.0; shift and evidence are both fine.
        assert_eq!(apply_three_filters(3.0, 0.1, 150.0, &t), GateDecision::Deny);
    }

    /// Excess shift pressure defers despite good structure and evidence.
    #[test]
    fn test_shift_filter_defer() {
        let t = GateThresholds::default();
        // shift 0.8 >= 0.5 threshold.
        assert_eq!(apply_three_filters(10.0, 0.8, 150.0, &t), GateDecision::Defer);
    }

    /// An e-value below tau_deny denies.
    #[test]
    fn test_evidence_filter_deny() {
        let t = GateThresholds::default();
        // e-value 0.005 < tau_deny 0.01.
        assert_eq!(apply_three_filters(10.0, 0.1, 0.005, &t), GateDecision::Deny);
    }

    /// An e-value between tau_deny and tau_permit defers.
    #[test]
    fn test_evidence_filter_defer() {
        let t = GateThresholds::default();
        // 0.01 <= 50.0 < 100.0 — insufficient evidence either way.
        assert_eq!(apply_three_filters(10.0, 0.1, 50.0, &t), GateDecision::Defer);
    }

    /// All three filters passing yields a permit.
    #[test]
    fn test_all_filters_pass_permit() {
        let t = GateThresholds::default();
        // e-value 150.0 >= tau_permit 100.0.
        assert_eq!(apply_three_filters(10.0, 0.1, 150.0, &t), GateDecision::Permit);
    }
}
#[cfg(test)]
mod boundary_conditions {
    use super::*;

    /// Structural check: denied strictly below the floor, permitted otherwise.
    fn decide_structural(min_cut: f64, thresholds: &GateThresholds) -> GateDecision {
        if min_cut < thresholds.min_cut {
            GateDecision::Deny
        } else {
            GateDecision::Permit
        }
    }

    /// Evidence check: reject below tau_deny, accept at/above tau_permit,
    /// otherwise keep accumulating.
    fn decide_evidence(e_aggregate: f64, thresholds: &GateThresholds) -> EvidenceDecision {
        if e_aggregate < thresholds.tau_deny {
            EvidenceDecision::Reject
        } else if e_aggregate >= thresholds.tau_permit {
            EvidenceDecision::Accept
        } else {
            EvidenceDecision::Continue
        }
    }

    /// Exactly at the min-cut floor counts as passing (>= is OK).
    #[test]
    fn test_min_cut_at_threshold() {
        let t = GateThresholds::default();
        assert_eq!(decide_structural(5.0, &t), GateDecision::Permit);
    }

    /// Just below the floor is denied.
    #[test]
    fn test_min_cut_just_below() {
        let t = GateThresholds::default();
        assert_eq!(decide_structural(4.999, &t), GateDecision::Deny);
    }

    /// Exactly at tau_deny is not a rejection; accumulation continues.
    #[test]
    fn test_e_value_at_deny_threshold() {
        let t = GateThresholds::default();
        assert_eq!(decide_evidence(0.01, &t), EvidenceDecision::Continue);
    }

    /// Exactly at tau_permit is an acceptance (>= is inclusive).
    #[test]
    fn test_e_value_at_permit_threshold() {
        let t = GateThresholds::default();
        assert_eq!(decide_evidence(100.0, &t), EvidenceDecision::Accept);
    }

    /// Zero inputs fail both filters.
    #[test]
    fn test_zero_values() {
        let t = GateThresholds::default();
        assert_eq!(decide_structural(0.0, &t), GateDecision::Deny);
        assert_eq!(decide_evidence(0.0, &t), EvidenceDecision::Reject);
    }
}
#[cfg(test)]
mod filter_priority {
    use super::*;

    /// Structural failure wins even when the evidence alone would permit.
    #[test]
    fn test_structural_overrides_evidence() {
        let t = GateThresholds::default();
        let (min_cut, e_aggregate) = (1.0, 1000.0); // fails structure, passes evidence
        let decision = if min_cut < t.min_cut {
            GateDecision::Deny
        } else if e_aggregate >= t.tau_permit {
            GateDecision::Permit
        } else {
            GateDecision::Defer
        };
        assert_eq!(decision, GateDecision::Deny);
    }

    /// Shift failure (checked after structure) wins over strong evidence.
    #[test]
    fn test_shift_overrides_evidence() {
        let t = GateThresholds::default();
        // Passes structure, fails shift, would pass evidence.
        let (min_cut, shift_pressure, e_aggregate) = (10.0, 0.9, 1000.0);
        let decision = if min_cut < t.min_cut {
            GateDecision::Deny
        } else if shift_pressure >= t.max_shift {
            GateDecision::Defer
        } else if e_aggregate >= t.tau_permit {
            GateDecision::Permit
        } else {
            GateDecision::Defer
        };
        assert_eq!(decision, GateDecision::Defer);
    }
}
#[cfg(test)]
mod ttl_scenarios {
    use super::*;

    /// The default permit TTL is one minute, in nanoseconds.
    #[test]
    fn test_permit_ttl() {
        assert_eq!(GateThresholds::default().permit_ttl_ns, 60_000_000_000);
    }

    /// TTL can be shortened via struct-update syntax.
    #[test]
    fn test_custom_short_ttl() {
        let one_second_ns = 1_000_000_000;
        let t = GateThresholds {
            permit_ttl_ns: one_second_ns,
            ..Default::default()
        };
        assert_eq!(t.permit_ttl_ns, one_second_ns);
    }

    /// TTL can be lengthened to a full hour.
    #[test]
    fn test_custom_long_ttl() {
        let one_hour_ns = 3600_000_000_000;
        let t = GateThresholds {
            permit_ttl_ns: one_hour_ns,
            ..Default::default()
        };
        assert_eq!(t.permit_ttl_ns, one_hour_ns);
    }
}
#[cfg(test)]
mod extreme_values {
    use super::*;

    /// Evidence check shared by the extreme-value tests. Branch order
    /// differs from the gate's but the ranges are mutually exclusive,
    /// so the mapping is identical.
    fn decide_evidence_full(e_aggregate: f64, thresholds: &GateThresholds) -> EvidenceDecision {
        if e_aggregate >= thresholds.tau_permit {
            EvidenceDecision::Accept
        } else if e_aggregate < thresholds.tau_deny {
            EvidenceDecision::Reject
        } else {
            EvidenceDecision::Continue
        }
    }

    /// Structural check shared by the extreme-value tests.
    fn decide_structural_full(min_cut: f64, thresholds: &GateThresholds) -> GateDecision {
        if min_cut < thresholds.min_cut {
            GateDecision::Deny
        } else {
            GateDecision::Permit
        }
    }

    /// An astronomically large e-value is accepted.
    #[test]
    fn test_very_high_e_value() {
        let t = GateThresholds::default();
        assert_eq!(decide_evidence_full(1e10, &t), EvidenceDecision::Accept);
    }

    /// A vanishingly small e-value is rejected.
    #[test]
    fn test_very_low_e_value() {
        let t = GateThresholds::default();
        assert_eq!(decide_evidence_full(1e-10, &t), EvidenceDecision::Reject);
    }

    /// A huge min-cut sails through the structural filter.
    #[test]
    fn test_very_high_min_cut() {
        let t = GateThresholds::default();
        assert_eq!(decide_structural_full(1000.0, &t), GateDecision::Permit);
    }
}
#[cfg(test)]
mod serialization {
    use super::*;

    /// Every decision survives a JSON round-trip unchanged.
    #[test]
    fn test_decision_serialization() {
        for decision in [GateDecision::Permit, GateDecision::Defer, GateDecision::Deny] {
            let json = serde_json::to_string(&decision).unwrap();
            let restored: GateDecision = serde_json::from_str(&json).unwrap();
            assert_eq!(decision, restored);
        }
    }

    /// Decisions serialize to lowercase JSON strings.
    #[test]
    fn test_decision_json_values() {
        let expected = [
            (GateDecision::Permit, "\"permit\""),
            (GateDecision::Defer, "\"defer\""),
            (GateDecision::Deny, "\"deny\""),
        ];
        for (decision, json) in expected {
            assert_eq!(serde_json::to_string(&decision).unwrap(), json);
        }
    }

    /// Threshold configs round-trip through JSON with fields intact.
    #[test]
    fn test_thresholds_serialization() {
        let original = GateThresholds::default();
        let json = serde_json::to_string(&original).unwrap();
        let restored: GateThresholds = serde_json::from_str(&json).unwrap();
        assert_eq!(original.tau_deny, restored.tau_deny);
        assert_eq!(original.tau_permit, restored.tau_permit);
        assert_eq!(original.min_cut, restored.min_cut);
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    /// Reference three-filter pipeline, in priority order:
    /// structure, then shift, then evidence.
    fn apply_filters(
        min_cut: f64,
        shift_pressure: f64,
        e_aggregate: f64,
        thresholds: &GateThresholds,
    ) -> GateDecision {
        if min_cut < thresholds.min_cut {
            return GateDecision::Deny;
        }
        if shift_pressure >= thresholds.max_shift {
            return GateDecision::Defer;
        }
        if e_aggregate < thresholds.tau_deny {
            return GateDecision::Deny;
        }
        if e_aggregate < thresholds.tau_permit {
            return GateDecision::Defer;
        }
        GateDecision::Permit
    }

    proptest! {
        /// PERMIT implies every individual filter passed.
        #[test]
        fn prop_permit_requires_all_pass(
            min_cut in 0.0f64..100.0,
            shift in 0.0f64..1.0,
            e_val in 0.001f64..1000.0
        ) {
            let thresholds = GateThresholds::default();
            let structural_ok = min_cut >= thresholds.min_cut;
            let shift_ok = shift < thresholds.max_shift;
            let evidence_ok = e_val >= thresholds.tau_permit;
            let decision = apply_filters(min_cut, shift, e_val, &thresholds);
            if decision == GateDecision::Permit {
                prop_assert!(structural_ok && shift_ok && evidence_ok);
            }
        }

        /// Any structural failure (min_cut < 5.0) is a Deny regardless of the
        /// other inputs. The half-open range 0.0..5.0 covers the entire
        /// failure interval [0, 5.0) — the previous 0.0..4.9 range left
        /// [4.9, 5.0) untested, contradicting the property's claim.
        #[test]
        fn prop_structural_fail_is_deny(min_cut in 0.0f64..5.0) {
            let thresholds = GateThresholds::default();
            let decision = apply_filters(min_cut, 0.0, 1000.0, &thresholds);
            prop_assert_eq!(decision, GateDecision::Deny);
        }

        /// Any e-value below tau_deny (0.01) is a Deny once structure passes.
        /// The half-open range 0.0..0.01 covers the full deny interval
        /// [0, 0.01) — the previous 0.0..0.009 range missed [0.009, 0.01).
        #[test]
        fn prop_evidence_deny_threshold(e_val in 0.0f64..0.01) {
            let thresholds = GateThresholds::default();
            let decision = apply_filters(100.0, 0.0, e_val, &thresholds);
            prop_assert_eq!(decision, GateDecision::Deny);
        }
    }
}

View File

@@ -0,0 +1,578 @@
//! Comprehensive tests for report merging from multiple tiles
//!
//! Tests cover:
//! - Merging strategies (SimpleAverage, WeightedAverage, Median, Maximum, BFT)
//! - Edge cases (empty reports, conflicting epochs)
//! - Node and edge aggregation
//! - Property-based tests for merge invariants
use cognitum_gate_tilezero::merge::{
EdgeSummary, MergeError, MergeStrategy, MergedReport, NodeSummary, ReportMerger, WorkerReport,
};
/// Baseline report for `tile_id` at `epoch`, with fixed confidence (0.9)
/// and local min-cut (1.0).
fn create_test_report(tile_id: u8, epoch: u64) -> WorkerReport {
    let mut r = WorkerReport::new(tile_id, epoch);
    r.confidence = 0.9;
    r.local_mincut = 1.0;
    r
}
/// Attach a node summary with the given weight and coherence; the edge
/// count is fixed at 5.
fn add_test_node(report: &mut WorkerReport, id: &str, weight: f64, coherence: f64) {
    let node = NodeSummary {
        id: id.to_string(),
        weight,
        edge_count: 5,
        coherence,
    };
    report.add_node(node);
}
/// Attach a boundary edge from `source` to `target` with `capacity`.
fn add_test_boundary_edge(report: &mut WorkerReport, source: &str, target: &str, capacity: f64) {
    let edge = EdgeSummary {
        source: source.to_string(),
        target: target.to_string(),
        capacity,
        is_boundary: true,
    };
    report.add_boundary_edge(edge);
}
#[cfg(test)]
mod basic_merging {
    use super::*;

    /// A single report merges into itself: one worker, same epoch, node kept.
    #[test]
    fn test_merge_single_report() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut report = create_test_report(1, 0);
        add_test_node(&mut report, "node1", 1.0, 0.9);
        let merged = merger.merge(&[report]).unwrap();
        assert_eq!(merged.worker_count, 1);
        assert_eq!(merged.epoch, 0);
        assert!(merged.super_nodes.contains_key("node1"));
    }

    /// Three reports of the same node average their weights.
    #[test]
    fn test_merge_multiple_reports() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut reports = Vec::new();
        for i in 1..=3u8 {
            let mut report = create_test_report(i, 0);
            add_test_node(&mut report, "node1", f64::from(i) * 0.1, 0.9);
            reports.push(report);
        }
        let merged = merger.merge(&reports).unwrap();
        assert_eq!(merged.worker_count, 3);
        let node = merged.super_nodes.get("node1").unwrap();
        // mean(0.1, 0.2, 0.3) == 0.2
        assert!((node.weight - 0.2).abs() < 0.001);
    }

    /// Merging nothing is an error, not an empty result.
    #[test]
    fn test_merge_empty_reports() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        assert!(matches!(merger.merge(&[]), Err(MergeError::EmptyReports)));
    }

    /// Reports from different epochs refuse to merge.
    #[test]
    fn test_merge_conflicting_epochs() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports = vec![create_test_report(1, 0), create_test_report(2, 1)];
        let result = merger.merge(&reports);
        assert!(matches!(result, Err(MergeError::ConflictingEpochs)));
    }
}
#[cfg(test)]
mod merge_strategies {
    use super::*;

    /// One report per weight; each contributes a node called "node" with
    /// coherence 0.9 at epoch 0 (tile id = position in `weights`).
    fn reports_with_weights(weights: &[f64]) -> Vec<WorkerReport> {
        weights
            .iter()
            .enumerate()
            .map(|(i, &w)| {
                let mut r = create_test_report(i as u8, 0);
                add_test_node(&mut r, "node", w, 0.9);
                r
            })
            .collect()
    }

    /// SimpleAverage: plain arithmetic mean of the weights.
    #[test]
    fn test_simple_average() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let merged = merger.merge(&reports_with_weights(&[1.0, 2.0, 3.0])).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!((node.weight - 2.0).abs() < 0.001);
    }

    /// WeightedAverage: coherence acts as the per-report weight.
    #[test]
    fn test_weighted_average() {
        let merger = ReportMerger::new(MergeStrategy::WeightedAverage);
        let mut high_coherence = create_test_report(1, 0);
        add_test_node(&mut high_coherence, "node", 1.0, 0.9);
        let mut low_coherence = create_test_report(2, 0);
        add_test_node(&mut low_coherence, "node", 3.0, 0.3);
        let merged = merger.merge(&[high_coherence, low_coherence]).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        // (1.0 * 0.9 + 3.0 * 0.3) / (0.9 + 0.3) = 1.8 / 1.2 = 1.5
        assert!((node.weight - 1.5).abs() < 0.001);
    }

    /// Median with an odd count picks the middle sorted value.
    #[test]
    fn test_median() {
        let merger = ReportMerger::new(MergeStrategy::Median);
        // Sorted: 1, 2, 3, 5, 8 — median is 3.0.
        let reports = reports_with_weights(&[1.0, 5.0, 2.0, 8.0, 3.0]);
        let merged = merger.merge(&reports).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!((node.weight - 3.0).abs() < 0.001);
    }

    /// Median with an even count averages the two middle values.
    #[test]
    fn test_median_even_count() {
        let merger = ReportMerger::new(MergeStrategy::Median);
        // Median of 1..4 is (2.0 + 3.0) / 2 = 2.5.
        let reports = reports_with_weights(&[1.0, 2.0, 3.0, 4.0]);
        let merged = merger.merge(&reports).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!((node.weight - 2.5).abs() < 0.001);
    }

    /// Maximum keeps the single largest weight.
    #[test]
    fn test_maximum() {
        let merger = ReportMerger::new(MergeStrategy::Maximum);
        let reports = reports_with_weights(&[1.0, 5.0, 2.0, 8.0, 3.0]);
        let merged = merger.merge(&reports).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!((node.weight - 8.0).abs() < 0.001);
    }

    /// BFT merging discards outliers: two Byzantine reports at weight
    /// 100.0 must not drag the merged weight away from the honest ~2.0.
    #[test]
    fn test_byzantine_fault_tolerant() {
        let merger = ReportMerger::new(MergeStrategy::ByzantineFaultTolerant);
        // 4 honest reports at 2.0, 2 Byzantine reports at 100.0.
        let reports = reports_with_weights(&[2.0, 2.0, 2.0, 2.0, 100.0, 100.0]);
        let merged = merger.merge(&reports).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!(node.weight < 50.0);
    }
}
#[cfg(test)]
mod edge_merging {
    use super::*;

    /// Duplicate boundary edges collapse into one entry whose capacity is
    /// the average of the contributing reports.
    #[test]
    fn test_merge_boundary_edges() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut r1 = create_test_report(1, 0);
        add_test_boundary_edge(&mut r1, "A", "B", 1.0);
        add_test_boundary_edge(&mut r1, "B", "C", 2.0);
        let mut r2 = create_test_report(2, 0);
        // Same A-B edge with a different capacity, plus a new edge.
        add_test_boundary_edge(&mut r2, "A", "B", 3.0);
        add_test_boundary_edge(&mut r2, "C", "D", 4.0);
        let merged = merger.merge(&[r1, r2]).unwrap();
        // A-B, B-C, C-D: three unique edges.
        assert_eq!(merged.boundary_edges.len(), 3);
        let ab_edge = merged
            .boundary_edges
            .iter()
            .find(|e| {
                let forward = e.source == "A" && e.target == "B";
                let reverse = e.source == "B" && e.target == "A";
                forward || reverse
            })
            .unwrap();
        // mean(1.0, 3.0) == 2.0
        assert!((ab_edge.capacity - 2.0).abs() < 0.001);
        assert_eq!(ab_edge.report_count, 2);
    }

    /// Edge direction is normalized: A→B and B→A are the same edge.
    #[test]
    fn test_edge_normalization() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut r1 = create_test_report(1, 0);
        add_test_boundary_edge(&mut r1, "A", "B", 1.0);
        let mut r2 = create_test_report(2, 0);
        add_test_boundary_edge(&mut r2, "B", "A", 1.0);
        let merged = merger.merge(&[r1, r2]).unwrap();
        assert_eq!(merged.boundary_edges.len(), 1);
        assert_eq!(merged.boundary_edges[0].report_count, 2);
    }
}
#[cfg(test)]
mod node_aggregation {
    use super::*;

    /// The merged node records which tiles contributed to it.
    #[test]
    fn test_contributors_tracked() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        add_test_node(&mut first, "node", 1.0, 0.9);
        let mut second = create_test_report(2, 0);
        add_test_node(&mut second, "node", 2.0, 0.9);
        let merged = merger.merge(&[first, second]).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        assert!(node.contributors.contains(&1));
        assert!(node.contributors.contains(&2));
    }

    /// Edge counts accumulate across reports.
    #[test]
    fn test_edge_count_summed() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        first.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 10,
            coherence: 0.9,
        });
        let mut second = create_test_report(2, 0);
        second.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 20,
            coherence: 0.9,
        });
        let merged = merger.merge(&[first, second]).unwrap();
        // 10 + 20 = 30
        assert_eq!(merged.super_nodes.get("node").unwrap().total_edge_count, 30);
    }

    /// Coherence is averaged across reports.
    #[test]
    fn test_coherence_averaged() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut first = create_test_report(1, 0);
        first.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 5,
            coherence: 0.8,
        });
        let mut second = create_test_report(2, 0);
        second.add_node(NodeSummary {
            id: "node".to_string(),
            weight: 1.0,
            edge_count: 5,
            coherence: 0.6,
        });
        let merged = merger.merge(&[first, second]).unwrap();
        let node = merged.super_nodes.get("node").unwrap();
        // mean(0.8, 0.6) == 0.7
        assert!((node.avg_coherence - 0.7).abs() < 0.001);
    }
}
#[cfg(test)]
mod global_mincut_estimate {
    use super::*;

    /// Local min-cut values produce a positive global estimate.
    #[test]
    fn test_mincut_from_local_values() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports: Vec<_> = (0..3u8)
            .map(|i| {
                let mut r = create_test_report(i, 0);
                r.local_mincut = 1.0 + f64::from(i);
                r
            })
            .collect();
        let merged = merger.merge(&reports).unwrap();
        assert!(merged.global_mincut_estimate > 0.0);
    }

    /// Boundary edges feed into the estimate as well.
    #[test]
    fn test_mincut_with_boundaries() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut report = create_test_report(1, 0);
        report.local_mincut = 5.0;
        add_test_boundary_edge(&mut report, "A", "B", 1.0);
        let merged = merger.merge(&[report]).unwrap();
        assert!(merged.global_mincut_estimate > 0.0);
    }
}
#[cfg(test)]
mod confidence_aggregation {
    use super::*;

    /// With identical inputs the geometric mean equals the input.
    #[test]
    fn test_geometric_mean_confidence() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let reports: Vec<_> = (0..3u8)
            .map(|i| {
                let mut r = create_test_report(i, 0);
                r.confidence = 0.8;
                r
            })
            .collect();
        let merged = merger.merge(&reports).unwrap();
        // geometric_mean(0.8, 0.8, 0.8) == 0.8
        assert!((merged.confidence - 0.8).abs() < 0.001);
    }

    /// BFT confidence ignores low-confidence outliers.
    #[test]
    fn test_bft_confidence() {
        let merger = ReportMerger::new(MergeStrategy::ByzantineFaultTolerant);
        let confidences = [0.9, 0.85, 0.88, 0.2, 0.1]; // two outliers at the end
        let reports: Vec<_> = confidences
            .iter()
            .enumerate()
            .map(|(i, &c)| {
                let mut r = create_test_report(i as u8, 0);
                r.confidence = c;
                r
            })
            .collect();
        let merged = merger.merge(&reports).unwrap();
        // Must not be dragged down by the 0.1 / 0.2 outliers.
        assert!(merged.confidence > 0.5);
    }
}
#[cfg(test)]
mod state_hash {
    use super::*;

    /// Report from tile 1 with one node "node1" at `weight`, hash computed.
    fn hashed_report(weight: f64) -> WorkerReport {
        let mut r = create_test_report(1, 0);
        add_test_node(&mut r, "node1", weight, 0.9);
        r.compute_state_hash();
        r
    }

    /// Computing the hash replaces the all-zero placeholder.
    #[test]
    fn test_state_hash_computed() {
        assert_ne!(hashed_report(1.0).state_hash, [0u8; 32]);
    }

    /// Identical content hashes identically.
    #[test]
    fn test_state_hash_deterministic() {
        assert_eq!(hashed_report(1.0).state_hash, hashed_report(1.0).state_hash);
    }

    /// Changing a field (here: node weight) changes the hash.
    #[test]
    fn test_state_hash_changes_with_data() {
        assert_ne!(hashed_report(1.0).state_hash, hashed_report(2.0).state_hash);
    }
}
#[cfg(test)]
mod multiple_nodes {
    use super::*;

    /// Reports with disjoint node sets merge into their union.
    #[test]
    fn test_merge_disjoint_nodes() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut left = create_test_report(1, 0);
        add_test_node(&mut left, "node_a", 1.0, 0.9);
        let mut right = create_test_report(2, 0);
        add_test_node(&mut right, "node_b", 2.0, 0.9);
        let merged = merger.merge(&[left, right]).unwrap();
        assert_eq!(merged.super_nodes.len(), 2);
        for key in ["node_a", "node_b"] {
            assert!(merged.super_nodes.contains_key(key));
        }
    }

    /// Overlapping node sets merge shared nodes and keep unique ones.
    #[test]
    fn test_merge_overlapping_nodes() {
        let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
        let mut left = create_test_report(1, 0);
        add_test_node(&mut left, "shared", 1.0, 0.9);
        add_test_node(&mut left, "only_r1", 2.0, 0.9);
        let mut right = create_test_report(2, 0);
        add_test_node(&mut right, "shared", 3.0, 0.9);
        add_test_node(&mut right, "only_r2", 4.0, 0.9);
        let merged = merger.merge(&[left, right]).unwrap();
        assert_eq!(merged.super_nodes.len(), 3);
        let shared = merged.super_nodes.get("shared").unwrap();
        // mean(1.0, 3.0) == 2.0
        assert!((shared.weight - 2.0).abs() < 0.001);
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    /// One report per weight, all publishing the node "node" at epoch 0
    /// (tile id = position in `weights`).
    fn node_reports(weights: &[f64]) -> Vec<WorkerReport> {
        weights
            .iter()
            .enumerate()
            .map(|(i, &w)| {
                let mut r = create_test_report(i as u8, 0);
                add_test_node(&mut r, "node", w, 0.9);
                r
            })
            .collect()
    }

    proptest! {
        /// Merging preserves the (shared) epoch.
        #[test]
        fn prop_merge_preserves_epoch(epoch in 0u64..1000) {
            let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
            let reports = [create_test_report(1, epoch), create_test_report(2, epoch)];
            let merged = merger.merge(&reports).unwrap();
            assert_eq!(merged.epoch, epoch);
        }

        /// worker_count equals the number of merged reports.
        #[test]
        fn prop_merge_counts_workers(n in 1usize..10) {
            let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
            let reports: Vec<_> = (0..n).map(|i| create_test_report(i as u8, 0)).collect();
            let merged = merger.merge(&reports).unwrap();
            assert_eq!(merged.worker_count, n);
        }

        /// SimpleAverage stays within [min, max] of the inputs.
        #[test]
        fn prop_average_in_range(weights in proptest::collection::vec(0.1f64..100.0, 2..10)) {
            let merger = ReportMerger::new(MergeStrategy::SimpleAverage);
            let merged = merger.merge(&node_reports(&weights)).unwrap();
            let node = merged.super_nodes.get("node").unwrap();
            let lo = weights.iter().copied().fold(f64::INFINITY, f64::min);
            let hi = weights.iter().copied().fold(f64::NEG_INFINITY, f64::max);
            assert!(node.weight >= lo);
            assert!(node.weight <= hi);
        }

        /// Maximum returns exactly the largest input weight.
        #[test]
        fn prop_maximum_is_largest(weights in proptest::collection::vec(0.1f64..100.0, 2..10)) {
            let merger = ReportMerger::new(MergeStrategy::Maximum);
            let merged = merger.merge(&node_reports(&weights)).unwrap();
            let node = merged.super_nodes.get("node").unwrap();
            let hi = weights.iter().copied().fold(f64::NEG_INFINITY, f64::max);
            assert!((node.weight - hi).abs() < 0.001);
        }
    }
}

View File

@@ -0,0 +1,608 @@
//! Comprehensive tests for permit token signing and verification
//!
//! Tests cover:
//! - Token creation and signing
//! - Signature verification
//! - TTL validation
//! - Security tests (invalid signatures, replay attacks, tamper detection)
use cognitum_gate_tilezero::permit::{
PermitState, PermitToken, TokenDecodeError, Verifier, VerifyError,
};
use cognitum_gate_tilezero::GateDecision;
/// Unsigned PERMIT token for `action_id` stamped with the current
/// wall-clock time, a 60 s TTL, and zeroed witness hash / signature.
fn create_test_token(action_id: &str, sequence: u64) -> PermitToken {
    let now_ns = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos() as u64;
    PermitToken {
        decision: GateDecision::Permit,
        action_id: action_id.to_string(),
        timestamp: now_ns,
        ttl_ns: 60_000_000_000, // 60 seconds
        witness_hash: [0u8; 32],
        sequence,
        signature: [0u8; 64],
    }
}
#[cfg(test)]
mod token_creation {
    use super::*;

    /// Freshly built tokens carry the requested fields and a real timestamp.
    #[test]
    fn test_token_fields() {
        let token = create_test_token("test-action", 42);
        assert_eq!(token.action_id, "test-action");
        assert_eq!(token.sequence, 42);
        assert_eq!(token.decision, GateDecision::Permit);
        assert_eq!(token.ttl_ns, 60_000_000_000);
        assert!(token.timestamp > 0);
    }

    /// Tokens can carry any of the three gate decisions.
    #[test]
    fn test_token_with_different_decisions() {
        let base = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let deferred = PermitToken {
            decision: GateDecision::Defer,
            ..base.clone()
        };
        let denied = PermitToken {
            decision: GateDecision::Deny,
            ..base.clone()
        };
        assert_eq!(base.decision, GateDecision::Permit);
        assert_eq!(deferred.decision, GateDecision::Defer);
        assert_eq!(denied.decision, GateDecision::Deny);
    }
}
#[cfg(test)]
mod ttl_validation {
    use super::*;

    /// Unsigned PERMIT token pinned to `timestamp` with the given TTL.
    fn token_at(timestamp: u64, ttl_ns: u64) -> PermitToken {
        PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp,
            ttl_ns,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        }
    }

    /// A token is valid immediately and halfway through its TTL.
    #[test]
    fn test_token_valid_within_ttl() {
        let now_ns = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos() as u64;
        let token = token_at(now_ns, 60_000_000_000); // 60 s TTL
        assert!(token.is_valid_time(now_ns));
        assert!(token.is_valid_time(now_ns + 30_000_000_000)); // +30 s
    }

    /// One nanosecond past timestamp + TTL the token is expired.
    #[test]
    fn test_token_invalid_after_ttl() {
        let (timestamp, ttl) = (1000000000u64, 60_000_000_000u64);
        let token = token_at(timestamp, ttl);
        assert!(!token.is_valid_time(timestamp + ttl + 1));
    }

    /// The expiry boundary itself is still valid (inclusive).
    #[test]
    fn test_token_valid_at_exactly_expiry() {
        let (timestamp, ttl) = (1000000000u64, 60_000_000_000u64);
        let token = token_at(timestamp, ttl);
        assert!(token.is_valid_time(timestamp + ttl));
    }

    /// A zero TTL is valid only at the issuing instant.
    #[test]
    fn test_zero_ttl() {
        let timestamp = 1000000000u64;
        let token = token_at(timestamp, 0);
        assert!(token.is_valid_time(timestamp));
        assert!(!token.is_valid_time(timestamp + 1));
    }
}
#[cfg(test)]
mod signing {
    use super::*;

    /// A fresh state exposes a verifier.
    #[test]
    fn test_permit_state_creation() {
        let state = PermitState::new();
        let _verifier = state.verifier();
    }

    /// Signing fills in a non-zero MAC.
    #[test]
    fn test_sign_token() {
        let state = PermitState::new();
        let signed = state.sign_token(create_test_token("test-action", 0));
        assert_ne!(signed.signature, [0u8; 64]);
    }

    /// Distinct tokens produce distinct MACs.
    #[test]
    fn test_sign_different_tokens_different_macs() {
        let state = PermitState::new();
        let first = state.sign_token(create_test_token("action-1", 0));
        let second = state.sign_token(create_test_token("action-2", 1));
        assert_ne!(first.signature, second.signature);
    }

    /// Signing the same content with the same key is deterministic.
    #[test]
    fn test_sign_deterministic() {
        let state = PermitState::new();
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let once = state.sign_token(token.clone());
        let twice = state.sign_token(token);
        assert_eq!(once.signature, twice.signature);
    }

    /// Sequence numbers start at zero and count up by one.
    #[test]
    fn test_sequence_incrementing() {
        let state = PermitState::new();
        for expected in [0, 1, 2] {
            assert_eq!(state.next_sequence(), expected);
        }
    }
}
#[cfg(test)]
mod verification {
    use super::*;

    /// A properly signed token verifies.
    #[test]
    fn test_verify_signed_token() {
        let state = PermitState::new();
        let verifier = state.verifier();
        let signed = state.sign_token(create_test_token("test-action", 0));
        assert!(verifier.verify(&signed).is_ok());
    }

    /// A token whose signature was never set must be rejected.
    #[test]
    fn test_verify_unsigned_token_fails() {
        let state = PermitState::new();
        let verifier = state.verifier();
        let unsigned = create_test_token("test-action", 0);
        let result = verifier.verify(&unsigned);
        assert!(result.is_err(), "Unsigned token should fail verification");
    }

    /// verify_full also enforces the TTL: a long-expired token fails even
    /// though its signature is valid.
    #[test]
    fn test_verify_full_checks_ttl() {
        let state = PermitState::new();
        let verifier = state.verifier();
        let expired = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1, // issued near the epoch
            ttl_ns: 1,    // and expired almost instantly
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed = state.sign_token(expired);
        let result = verifier.verify_full(&signed);
        assert!(matches!(result, Err(VerifyError::Expired)));
    }
}
#[cfg(test)]
mod signable_content {
    use super::*;

    /// Calling `signable_content` twice on one token yields equal bytes.
    #[test]
    fn test_signable_content_deterministic() {
        let token = create_test_token("test", 42);
        assert_eq!(token.signable_content(), token.signable_content());
    }

    /// Changing a signed field (here: sequence) changes the signable bytes.
    #[test]
    fn test_signable_content_changes_with_fields() {
        let original = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let mut bumped = original.clone();
        bumped.sequence = 1;
        assert_ne!(original.signable_content(), bumped.signable_content());
    }

    /// The MAC itself is excluded: tokens differing only in `signature`
    /// share identical signable content.
    #[test]
    fn test_signable_content_excludes_mac() {
        let mut first = create_test_token("test", 0);
        let mut second = first.clone();
        first.signature = [1u8; 64];
        second.signature = [2u8; 64];
        assert_eq!(first.signable_content(), second.signable_content());
    }

    /// The gate decision is part of the signed payload: flipping it changes
    /// the signable bytes.
    #[test]
    fn test_signable_content_includes_decision() {
        let permitted = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let denied = PermitToken {
            decision: GateDecision::Deny,
            ..permitted.clone()
        };
        assert_ne!(permitted.signable_content(), denied.signable_content());
    }
}
#[cfg(test)]
mod base64_encoding {
    use super::*;

    /// Encoding a token and decoding it back preserves the key fields.
    #[test]
    fn test_encode_decode_roundtrip() {
        let original = create_test_token("test-action", 42);
        let restored = PermitToken::decode_base64(&original.encode_base64()).unwrap();
        assert_eq!(original.action_id, restored.action_id);
        assert_eq!(original.sequence, restored.sequence);
        assert_eq!(original.decision, restored.decision);
    }

    /// Input that is not valid base64 is rejected with `InvalidBase64`.
    #[test]
    fn test_decode_invalid_base64() {
        let outcome = PermitToken::decode_base64("not valid base64!!!");
        assert!(matches!(outcome, Err(TokenDecodeError::InvalidBase64)));
    }

    /// Valid base64 that does not decode to token JSON fails with
    /// `InvalidJson`.
    #[test]
    fn test_decode_invalid_json() {
        let not_json =
            base64::Engine::encode(&base64::engine::general_purpose::STANDARD, b"not json");
        let outcome = PermitToken::decode_base64(&not_json);
        assert!(matches!(outcome, Err(TokenDecodeError::InvalidJson)));
    }

    /// The signature survives an encode/decode roundtrip unchanged.
    #[test]
    fn test_signed_token_encode_decode() {
        let state = PermitState::new();
        let signed = state.sign_token(create_test_token("test", 0));
        let restored = PermitToken::decode_base64(&signed.encode_base64()).unwrap();
        assert_eq!(signed.signature, restored.signature);
    }
}
#[cfg(test)]
mod security_tests {
    use super::*;

    /// Two independently generated keys must not produce the same signature
    /// for the same token.
    #[test]
    fn test_different_keys_different_signatures() {
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed_a = PermitState::new().sign_token(token.clone());
        let signed_b = PermitState::new().sign_token(token);
        assert_ne!(signed_a.signature, signed_b.signature);
    }

    /// A token signed under one key must not verify under another.
    #[test]
    fn test_cross_key_verification_fails() {
        let signer = PermitState::new();
        let other = PermitState::new();
        let signed = signer.sign_token(create_test_token("test", 0));
        assert!(
            other.verifier().verify(&signed).is_err(),
            "Cross-key verification should fail"
        );
    }

    /// Mutating a signed field after signing invalidates the signature.
    #[test]
    fn test_tamper_detection() {
        let state = PermitState::new();
        let checker = state.verifier();
        let mut signed = state.sign_token(create_test_token("test", 0));
        assert!(checker.verify(&signed).is_ok(), "Original should verify");
        signed.action_id = "tampered".to_string();
        assert!(
            checker.verify(&signed).is_err(),
            "Tampered token should fail verification"
        );
    }

    /// Monotonic sequences give each issuance a unique signature, so an old
    /// token cannot be replayed as a fresh one.
    #[test]
    fn test_sequence_prevents_replay() {
        let state = PermitState::new();
        let first = state.sign_token(create_test_token("test", state.next_sequence()));
        let second = state.sign_token(create_test_token("test", state.next_sequence()));
        assert_ne!(first.sequence, second.sequence);
        assert_ne!(first.signature, second.signature);
    }

    /// The witness hash is bound into the signature: different hashes give
    /// different signatures for otherwise identical tokens.
    #[test]
    fn test_witness_hash_binding() {
        let state = PermitState::new();
        let mut token_a = create_test_token("test", 0);
        token_a.witness_hash = [1u8; 32];
        let mut token_b = create_test_token("test", 0);
        token_b.witness_hash = [2u8; 32];
        let signed_a = state.sign_token(token_a);
        let signed_b = state.sign_token(token_b);
        assert_ne!(signed_a.signature, signed_b.signature);
    }
}
#[cfg(test)]
mod custom_key {
    use super::*;
    use ed25519_dalek::SigningKey;
    use rand::rngs::OsRng;

    /// A caller-supplied key works end-to-end: sign, then verify.
    #[test]
    fn test_with_custom_key() {
        let state = PermitState::with_key(SigningKey::generate(&mut OsRng));
        let signed = state.sign_token(create_test_token("test", 0));
        assert!(state.verifier().verify(&signed).is_ok());
    }

    /// Two states built from byte-identical keys sign identically.
    #[test]
    fn test_same_key_same_signatures() {
        let key_bytes: [u8; 32] = [42u8; 32];
        let state_a = PermitState::with_key(SigningKey::from_bytes(&key_bytes));
        let state_b = PermitState::with_key(SigningKey::from_bytes(&key_bytes));
        let token = PermitToken {
            decision: GateDecision::Permit,
            action_id: "test".to_string(),
            timestamp: 1000000000,
            ttl_ns: 60000,
            witness_hash: [0u8; 32],
            sequence: 0,
            signature: [0u8; 64],
        };
        let signed_a = state_a.sign_token(token.clone());
        let signed_b = state_b.sign_token(token);
        assert_eq!(signed_a.signature, signed_b.signature);
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        // Any token survives a base64 encode/decode roundtrip with its
        // identifying fields (including the decision) intact.
        #[test]
        fn prop_encode_decode_roundtrip(
            action_id in "[a-z]{1,20}",
            sequence in 0u64..1000,
            ttl in 1u64..1000000000
        ) {
            let token = PermitToken {
                decision: GateDecision::Permit,
                action_id,
                timestamp: 1000000000,
                ttl_ns: ttl,
                witness_hash: [0u8; 32],
                sequence,
                signature: [0u8; 64],
            };
            let encoded = token.encode_base64();
            let decoded = PermitToken::decode_base64(&encoded).unwrap();
            // prop_assert_eq! reports a shrunken counterexample on failure
            // instead of panicking mid-shrink like assert_eq! would.
            prop_assert_eq!(&token.action_id, &decoded.action_id);
            prop_assert_eq!(token.sequence, decoded.sequence);
            prop_assert_eq!(token.decision, decoded.decision);
        }

        // A token is valid at issuance, still valid just before expiry,
        // and invalid once the TTL has elapsed.
        #[test]
        fn prop_ttl_validity(timestamp in 1u64..1000000000000u64, ttl in 1u64..1000000000000u64) {
            let token = PermitToken {
                decision: GateDecision::Permit,
                action_id: "test".to_string(),
                timestamp,
                ttl_ns: ttl,
                witness_hash: [0u8; 32],
                sequence: 0,
                signature: [0u8; 64],
            };
            // Valid at start
            prop_assert!(token.is_valid_time(timestamp));
            // Valid just before expiry
            if ttl > 1 {
                prop_assert!(token.is_valid_time(timestamp + ttl - 1));
            }
            // Invalid after expiry
            prop_assert!(!token.is_valid_time(timestamp + ttl + 1));
        }

        // Signing always produces a non-zero MAC, whatever the action id.
        #[test]
        fn prop_signing_adds_mac(action_id in "[a-z]{1,10}") {
            let state = PermitState::new();
            let token = PermitToken {
                decision: GateDecision::Permit,
                action_id,
                timestamp: 1000000000,
                ttl_ns: 60000,
                witness_hash: [0u8; 32],
                sequence: 0,
                signature: [0u8; 64],
            };
            let signed = state.sign_token(token);
            prop_assert_ne!(signed.signature, [0u8; 64]);
        }
    }
}

View File

@@ -0,0 +1,544 @@
//! Comprehensive tests for witness receipts and hash chain integrity
//!
//! Tests cover:
//! - Receipt creation and hashing
//! - Hash chain verification
//! - Tamper detection
//! - Security tests (chain manipulation, replay attacks)
use cognitum_gate_tilezero::permit::PermitToken;
use cognitum_gate_tilezero::receipt::{
EvidentialWitness, PredictiveWitness, ReceiptLog, StructuralWitness, TimestampProof,
WitnessReceipt, WitnessSummary,
};
use cognitum_gate_tilezero::GateDecision;
/// Build an unsigned permit token whose timestamp advances with `sequence`.
fn create_test_token(sequence: u64, action_id: &str) -> PermitToken {
    PermitToken {
        decision: GateDecision::Permit,
        action_id: String::from(action_id),
        timestamp: 1000000000 + sequence * 1000,
        ttl_ns: 60_000_000_000,
        witness_hash: [0u8; 32],
        sequence,
        signature: [0u8; 64],
    }
}
/// Build a representative witness summary shared by the receipt tests.
fn create_test_summary() -> WitnessSummary {
    let structural = StructuralWitness {
        cut_value: 10.0,
        partition: String::from("stable"),
        critical_edges: 5,
        boundary: vec![String::from("edge1"), String::from("edge2")],
    };
    let predictive = PredictiveWitness {
        set_size: 8,
        coverage: 0.9,
    };
    let evidential = EvidentialWitness {
        e_value: 150.0,
        verdict: String::from("accept"),
    };
    WitnessSummary {
        structural,
        predictive,
        evidential,
    }
}
/// Build a receipt at `sequence` chained onto `previous_hash`.
fn create_test_receipt(sequence: u64, previous_hash: [u8; 32]) -> WitnessReceipt {
    let timestamp = 1000000000 + sequence * 1000;
    WitnessReceipt {
        sequence,
        token: create_test_token(sequence, &format!("action-{}", sequence)),
        previous_hash,
        witness_summary: create_test_summary(),
        timestamp_proof: TimestampProof {
            timestamp,
            previous_receipt_hash: previous_hash,
            merkle_root: [0u8; 32],
        },
    }
}
#[cfg(test)]
mod witness_summary {
    use super::*;

    /// An empty summary carries neutral defaults for all three witnesses.
    #[test]
    fn test_empty_summary() {
        let summary = WitnessSummary::empty();
        assert_eq!(summary.structural.cut_value, 0.0);
        assert_eq!(summary.predictive.set_size, 0);
        assert_eq!(summary.evidential.e_value, 1.0);
    }

    /// Hashing the same summary twice yields identical digests.
    #[test]
    fn test_summary_hash_deterministic() {
        let summary = create_test_summary();
        assert_eq!(summary.hash(), summary.hash());
    }

    /// Changing a field (here: cut_value) changes the digest.
    #[test]
    fn test_summary_hash_unique() {
        let baseline = create_test_summary();
        let mut altered = create_test_summary();
        altered.structural.cut_value = 20.0;
        assert_ne!(baseline.hash(), altered.hash());
    }

    /// The JSON projection exposes the expected nested numeric fields.
    #[test]
    fn test_summary_to_json() {
        let json = create_test_summary().to_json();
        assert!(json.is_object());
        assert!(json["structural"]["cut_value"].is_number());
        assert!(json["predictive"]["set_size"].is_number());
        assert!(json["evidential"]["e_value"].is_number());
    }
}
#[cfg(test)]
mod receipt_hashing {
    use super::*;

    /// A receipt's hash is never the all-zero digest.
    #[test]
    fn test_receipt_hash_nonzero() {
        let receipt = create_test_receipt(0, [0u8; 32]);
        let hash = receipt.hash();
        assert_ne!(hash, [0u8; 32]);
    }

    /// Hashing is deterministic for an unchanged receipt.
    #[test]
    fn test_receipt_hash_deterministic() {
        let receipt = create_test_receipt(0, [0u8; 32]);
        let hash1 = receipt.hash();
        let hash2 = receipt.hash();
        assert_eq!(hash1, hash2);
    }

    /// The sequence number is part of the hashed content.
    #[test]
    fn test_receipt_hash_changes_with_sequence() {
        let receipt1 = create_test_receipt(0, [0u8; 32]);
        let receipt2 = create_test_receipt(1, [0u8; 32]);
        assert_ne!(receipt1.hash(), receipt2.hash());
    }

    /// The previous-hash link is part of the hashed content.
    #[test]
    fn test_receipt_hash_changes_with_previous() {
        let receipt1 = create_test_receipt(0, [0u8; 32]);
        let receipt2 = create_test_receipt(0, [1u8; 32]);
        assert_ne!(receipt1.hash(), receipt2.hash());
    }

    /// The witness summary is part of the hashed content.
    #[test]
    fn test_receipt_hash_includes_witness() {
        // `receipt1` is never mutated, so it must not be declared `mut`
        // (the original `let mut` triggered an unused_mut warning).
        let receipt1 = create_test_receipt(0, [0u8; 32]);
        let mut receipt2 = create_test_receipt(0, [0u8; 32]);
        receipt2.witness_summary.structural.cut_value = 99.0;
        assert_ne!(receipt1.hash(), receipt2.hash());
    }
}
#[cfg(test)]
mod receipt_log {
    use super::*;

    /// A new log starts empty with no latest sequence.
    #[test]
    fn test_new_log_empty() {
        let log = ReceiptLog::new();
        assert!(log.is_empty());
        assert_eq!(log.len(), 0);
        assert_eq!(log.latest_sequence(), None);
    }

    /// Before any append, the chain head is the all-zero genesis hash.
    #[test]
    fn test_genesis_hash() {
        assert_eq!(ReceiptLog::new().last_hash(), [0u8; 32]);
    }

    /// Appending one receipt updates length, latest sequence, and head hash.
    #[test]
    fn test_append_single() {
        let mut log = ReceiptLog::new();
        let receipt = create_test_receipt(0, log.last_hash());
        log.append(receipt);
        assert_eq!(log.len(), 1);
        assert_eq!(log.latest_sequence(), Some(0));
        assert_ne!(log.last_hash(), [0u8; 32]);
    }

    /// Appending several receipts tracks the count and latest sequence.
    #[test]
    fn test_append_multiple() {
        let mut log = ReceiptLog::new();
        for seq in 0..5 {
            let receipt = create_test_receipt(seq, log.last_hash());
            log.append(receipt);
        }
        assert_eq!(log.len(), 5);
        assert_eq!(log.latest_sequence(), Some(4));
    }

    /// A stored receipt can be fetched back by its sequence number.
    #[test]
    fn test_get_receipt() {
        let mut log = ReceiptLog::new();
        let receipt = create_test_receipt(0, log.last_hash());
        log.append(receipt);
        match log.get(0) {
            Some(found) => assert_eq!(found.sequence, 0),
            None => panic!("receipt 0 should exist"),
        }
    }

    /// Lookups for absent sequences return None rather than panicking.
    #[test]
    fn test_get_nonexistent() {
        let log = ReceiptLog::new();
        assert!(log.get(0).is_none());
        assert!(log.get(999).is_none());
    }
}
#[cfg(test)]
mod hash_chain_verification {
    use super::*;

    /// Verifying an empty log fails: there is no receipt at sequence 0.
    #[test]
    fn test_verify_empty_chain() {
        let log = ReceiptLog::new();
        assert!(log.verify_chain_to(0).is_err());
    }

    /// A one-receipt chain rooted at the genesis hash verifies.
    #[test]
    fn test_verify_single_receipt() {
        let mut log = ReceiptLog::new();
        let receipt = create_test_receipt(0, log.last_hash());
        log.append(receipt);
        assert!(log.verify_chain_to(0).is_ok());
    }

    /// Full and partial prefixes of a well-formed chain all verify.
    #[test]
    fn test_verify_chain_multiple() {
        let mut log = ReceiptLog::new();
        for seq in 0..10 {
            let receipt = create_test_receipt(seq, log.last_hash());
            log.append(receipt);
        }
        // Full chain, then two partial prefixes.
        for upto in [9, 0, 5] {
            assert!(log.verify_chain_to(upto).is_ok());
        }
    }

    /// Asking to verify past the latest receipt is an error.
    #[test]
    fn test_verify_beyond_latest() {
        let mut log = ReceiptLog::new();
        let receipt = create_test_receipt(0, log.last_hash());
        log.append(receipt);
        assert!(log.verify_chain_to(1).is_err());
    }
}
#[cfg(test)]
mod tamper_detection {
    use super::*;

    /// Baseline: an untampered chain built in order verifies end to end.
    #[test]
    fn test_detect_modified_hash() {
        let mut log = ReceiptLog::new();
        for seq in 0..5 {
            let receipt = create_test_receipt(seq, log.last_hash());
            log.append(receipt);
        }
        assert!(log.verify_chain_to(4).is_ok());
    }

    /// A missing sequence number breaks the chain and fails verification.
    #[test]
    fn test_chain_with_gap() {
        let mut log = ReceiptLog::new();
        let receipt0 = create_test_receipt(0, log.last_hash());
        log.append(receipt0);
        // Skip sequence 1 entirely, then append 2 — the link is broken.
        let receipt2 = create_test_receipt(2, log.last_hash());
        log.append(receipt2);
        assert!(log.verify_chain_to(2).is_err());
    }
}
#[cfg(test)]
mod timestamp_proof {
    use super::*;

    /// The proof struct stores its fields verbatim.
    #[test]
    fn test_timestamp_proof_structure() {
        let proof = TimestampProof {
            timestamp: 1000000000,
            previous_receipt_hash: [1u8; 32],
            merkle_root: [2u8; 32],
        };
        assert_eq!(proof.timestamp, 1000000000);
        assert_eq!(proof.previous_receipt_hash, [1u8; 32]);
        assert_eq!(proof.merkle_root, [2u8; 32]);
    }

    /// A receipt embeds a proof pointing at the previous receipt's hash.
    #[test]
    fn test_receipt_contains_timestamp_proof() {
        let receipt = create_test_receipt(5, [3u8; 32]);
        assert_eq!(receipt.timestamp_proof.previous_receipt_hash, [3u8; 32]);
        assert!(receipt.timestamp_proof.timestamp > 0);
    }

    /// Receipts appended in order carry strictly increasing timestamps.
    #[test]
    fn test_timestamp_ordering() {
        let mut log = ReceiptLog::new();
        for seq in 0..5 {
            let receipt = create_test_receipt(seq, log.last_hash());
            log.append(receipt);
        }
        let mut last_seen = 0;
        for seq in 0..5 {
            let ts = log.get(seq).unwrap().timestamp_proof.timestamp;
            assert!(ts > last_seen);
            last_seen = ts;
        }
    }
}
#[cfg(test)]
mod structural_witness {
    use super::*;

    /// Field values are stored verbatim.
    #[test]
    fn test_structural_witness_fields() {
        let witness = StructuralWitness {
            cut_value: 15.0,
            partition: "fragile".to_string(),
            critical_edges: 3,
            boundary: vec!["e1".to_string(), "e2".to_string(), "e3".to_string()],
        };
        assert_eq!(witness.cut_value, 15.0);
        assert_eq!(witness.partition, "fragile");
        assert_eq!(witness.critical_edges, 3);
        assert_eq!(witness.boundary.len(), 3);
    }

    /// A serde JSON roundtrip preserves the witness content.
    #[test]
    fn test_structural_witness_serialization() {
        let witness = StructuralWitness {
            cut_value: 10.0,
            partition: "stable".to_string(),
            critical_edges: 2,
            boundary: vec![],
        };
        let encoded = serde_json::to_string(&witness).unwrap();
        let decoded: StructuralWitness = serde_json::from_str(&encoded).unwrap();
        assert_eq!(witness.cut_value, decoded.cut_value);
        assert_eq!(witness.partition, decoded.partition);
    }
}
#[cfg(test)]
mod predictive_witness {
    use super::*;

    /// Field values are stored verbatim.
    #[test]
    fn test_predictive_witness_fields() {
        let witness = PredictiveWitness {
            set_size: 12,
            coverage: 0.95,
        };
        assert_eq!(witness.set_size, 12);
        assert_eq!(witness.coverage, 0.95);
    }

    /// A serde JSON roundtrip preserves the witness content (coverage is
    /// compared with a small tolerance since it is a float).
    #[test]
    fn test_predictive_witness_serialization() {
        let witness = PredictiveWitness {
            set_size: 5,
            coverage: 0.9,
        };
        let encoded = serde_json::to_string(&witness).unwrap();
        let decoded: PredictiveWitness = serde_json::from_str(&encoded).unwrap();
        assert_eq!(witness.set_size, decoded.set_size);
        assert!((witness.coverage - decoded.coverage).abs() < 0.001);
    }
}
#[cfg(test)]
mod evidential_witness {
    use super::*;

    /// Field values are stored verbatim.
    #[test]
    fn test_evidential_witness_fields() {
        let witness = EvidentialWitness {
            e_value: 250.0,
            verdict: "accept".to_string(),
        };
        assert_eq!(witness.e_value, 250.0);
        assert_eq!(witness.verdict, "accept");
    }

    /// All three verdict strings round through the struct unchanged.
    #[test]
    fn test_evidential_witness_verdicts() {
        let cases = [
            (200.0, "accept"),
            (50.0, "continue"),
            (0.005, "reject"),
        ];
        for (e_value, expected) in cases {
            let witness = EvidentialWitness {
                e_value,
                verdict: expected.to_string(),
            };
            assert_eq!(witness.verdict, expected);
        }
    }
}
#[cfg(test)]
mod security_tests {
    use super::*;
    use std::collections::HashSet;

    /// Builds a legitimate chain as the forgery baseline; a receipt with a
    /// wrong previous hash is exercised by the gap tests above.
    #[test]
    fn test_forged_receipt_detection() {
        let mut log = ReceiptLog::new();
        for seq in 0..3 {
            let receipt = create_test_receipt(seq, log.last_hash());
            log.append(receipt);
        }
    }

    /// One hundred distinct receipts must produce one hundred distinct
    /// hashes.
    #[test]
    fn test_hash_collision_resistance() {
        let mut seen = HashSet::new();
        for seq in 0..100 {
            let digest = create_test_receipt(seq, [seq as u8; 32]).hash();
            assert!(seen.insert(digest), "Hash collision at sequence {}", seq);
        }
    }

    /// Every hashed field contributes to the digest.
    #[test]
    fn test_all_fields_affect_hash() {
        let base_hash = create_test_receipt(0, [0u8; 32]).hash();
        // Modify sequence
        let mut with_sequence = create_test_receipt(0, [0u8; 32]);
        with_sequence.sequence = 1;
        assert_ne!(base_hash, with_sequence.hash());
        // Modify previous_hash
        let with_previous = create_test_receipt(0, [1u8; 32]);
        assert_ne!(base_hash, with_previous.hash());
        // Modify witness
        let mut with_witness = create_test_receipt(0, [0u8; 32]);
        with_witness.witness_summary.evidential.e_value = 0.0;
        assert_ne!(base_hash, with_witness.hash());
    }

    /// Sequences appended in order remain strictly monotonic.
    #[test]
    fn test_sequence_monotonicity() {
        let mut log = ReceiptLog::new();
        let mut previous: Option<u64> = None;
        for seq in 0..10 {
            let receipt = create_test_receipt(seq, log.last_hash());
            log.append(receipt);
            if let Some(prev) = previous {
                assert!(log.get(seq).unwrap().sequence > prev);
            }
            previous = Some(seq);
        }
    }
}
// Property-based tests
#[cfg(test)]
mod property_tests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        // Hashing is deterministic for any sequence / previous-hash pair.
        // prop_assert_eq! reports a shrunken counterexample on failure
        // instead of panicking mid-shrink like assert_eq! would.
        #[test]
        fn prop_hash_deterministic(seq in 0u64..1000, prev in proptest::array::uniform32(0u8..255)) {
            let receipt = create_test_receipt(seq, prev);
            prop_assert_eq!(receipt.hash(), receipt.hash());
        }

        // Distinct sequence numbers always produce distinct hashes.
        #[test]
        fn prop_different_sequences_different_hashes(seq1 in 0u64..1000, seq2 in 0u64..1000) {
            prop_assume!(seq1 != seq2);
            let r1 = create_test_receipt(seq1, [0u8; 32]);
            let r2 = create_test_receipt(seq2, [0u8; 32]);
            prop_assert_ne!(r1.hash(), r2.hash());
        }

        // A chain of any length n holds n receipts and verifies fully.
        #[test]
        fn prop_chain_grows_correctly(n in 1usize..20) {
            let mut log = ReceiptLog::new();
            for i in 0..n {
                let receipt = create_test_receipt(i as u64, log.last_hash());
                log.append(receipt);
            }
            prop_assert_eq!(log.len(), n);
            prop_assert!(log.verify_chain_to((n - 1) as u64).is_ok());
        }
    }
}