967 lines
29 KiB
Rust
967 lines
29 KiB
Rust
//! Comprehensive tests for ruqu-exotic: 8 exotic quantum-classical hybrid algorithms.
|
|
//!
|
|
//! These tests VALIDATE the exotic concepts, not just the plumbing.
|
|
//! Each section proves a structurally new capability.
|
|
|
|
use ruqu_core::gate::Gate;
|
|
use ruqu_core::types::Complex;
|
|
|
|
const EPSILON: f64 = 1e-6;
|
|
|
|
// ===========================================================================
|
|
// 1. Quantum-Shaped Memory Decay
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::quantum_decay::*;
|
|
|
|
#[test]
fn test_fresh_embedding_full_fidelity() {
    // A newly constructed embedding has experienced no decoherence yet,
    // so its fidelity must be 1.0 (within floating-point tolerance).
    let fresh = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.5, 0.3], 0.1);
    let deviation = (fresh.fidelity() - 1.0).abs();
    assert!(deviation < EPSILON, "Fresh embedding must have fidelity 1.0");
}
|
|
|
|
#[test]
fn test_decoherence_reduces_fidelity() {
    // Decohering for a nonzero duration must strictly reduce fidelity.
    let mut embedding = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.5, 0.3], 0.1);
    embedding.decohere(10.0, 42);
    let fidelity_after = embedding.fidelity();
    assert!(
        fidelity_after < 1.0 - EPSILON,
        "Decohered embedding fidelity must drop below 1.0"
    );
}
|
|
|
|
#[test]
fn test_more_decoherence_lower_fidelity() {
    // With identical seeds, a longer decoherence interval must cost more
    // fidelity than a shorter one.
    let mut briefly = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    let mut heavily = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    briefly.decohere(1.0, 42);
    heavily.decohere(20.0, 42);
    let (fid_brief, fid_heavy) = (briefly.fidelity(), heavily.fidelity());
    assert!(
        fid_heavy < fid_brief,
        "More decoherence (dt=20) must produce lower fidelity than less (dt=1): {} vs {}",
        fid_heavy,
        fid_brief
    );
}
|
|
|
|
#[test]
fn test_coherence_threshold() {
    // After heavy decoherence the embedding must no longer satisfy a
    // strict (0.99) coherence threshold.
    let mut embedding = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.3);
    embedding.decohere(50.0, 99);
    let passes_strict_check = embedding.is_coherent(0.99);
    assert!(
        !passes_strict_check,
        "Heavily decohered embedding should fail coherence check at threshold 0.99"
    );
}
|
|
|
|
#[test]
fn test_similarity_decreases_with_decay() {
    // Decohering one of two initially identical embeddings must reduce
    // their mutual quantum similarity compared to the fresh pair.
    let reference = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    let mut decaying = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    let similarity_before = reference.quantum_similarity(&decaying);
    decaying.decohere(15.0, 42);
    let similarity_after = reference.quantum_similarity(&decaying);
    assert!(
        similarity_after < similarity_before,
        "Similarity must decrease after decoherence: {} -> {}",
        similarity_before,
        similarity_after
    );
}
|
|
|
|
#[test]
fn test_batch_decohere_filters() {
    // Five embeddings undergo heavy decoherence (dt=30) against a strict
    // coherence threshold (0.999); at least one must be filtered out.
    let mut batch: Vec<QuantumEmbedding> = (0..5)
        .map(|i| QuantumEmbedding::from_embedding(&[1.0, i as f64 * 0.1, 0.3, 0.1], 0.2))
        .collect();
    let coherent = decohere_batch(&mut batch, 30.0, 0.999, 42);
    // FIX: the original condition `coherent.len() < batch.len() || coherent.is_empty()`
    // had a dead second arm — for this non-empty batch, an empty `coherent`
    // already satisfies `len() < batch.len()`. The strict comparison alone
    // states the intended invariant.
    assert!(
        coherent.len() < batch.len(),
        "Batch decohere should filter some embeddings"
    );
}
|
|
|
|
#[test]
fn test_roundtrip_embedding() {
    // Encoding a classical vector and decoding it back must preserve the
    // dimensionality (the values themselves may be renormalized).
    let source = vec![1.0, 0.0, 0.5, 0.3];
    let encoded = QuantumEmbedding::from_embedding(&source, 0.1);
    let decoded = encoded.to_embedding();
    assert_eq!(
        decoded.len(),
        4,
        "Recovered embedding should have original length"
    );
}
|
|
|
|
// ===========================================================================
|
|
// 2. Interference-Based Concept Disambiguation
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::interference_search::*;
|
|
|
|
#[test]
fn test_constructive_interference() {
    // "bank" is polysemous (financial institution vs. riverbank); a
    // money-flavored context must constructively boost the financial sense.
    let bank = ConceptSuperposition::uniform(
        "bank",
        vec![
            ("financial".into(), vec![1.0, 0.0, 0.0]),
            ("river".into(), vec![0.0, 1.0, 0.0]),
        ],
    );
    let money_context = vec![0.9, 0.1, 0.0];
    let scores = bank.interfere(&money_context);
    // Pull out the two meanings by label for comparison.
    let financial = scores.iter().find(|s| s.label == "financial").unwrap();
    let river = scores.iter().find(|s| s.label == "river").unwrap();
    assert!(
        financial.probability > river.probability,
        "Financial context should boost financial meaning: {} > {}",
        financial.probability,
        river.probability
    );
}
|
|
|
|
#[test]
fn test_destructive_interference_with_opposite_phases() {
    // Two meanings with OPPOSITE phases but nearly the same embedding
    // direction: phase matters in amplitude space, so interference must
    // still yield one score per meaning for downstream inspection.
    let concept = ConceptSuperposition::with_amplitudes(
        "ambiguous",
        vec![
            ("positive".into(), vec![1.0, 0.0], Complex::new(1.0, 0.0)),
            ("negative".into(), vec![0.8, 0.2], Complex::new(-1.0, 0.0)),
        ],
    );
    // Context aligned with both embeddings.
    let context = vec![1.0, 0.0];
    let scores = concept.interfere(&context);
    // FIX: `assert_eq!` instead of `assert!(a == b)` — it reports both
    // sides on failure (the idiomatic form clippy suggests).
    assert_eq!(scores.len(), 2, "Should have 2 scores");
}
|
|
|
|
#[test]
fn test_collapse_returns_valid_label() {
    // Collapsing a two-meaning superposition must yield one of its labels.
    let concept = ConceptSuperposition::uniform(
        "test",
        vec![
            ("alpha".into(), vec![1.0, 0.0]),
            ("beta".into(), vec![0.0, 1.0]),
        ],
    );
    let context = vec![1.0, 0.0];
    let picked = concept.collapse(&context, 42);
    let is_known_label = picked == "alpha" || picked == "beta";
    assert!(
        is_known_label,
        "Collapse must return a valid label, got: {}",
        picked
    );
}
|
|
|
|
#[test]
fn test_dominant_returns_highest() {
    // dominant() must select the meaning carrying the largest amplitude.
    let concept = ConceptSuperposition::with_amplitudes(
        "test",
        vec![
            ("small".into(), vec![1.0], Complex::new(0.1, 0.0)),
            ("big".into(), vec![1.0], Complex::new(0.9, 0.0)),
        ],
    );
    let top = concept.dominant().unwrap();
    assert_eq!(
        top.label, "big",
        "Dominant should be the highest amplitude meaning"
    );
}
|
|
|
|
#[test]
fn test_interference_search_ranking() {
    // A query aligned with one concept's sole meaning must rank that
    // concept first in the interference search results.
    let corpus = vec![
        ConceptSuperposition::uniform("relevant", vec![("match".into(), vec![1.0, 0.0, 0.0])]),
        ConceptSuperposition::uniform("irrelevant", vec![("miss".into(), vec![0.0, 0.0, 1.0])]),
    ];
    let query = vec![1.0, 0.0, 0.0];
    let ranked = interference_search(&corpus, &query);
    assert!(!ranked.is_empty(), "Search should return results");
    assert_eq!(
        ranked[0].concept_id, "relevant",
        "Most relevant concept should rank first"
    );
}
|
|
|
|
// ===========================================================================
|
|
// 3. Quantum-Driven Search Collapse
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::quantum_collapse::*;
|
|
|
|
#[test]
fn test_collapse_valid_index() {
    // A collapse outcome must index a real candidate, never any padding
    // the search structure may carry internally.
    let pool = vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]];
    let search = QuantumCollapseSearch::new(pool);
    let outcome = search.search(&[1.0, 0.0], 3, 42);
    assert!(
        outcome.index < search.num_real(),
        "Collapse index {} should be < num_real {}",
        outcome.index,
        search.num_real()
    );
}
|
|
|
|
#[test]
fn test_distribution_stability() {
    // Across 200 shots, the candidate most similar to the query (index 0)
    // should dominate the sampled distribution.
    let basis = vec![
        vec![1.0, 0.0, 0.0],
        vec![0.0, 1.0, 0.0],
        vec![0.0, 0.0, 1.0],
    ];
    let search = QuantumCollapseSearch::new(basis);
    let histogram = search.search_distribution(&[1.0, 0.0, 0.0], 3, 200, 42);
    let best = histogram.iter().max_by_key(|entry| entry.1).unwrap();
    assert!(
        best.1 > 30,
        "Top candidate should appear in >15% of 200 shots, got {} at index {}",
        best.1,
        best.0
    );
}
|
|
|
|
#[test]
fn test_different_seeds_can_differ() {
    // Run the same search under 20 different seeds. The stochastic collapse
    // may pick different candidates, but every outcome must be in range.
    let search = QuantumCollapseSearch::new(vec![vec![0.5, 0.5], vec![0.5, -0.5]]);
    let mut seen = std::collections::HashSet::new();
    for seed in 0..20 {
        let outcome = search.search(&[0.5, 0.5], 2, seed);
        // FIX: the original test only asserted `results.len() >= 1`, which is
        // vacuous after 20 unconditional inserts. Check the meaningful
        // invariant instead: every seed's collapse hits a real candidate.
        assert!(
            outcome.index < search.num_real(),
            "Seed {} produced out-of-range index {}",
            seed,
            outcome.index
        );
        seen.insert(outcome.index);
    }
    assert!(!seen.is_empty(), "Should get at least one result");
}
|
|
|
|
// ===========================================================================
|
|
// 4. Error-Corrected Reasoning Traces
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::reasoning_qec::*;
|
|
|
|
#[test]
fn test_no_noise_clean_syndrome() {
    // A noiseless 3-step trace yields 2 adjacent-parity syndrome bits and
    // must always be decodable.
    let steps: Vec<ReasoningStep> = ["premise", "inference", "conclusion"]
        .iter()
        .map(|&name| ReasoningStep {
            label: name.into(),
            confidence: 1.0,
        })
        .collect();
    let config = ReasoningQecConfig {
        num_steps: 3,
        noise_rate: 0.0,
        seed: Some(42),
    };
    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();
    assert_eq!(
        result.syndrome.len(),
        2,
        "3 steps should produce 2 syndrome bits"
    );
    assert!(result.is_decodable, "Zero-noise trace must be decodable");
}
|
|
|
|
#[test]
fn test_high_noise_triggers_syndrome() {
    // With noise_rate=0.5 and a fixed seed, roughly half the steps flip —
    // adjacent steps then disagree, so parity checks fire. Structurally,
    // 5 steps must always yield 4 syndrome bits.
    let steps: Vec<ReasoningStep> = ["a", "b", "c", "d", "e"]
        .iter()
        .map(|&name| ReasoningStep {
            label: name.into(),
            confidence: 1.0,
        })
        .collect();
    let config = ReasoningQecConfig {
        num_steps: 5,
        noise_rate: 0.5,
        seed: Some(42),
    };
    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();
    assert_eq!(
        result.syndrome.len(),
        4,
        "5 steps should produce 4 syndrome bits"
    );
    assert_eq!(result.num_steps, 5);
}
|
|
|
|
#[test]
fn test_syndrome_length() {
    // General invariant: N steps produce N-1 adjacent-parity syndrome bits.
    const N: usize = 6;
    let steps: Vec<_> = (0..N)
        .map(|i| ReasoningStep {
            label: format!("step_{}", i),
            confidence: 0.9,
        })
        .collect();
    let config = ReasoningQecConfig {
        num_steps: N,
        noise_rate: 0.0,
        seed: Some(42),
    };
    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();
    assert_eq!(
        result.syndrome.len(),
        N - 1,
        "N steps should give N-1 syndrome bits"
    );
}
|
|
|
|
// ===========================================================================
|
|
// 5. Quantum-Modulated Agent Swarms
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::swarm_interference::*;
|
|
|
|
#[test]
fn test_unanimous_support() {
    // Five fully-confident supporters interfere constructively:
    // total amplitude 5 at phase 0 gives probability |5|^2 = 25.
    let mut swarm = SwarmInterference::new();
    let action = Action {
        id: "deploy".into(),
        description: "Deploy to prod".into(),
    };
    for agent in 0..5 {
        let contribution =
            AgentContribution::new(&format!("agent_{}", agent), action.clone(), 1.0, true);
        swarm.contribute(contribution);
    }
    let decisions = swarm.decide();
    assert!(!decisions.is_empty());
    assert!(
        decisions[0].probability > 20.0,
        "Unanimous support: prob should be high"
    );
}
|
|
|
|
#[test]
fn test_opposition_cancels() {
    // Three supporters and three opponents carry opposite phases, so the
    // net amplitude is 3 - 3 = 0 and the probability collapses to ~0.
    let mut swarm = SwarmInterference::new();
    let action = Action {
        id: "risky".into(),
        description: "Risky action".into(),
    };
    for supporter in 0..3 {
        let vote = AgentContribution::new(&format!("pro_{}", supporter), action.clone(), 1.0, true);
        swarm.contribute(vote);
    }
    for opponent in 0..3 {
        let veto = AgentContribution::new(&format!("con_{}", opponent), action.clone(), 1.0, false);
        swarm.contribute(veto);
    }
    let decisions = swarm.decide();
    assert!(!decisions.is_empty());
    assert!(
        decisions[0].probability < 0.01,
        "Equal support/opposition should cancel: prob = {}",
        decisions[0].probability
    );
}
|
|
|
|
#[test]
fn test_partial_opposition_reduces() {
    // A single opponent added to an otherwise supportive swarm must lower
    // the action's probability relative to pure support.
    let action = Action {
        id: "a".into(),
        description: "".into(),
    };

    // Shared setup: three confident supporters.
    let add_supporters = |swarm: &mut SwarmInterference| {
        for i in 0..3 {
            swarm.contribute(AgentContribution::new(
                &format!("p{}", i),
                action.clone(),
                1.0,
                true,
            ));
        }
    };

    // Baseline: supporters only.
    let mut pure = SwarmInterference::new();
    add_supporters(&mut pure);
    let pure_prob = pure.decide()[0].probability;

    // Same supporters plus one opposing agent.
    let mut mixed = SwarmInterference::new();
    add_supporters(&mut mixed);
    mixed.contribute(AgentContribution::new("opp", action.clone(), 1.0, false));
    let mixed_prob = mixed.decide()[0].probability;

    assert!(
        mixed_prob < pure_prob,
        "Opposition should reduce probability: {} < {}",
        mixed_prob,
        pure_prob
    );
}
|
|
|
|
#[test]
fn test_deadlock_detection() {
    // Two distinct actions with identical support are indistinguishable,
    // which the swarm must report as a deadlock.
    let mut swarm = SwarmInterference::new();
    let first = Action {
        id: "a".into(),
        description: "".into(),
    };
    let second = Action {
        id: "b".into(),
        description: "".into(),
    };
    swarm.contribute(AgentContribution::new("pro_a", first, 1.0, true));
    swarm.contribute(AgentContribution::new("pro_b", second, 1.0, true));
    assert!(
        swarm.is_deadlocked(0.01),
        "Equal support for two actions should deadlock"
    );
}
|
|
|
|
#[test]
fn test_winner_picks_highest() {
    // Three supporters back action "a" against one for "b"; "a" must win.
    let mut swarm = SwarmInterference::new();
    let favored = Action {
        id: "a".into(),
        description: "".into(),
    };
    let underdog = Action {
        id: "b".into(),
        description: "".into(),
    };
    for i in 0..3 {
        let vote = AgentContribution::new(&format!("a{}", i), favored.clone(), 1.0, true);
        swarm.contribute(vote);
    }
    swarm.contribute(AgentContribution::new("b0", underdog, 1.0, true));
    let winner = swarm.winner().unwrap();
    assert_eq!(winner.action.id, "a", "Action with more support should win");
}
|
|
|
|
// ===========================================================================
|
|
// 6. Syndrome-Based AI Self Diagnosis
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::syndrome_diagnosis::*;
|
|
|
|
#[test]
fn test_healthy_system() {
    // With a zero fault-injection rate, no diagnostic round may report
    // injected faults.
    let components: Vec<Component> = ["A", "B", "C"]
        .iter()
        .map(|&name| Component {
            id: name.into(),
            health: 1.0,
        })
        .collect();
    let connections = vec![
        Connection {
            from: 0,
            to: 1,
            strength: 1.0,
        },
        Connection {
            from: 1,
            to: 2,
            strength: 1.0,
        },
    ];
    let diag = SystemDiagnostics::new(components, connections);
    let config = DiagnosisConfig {
        fault_injection_rate: 0.0,
        num_rounds: 10,
        seed: 42,
    };
    let result = diag.diagnose(&config).unwrap();
    for round in &result.rounds {
        assert!(
            round.injected_faults.is_empty(),
            "No faults should be injected at rate 0"
        );
    }
}
|
|
|
|
#[test]
fn test_fault_injection_triggers() {
    // A 100% fault-injection rate must inject at least one fault across
    // the diagnostic rounds.
    let components: Vec<Component> = ["A", "B"]
        .iter()
        .map(|&name| Component {
            id: name.into(),
            health: 1.0,
        })
        .collect();
    let connections = vec![Connection {
        from: 0,
        to: 1,
        strength: 1.0,
    }];
    let diag = SystemDiagnostics::new(components, connections);
    let config = DiagnosisConfig {
        fault_injection_rate: 1.0,
        num_rounds: 10,
        seed: 42,
    };
    let result = diag.diagnose(&config).unwrap();
    let saw_fault = result.rounds.iter().any(|r| !r.injected_faults.is_empty());
    assert!(saw_fault, "100% fault rate should inject faults");
}
|
|
|
|
#[test]
fn test_diagnosis_round_count() {
    // The diagnosis result must contain exactly the configured round count.
    let components: Vec<Component> = ["X", "Y"]
        .iter()
        .map(|&name| Component {
            id: name.into(),
            health: 1.0,
        })
        .collect();
    let connections = vec![Connection {
        from: 0,
        to: 1,
        strength: 1.0,
    }];
    let diag = SystemDiagnostics::new(components, connections);
    let config = DiagnosisConfig {
        fault_injection_rate: 0.5,
        num_rounds: 20,
        seed: 99,
    };
    let result = diag.diagnose(&config).unwrap();
    assert_eq!(result.rounds.len(), 20, "Should have exactly 20 rounds");
}
|
|
|
|
#[test]
fn test_fragility_scores_produced() {
    // Diagnosing a fully-connected 3-component system must produce one
    // fragility score per component.
    let components: Vec<Component> = ["A", "B", "C"]
        .iter()
        .map(|&name| Component {
            id: name.into(),
            health: 1.0,
        })
        .collect();
    let connections: Vec<Connection> = [(0, 1), (0, 2), (1, 2)]
        .iter()
        .map(|&(from, to)| Connection {
            from,
            to,
            strength: 1.0,
        })
        .collect();
    let diag = SystemDiagnostics::new(components, connections);
    let config = DiagnosisConfig {
        fault_injection_rate: 0.5,
        num_rounds: 50,
        seed: 42,
    };
    let result = diag.diagnose(&config).unwrap();
    assert_eq!(
        result.fragility_scores.len(),
        3,
        "Should have score per component"
    );
}
|
|
|
|
// ===========================================================================
|
|
// 7. Time-Reversible Memory
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::reversible_memory::*;
|
|
|
|
#[test]
fn test_rewind_restores_state() {
    // Applying two gates and rewinding both must restore the initial |00>.
    let mut mem = ReversibleMemory::new(2).unwrap();
    let baseline = mem.probabilities();
    mem.apply(Gate::H(0)).unwrap();
    mem.apply(Gate::X(1)).unwrap();
    // Sanity: the gates actually moved the state away from baseline.
    assert_ne!(mem.probabilities(), baseline);
    mem.rewind(2).unwrap();
    let restored = mem.probabilities();
    assert!(
        (restored[0] - 1.0).abs() < EPSILON,
        "Rewind should restore |00>: {:?}",
        restored
    );
}
|
|
|
|
#[test]
fn test_counterfactual_divergence() {
    // Removing the H gate from an entangling sequence changes the final
    // state, so the counterfactual divergence must be strictly positive.
    let mut mem = ReversibleMemory::new(2).unwrap();
    mem.apply(Gate::H(0)).unwrap(); // step 0: superposition on q0
    mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangle q0 with q1
    let cf = mem.counterfactual(0).unwrap(); // what if H never happened?
    assert!(
        cf.divergence > EPSILON,
        "Removing H gate should produce divergence: {}",
        cf.divergence
    );
}
|
|
|
|
#[test]
fn test_counterfactual_identity_step() {
    // Rz(0) acts as the identity, so removing it must leave the final
    // state unchanged — zero counterfactual divergence.
    let mut mem = ReversibleMemory::new(1).unwrap();
    mem.apply(Gate::H(0)).unwrap();
    mem.apply(Gate::Rz(0, 0.0)).unwrap(); // effectively a no-op
    mem.apply(Gate::X(0)).unwrap();
    let cf = mem.counterfactual(1).unwrap(); // drop the Rz(0) step
    assert!(
        cf.divergence < EPSILON,
        "Removing identity-like step should have zero divergence: {}",
        cf.divergence
    );
}
|
|
|
|
#[test]
fn test_sensitivity_identifies_important_gate() {
    // A near-zero Rz rotation should never register as more sensitive than
    // whichever gate the analysis ranks as most sensitive (H or CNOT).
    let mut mem = ReversibleMemory::new(2).unwrap();
    mem.apply(Gate::Rz(0, 0.001)).unwrap(); // step 0: near no-op
    mem.apply(Gate::H(0)).unwrap(); // step 1: superposition
    mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 2: entanglement
    let report = mem.sensitivity_analysis(0.5).unwrap();
    assert!(
        report.sensitivities[0] <= report.sensitivities[report.most_sensitive],
        "Tiny rotation should be less sensitive than the most sensitive gate"
    );
}
|
|
|
|
#[test]
fn test_history_length() {
    // history_len() must grow by one per applied gate and shrink on rewind.
    let mut mem = ReversibleMemory::new(1).unwrap();
    // Fresh memory: nothing applied yet.
    assert_eq!(mem.history_len(), 0);
    mem.apply(Gate::H(0)).unwrap();
    assert_eq!(mem.history_len(), 1);
    mem.apply(Gate::X(0)).unwrap();
    assert_eq!(mem.history_len(), 2);
    // Rewinding one step removes the most recent gate from the history.
    mem.rewind(1).unwrap();
    assert_eq!(mem.history_len(), 1);
}
|
|
|
|
// ===========================================================================
|
|
// 8. Browser-Native Quantum Reality Checks
|
|
// ===========================================================================
|
|
|
|
use ruqu_exotic::reality_check::*;
|
|
|
|
#[test]
fn test_superposition_check() {
    // Built-in reality check: superposition must be demonstrable.
    let outcome = check_superposition();
    assert!(outcome.passed, "Superposition check failed: {}", outcome.detail);
}
|
|
|
|
#[test]
fn test_entanglement_check() {
    // Built-in reality check: entanglement must be demonstrable.
    let outcome = check_entanglement();
    assert!(outcome.passed, "Entanglement check failed: {}", outcome.detail);
}
|
|
|
|
#[test]
fn test_interference_check() {
    // Built-in reality check: interference must be demonstrable.
    let outcome = check_interference();
    assert!(outcome.passed, "Interference check failed: {}", outcome.detail);
}
|
|
|
|
#[test]
fn test_phase_kickback_check() {
    // Built-in reality check: phase kickback must be demonstrable.
    let outcome = check_phase_kickback();
    assert!(outcome.passed, "Phase kickback check failed: {}", outcome.detail);
}
|
|
|
|
#[test]
fn test_no_cloning_check() {
    // Built-in reality check: the no-cloning property must hold.
    let outcome = check_no_cloning();
    assert!(outcome.passed, "No-cloning check failed: {}", outcome.detail);
}
|
|
|
|
#[test]
fn test_all_checks_pass() {
    // The full suite carries five checks and every single one must pass.
    let results = run_all_checks();
    assert_eq!(results.len(), 5, "Should have 5 built-in checks");
    for outcome in &results {
        assert!(
            outcome.passed,
            "Check '{}' failed: {}",
            outcome.check_name,
            outcome.detail
        );
    }
}
|
|
|
|
// ===========================================================================
|
|
// DISCOVERY: Cross-Module Experiments
|
|
// ===========================================================================
|
|
// These tests combine exotic modules to discover emergent behavior.
|
|
|
|
/// DISCOVERY 1: Decoherence trajectory as a classifier.
|
|
/// Two similar embeddings decohere similarly. Two different ones diverge.
|
|
/// The RATE of fidelity loss is a fingerprint.
|
|
#[test]
fn test_discovery_decoherence_trajectory_fingerprint() {
    // Two nearby embeddings (a1, a2) and one distant embedding (b) are
    // decohered identically; the resulting fidelity trajectory is printed
    // as an observational fingerprint (no hard assertion — noise-dependent).
    let mut emb_a1 = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.0, 0.0], 0.1);
    let mut emb_a2 = QuantumEmbedding::from_embedding(&[0.9, 0.6, 0.0, 0.0], 0.1);
    let mut emb_b = QuantumEmbedding::from_embedding(&[0.0, 0.0, 1.0, 0.5], 0.1);

    // Same duration and same seed for all three.
    emb_a1.decohere(5.0, 100);
    emb_a2.decohere(5.0, 100);
    emb_b.decohere(5.0, 100);

    let (fid_a1, fid_a2, fid_b) = (emb_a1.fidelity(), emb_a2.fidelity(), emb_b.fidelity());

    // Similar embeddings should decohere similarly; different ones diverge.
    let diff_similar = (fid_a1 - fid_a2).abs();
    let diff_different = (fid_a1 - fid_b).abs();

    println!("DISCOVERY: Decoherence fingerprint");
    println!(" Similar pair fidelity diff: {:.6}", diff_similar);
    println!(" Different pair fidelity diff: {:.6}", diff_different);
    println!(
        " A1 fidelity: {:.6}, A2 fidelity: {:.6}, B fidelity: {:.6}",
        fid_a1, fid_a2, fid_b
    );
}
|
|
|
|
/// DISCOVERY 2: Interference creates NEW vectors not in original space.
|
|
/// When two concept meanings interfere with a context, the resulting
|
|
/// amplitude pattern is a vector that encodes the relationship between
|
|
/// the concepts and the context — not just a reranking.
|
|
#[test]
fn test_discovery_interference_creates_novel_representations() {
    // "spring" carries three meanings; each context vector should surface
    // a different one through interference.
    let spring = ConceptSuperposition::uniform(
        "spring",
        vec![
            ("season".into(), vec![1.0, 0.0, 0.0, 0.0]),
            ("water_source".into(), vec![0.0, 1.0, 0.0, 0.0]),
            ("mechanical".into(), vec![0.0, 0.0, 1.0, 0.0]),
        ],
    );

    // Three different contexts, each aligned with one meaning.
    let ctx_weather = vec![0.9, 0.0, 0.0, 0.1];
    let ctx_geology = vec![0.1, 0.8, 0.1, 0.0];
    let ctx_engineering = vec![0.0, 0.0, 0.9, 0.1];

    let scores_weather = spring.interfere(&ctx_weather);
    let scores_geology = spring.interfere(&ctx_geology);
    let scores_engineering = spring.interfere(&ctx_engineering);

    println!("DISCOVERY: Interference resolves polysemy");
    let labeled = [
        ("weather", &scores_weather),
        ("geology", &scores_geology),
        ("engineering", &scores_engineering),
    ];
    for (ctx_name, scores) in &labeled {
        let top = scores
            .iter()
            .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
            .unwrap();
        println!(
            " Context '{}' → top meaning: '{}' (prob: {:.4})",
            ctx_name, top.label, top.probability
        );
    }

    // Each context must surface its matching meaning.
    let expectations = [
        (&scores_weather, "season"),
        (&scores_geology, "water_source"),
        (&scores_engineering, "mechanical"),
    ];
    for (scores, expected) in &expectations {
        let top = scores
            .iter()
            .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
            .unwrap();
        assert_eq!(top.label, *expected);
    }
}
|
|
|
|
/// DISCOVERY 3: Counterfactual reveals hidden dependencies.
|
|
/// In a chain of operations, some steps are critical (removing them
|
|
/// changes everything) and some are redundant (removing them changes nothing).
|
|
/// This is impossible to know in forward-only systems.
|
|
#[test]
fn test_discovery_counterfactual_dependency_map() {
    // Build an entangled 3-qubit state, then probe each step's importance
    // by counterfactually removing it.
    let mut mem = ReversibleMemory::new(3).unwrap();
    mem.apply(Gate::H(0)).unwrap(); // step 0: superposition on q0
    mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangle q0-q1
    mem.apply(Gate::Rz(2, 0.001)).unwrap(); // step 2: near no-op on q2
    mem.apply(Gate::CNOT(1, 2)).unwrap(); // step 3: spread entanglement to q2
    mem.apply(Gate::H(2)).unwrap(); // step 4: mix q2

    println!("DISCOVERY: Counterfactual dependency map");
    for step in 0..5 {
        let cf = mem.counterfactual(step).unwrap();
        println!(" Step {} removed: divergence = {:.6}", step, cf.divergence);
    }

    // The initial H seeds all the superposition, while the tiny Rz is
    // nearly inert — the divergences must reflect that ordering.
    let critical = mem.counterfactual(0).unwrap();
    let inert = mem.counterfactual(2).unwrap();
    assert!(
        critical.divergence > inert.divergence,
        "H gate (step 0) should be more critical than tiny Rz (step 2): {} > {}",
        critical.divergence,
        inert.divergence
    );
}
|
|
|
|
/// DISCOVERY 4: Swarm interference naturally resolves what voting cannot.
|
|
/// With voting: 3 for A, 2 for B → A wins 60/40.
|
|
/// With interference: depends on agent PHASES, not just counts.
|
|
/// Confident agreement amplifies exponentially. Uncertain agents barely contribute.
|
|
#[test]
fn test_discovery_swarm_phase_matters() {
    // Same head-count, different phases: three phase-aligned supporters
    // must out-score two aligned agents plus one orthogonal (π/2) one.
    let action = Action {
        id: "x".into(),
        description: "".into(),
    };

    // Scenario 1: three confident agents, all at phase 0.
    let mut aligned = SwarmInterference::new();
    for i in 0..3 {
        let vote = AgentContribution::new(&format!("a{}", i), action.clone(), 1.0, true);
        aligned.contribute(vote);
    }

    // Scenario 2: two aligned agents plus one contributing at phase π/2.
    let mut misaligned = SwarmInterference::new();
    misaligned.contribute(AgentContribution::new("b0", action.clone(), 1.0, true));
    misaligned.contribute(AgentContribution::new("b1", action.clone(), 1.0, true));
    misaligned.contribute(AgentContribution::multi(
        "b2",
        vec![(action.clone(), Complex::new(0.0, 1.0))], // phase π/2
    ));

    let prob_aligned = aligned.decide()[0].probability;
    let prob_misaligned = misaligned.decide()[0].probability;

    println!("DISCOVERY: Phase alignment matters for swarm decisions");
    println!(
        " Aligned (3 agents, same phase): prob = {:.4}",
        prob_aligned
    );
    println!(
        " Misaligned (2 same, 1 orthogonal): prob = {:.4}",
        prob_misaligned
    );

    assert!(
        prob_aligned > prob_misaligned,
        "Phase-aligned swarm should produce higher probability"
    );
}
|