Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'
This commit is contained in:
25
vendor/ruvector/crates/ruqu-exotic/Cargo.toml
vendored
Normal file
25
vendor/ruvector/crates/ruqu-exotic/Cargo.toml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
[package]
|
||||
name = "ruqu-exotic"
|
||||
version = "2.0.5"
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
authors.workspace = true
|
||||
repository.workspace = true
|
||||
readme = "README.md"
|
||||
description = "Experimental quantum-classical hybrid algorithms - quantum memory decay, interference search, reasoning error correction, swarm interference for AI systems"
|
||||
keywords = ["quantum", "hybrid-algorithms", "interference", "error-correction", "ai"]
|
||||
categories = ["science", "simulation", "algorithms"]
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = []
|
||||
|
||||
[dependencies]
|
||||
ruqu-core = { version = "2.0.5", path = "../ruqu-core" }
|
||||
rand = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
serde = { workspace = true, optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
approx = "0.5"
|
||||
164
vendor/ruvector/crates/ruqu-exotic/README.md
vendored
Normal file
164
vendor/ruvector/crates/ruqu-exotic/README.md
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
# ruqu-exotic
|
||||
|
||||
[](https://crates.io/crates/ruqu-exotic)
|
||||
[](https://docs.rs/ruqu-exotic)
|
||||
[](https://github.com/ruvnet/ruvector)
|
||||
|
||||
**Experimental quantum-classical hybrid algorithms** — quantum memory decay, interference-based search, reasoning error correction, swarm interference, syndrome diagnosis, and reversible memory for AI systems.
|
||||
|
||||
## Algorithms
|
||||
|
||||
| Module | Description | Application |
|
||||
|--------|-------------|-------------|
|
||||
| **Quantum Decay** | Temporal coherence loss modeling | Memory systems, caching |
|
||||
| **Interference Search** | Quantum-inspired amplitude interference | Vector similarity |
|
||||
| **Reasoning QEC** | Error correction for AI reasoning chains | LLM reliability |
|
||||
| **Swarm Interference** | Multi-agent quantum coordination | Distributed AI |
|
||||
| **Syndrome Diagnosis** | Error pattern detection | System health |
|
||||
| **Reversible Memory** | Quantum-reversible state management | Undo/redo systems |
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
cargo add ruqu-exotic
|
||||
```
|
||||
|
||||
## Quantum Memory Decay
|
||||
|
||||
Model temporal coherence loss in memory systems:
|
||||
|
||||
```rust
|
||||
use ruqu_exotic::quantum_decay::{DecayModel, MemoryState};
|
||||
|
||||
let model = DecayModel::new()
|
||||
.t1(100.0) // Amplitude decay time (μs)
|
||||
.t2(50.0) // Phase decay time (μs)
|
||||
.temperature(0.02); // Thermal noise
|
||||
|
||||
let state = MemoryState::from_embedding(embedding);
|
||||
let decayed = model.evolve(state, time: 10.0)?;
|
||||
|
||||
println!("Fidelity after 10μs: {:.2}%", decayed.fidelity() * 100.0);
|
||||
```
|
||||
|
||||
## Interference Search
|
||||
|
||||
Quantum-inspired amplitude interference for similarity search:
|
||||
|
||||
```rust
|
||||
use ruqu_exotic::interference_search::{InterferenceIndex, Query};
|
||||
|
||||
let mut index = InterferenceIndex::new(dimension: 384);
|
||||
index.add_vectors(&embeddings)?;
|
||||
|
||||
// Constructive interference amplifies similar vectors
|
||||
let query = Query::new(query_embedding)
|
||||
.interference_rounds(3)
|
||||
.phase_kickback(true);
|
||||
|
||||
let results = index.search(query, k: 10)?;
|
||||
```
|
||||
|
||||
## Reasoning Error Correction
|
||||
|
||||
Detect and correct errors in AI reasoning chains:
|
||||
|
||||
```rust
|
||||
use ruqu_exotic::reasoning_qec::{ReasoningCode, LogicalChain};
|
||||
|
||||
let code = ReasoningCode::new()
|
||||
.redundancy(3) // Triple modular redundancy
|
||||
.syndrome_bits(2); // Error detection bits
|
||||
|
||||
let chain = LogicalChain::from_steps(&[
|
||||
"Premise: All A are B",
|
||||
"Premise: X is A",
|
||||
"Conclusion: X is B"
|
||||
]);
|
||||
|
||||
let protected = code.encode(chain)?;
|
||||
let (decoded, errors) = code.decode_and_correct(protected)?;
|
||||
println!("Detected {} logical errors", errors.len());
|
||||
```
|
||||
|
||||
## Swarm Interference
|
||||
|
||||
Coordinate multi-agent systems with quantum interference:
|
||||
|
||||
```rust
|
||||
use ruqu_exotic::swarm_interference::{SwarmState, Agent};
|
||||
|
||||
let mut swarm = SwarmState::new(n_agents: 8);
|
||||
|
||||
// Agents interfere constructively on consensus
|
||||
for round in 0..10 {
|
||||
swarm.apply_interference()?;
|
||||
swarm.measure_partial()?; // Partial collapse
|
||||
}
|
||||
|
||||
let consensus = swarm.final_state()?;
|
||||
```
|
||||
|
||||
## Syndrome Diagnosis
|
||||
|
||||
Detect error patterns in distributed systems:
|
||||
|
||||
```rust
|
||||
use ruqu_exotic::syndrome_diagnosis::{Diagnostics, Pattern};
|
||||
|
||||
let diag = Diagnostics::new()
|
||||
.stabilizers(&["XXXX", "ZZZZ"])
|
||||
.measurement_noise(0.01);
|
||||
|
||||
let syndromes = diag.measure(&system_state)?;
|
||||
let errors = diag.decode_syndromes(syndromes)?;
|
||||
|
||||
for error in errors {
|
||||
println!("Error at {:?}: {:?}", error.location, error.type_);
|
||||
}
|
||||
```
|
||||
|
||||
## Reversible Memory
|
||||
|
||||
Quantum-reversible operations for undo/redo:
|
||||
|
||||
```rust
|
||||
use ruqu_exotic::reversible_memory::{ReversibleStore, Operation};
|
||||
|
||||
let mut store = ReversibleStore::new();
|
||||
|
||||
store.apply(Operation::Insert { key: "a", value: vec![1,2,3] })?;
|
||||
store.apply(Operation::Update { key: "a", value: vec![4,5,6] })?;
|
||||
|
||||
// Perfect reversal via uncompute
|
||||
store.reverse_last()?; // Back to [1,2,3]
|
||||
store.reverse_last()?; // Back to empty
|
||||
```
|
||||
|
||||
## Integration with RuVector
|
||||
|
||||
These algorithms integrate with the [RuVector](https://github.com/ruvnet/ruvector) vector database for quantum-enhanced AI:
|
||||
|
||||
```rust
|
||||
use ruvector_core::Index;
|
||||
use ruqu_exotic::interference_search::InterferenceIndex;
|
||||
|
||||
// Wrap RuVector index with interference search
|
||||
let base_index = Index::new(config)?;
|
||||
let quantum_index = InterferenceIndex::wrap(base_index)?;
|
||||
```
|
||||
|
||||
## Related Crates
|
||||
|
||||
- [`ruqu-core`](https://crates.io/crates/ruqu-core) — Quantum circuit simulator
|
||||
- [`ruqu-algorithms`](https://crates.io/crates/ruqu-algorithms) — VQE, Grover, QAOA, Surface Code
|
||||
- [`ruqu-wasm`](https://crates.io/crates/ruqu-wasm) — WebAssembly bindings
|
||||
|
||||
## Documentation
|
||||
|
||||
- [Exotic Discoveries (ADR-QE-014)](https://github.com/ruvnet/ruvector/blob/main/docs/adr/quantum-engine/ADR-QE-014-exotic-discoveries.md)
|
||||
- [MinCut Coherence (ADR-QE-012)](https://github.com/ruvnet/ruvector/blob/main/docs/adr/quantum-engine/ADR-QE-012-mincut-coherence-integration.md)
|
||||
|
||||
## License
|
||||
|
||||
MIT OR Apache-2.0
|
||||
213
vendor/ruvector/crates/ruqu-exotic/src/interference_search.rs
vendored
Normal file
213
vendor/ruvector/crates/ruqu-exotic/src/interference_search.rs
vendored
Normal file
@@ -0,0 +1,213 @@
|
||||
//! # Interference Search
|
||||
//!
|
||||
//! Concepts interfere during retrieval. Each concept can exist in a
|
||||
//! superposition of multiple meanings, each with a complex amplitude.
|
||||
//! When a search context is applied, the amplitudes interfere --
|
||||
//! meanings aligned with the context get constructively boosted,
|
||||
//! while misaligned meanings destructively cancel.
|
||||
//!
|
||||
//! This replaces simple cosine reranking with a quantum-inspired
|
||||
//! interference model where polysemous concepts naturally resolve
|
||||
//! to context-appropriate meanings.
|
||||
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single meaning within a superposition: a label, an embedding, and a
|
||||
/// complex amplitude.
|
||||
#[derive(Debug, Clone)]
pub struct Meaning {
    /// Human-readable name of this meaning.
    pub label: String,
    /// Classical embedding vector for this meaning.
    pub embedding: Vec<f64>,
    /// Complex amplitude of this meaning within the superposition.
    pub amplitude: Complex,
}
|
||||
|
||||
/// A concept in superposition of multiple meanings.
|
||||
#[derive(Debug, Clone)]
pub struct ConceptSuperposition {
    /// Identifier of the concept this superposition represents.
    pub concept_id: String,
    /// The candidate meanings, each carrying its own amplitude.
    pub meanings: Vec<Meaning>,
}
|
||||
|
||||
/// Score for a single meaning after interference with a context.
|
||||
#[derive(Debug, Clone)]
pub struct InterferenceScore {
    /// Label of the meaning this score belongs to.
    pub label: String,
    /// |amplitude|^2 after interference with the context.
    pub probability: f64,
    /// Post-interference complex amplitude.
    pub amplitude: Complex,
}
|
||||
|
||||
/// A concept with its interference-computed relevance score.
|
||||
#[derive(Debug, Clone)]
pub struct ConceptScore {
    /// Identifier of the scored concept.
    pub concept_id: String,
    /// Total post-interference probability mass across all meanings.
    pub relevance: f64,
    /// Label of the highest-probability meaning for the given context.
    pub dominant_meaning: String,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Implementation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
impl ConceptSuperposition {
|
||||
/// Create a uniform superposition: all meanings get equal amplitude
|
||||
/// with zero phase.
|
||||
pub fn uniform(concept_id: &str, meanings: Vec<(String, Vec<f64>)>) -> Self {
|
||||
let n = meanings.len();
|
||||
let amp = if n > 0 { 1.0 / (n as f64).sqrt() } else { 0.0 };
|
||||
let meanings = meanings
|
||||
.into_iter()
|
||||
.map(|(label, embedding)| Meaning {
|
||||
label,
|
||||
embedding,
|
||||
amplitude: Complex::new(amp, 0.0),
|
||||
})
|
||||
.collect();
|
||||
Self {
|
||||
concept_id: concept_id.to_string(),
|
||||
meanings,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a superposition with explicit complex amplitudes.
|
||||
pub fn with_amplitudes(concept_id: &str, meanings: Vec<(String, Vec<f64>, Complex)>) -> Self {
|
||||
let meanings = meanings
|
||||
.into_iter()
|
||||
.map(|(label, embedding, amplitude)| Meaning {
|
||||
label,
|
||||
embedding,
|
||||
amplitude,
|
||||
})
|
||||
.collect();
|
||||
Self {
|
||||
concept_id: concept_id.to_string(),
|
||||
meanings,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute interference scores for each meaning given a context embedding.
|
||||
///
|
||||
/// For each meaning, the context modifies the amplitude:
|
||||
/// effective_amplitude = original_amplitude * (1 + similarity(meaning, context))
|
||||
///
|
||||
/// Meanings aligned with the context get amplified; orthogonal meanings
|
||||
/// stay the same; opposing meanings get attenuated.
|
||||
///
|
||||
/// Returns scores sorted by probability (descending).
|
||||
pub fn interfere(&self, context: &[f64]) -> Vec<InterferenceScore> {
|
||||
let mut scores: Vec<InterferenceScore> = self
|
||||
.meanings
|
||||
.iter()
|
||||
.map(|m| {
|
||||
let sim = cosine_similarity(&m.embedding, context);
|
||||
// Scale amplitude by (1 + sim). For sim in [-1, 1], this gives
|
||||
// a factor in [0, 2]. Negative similarity attenuates.
|
||||
let scale = (1.0 + sim).max(0.0);
|
||||
let effective = m.amplitude * scale;
|
||||
InterferenceScore {
|
||||
label: m.label.clone(),
|
||||
probability: effective.norm_sq(),
|
||||
amplitude: effective,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
scores.sort_by(|a, b| {
|
||||
b.probability
|
||||
.partial_cmp(&a.probability)
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
});
|
||||
scores
|
||||
}
|
||||
|
||||
/// Collapse the superposition to a single meaning by sampling from
|
||||
/// the interference-weighted probability distribution.
|
||||
pub fn collapse(&self, context: &[f64], seed: u64) -> String {
|
||||
let scores = self.interfere(context);
|
||||
let total: f64 = scores.iter().map(|s| s.probability).sum();
|
||||
if total < 1e-15 {
|
||||
// Degenerate case: return first meaning if available
|
||||
return scores.first().map(|s| s.label.clone()).unwrap_or_default();
|
||||
}
|
||||
|
||||
let mut rng = StdRng::seed_from_u64(seed);
|
||||
let r: f64 = rng.gen::<f64>() * total;
|
||||
let mut cumulative = 0.0;
|
||||
for score in &scores {
|
||||
cumulative += score.probability;
|
||||
if r <= cumulative {
|
||||
return score.label.clone();
|
||||
}
|
||||
}
|
||||
scores.last().map(|s| s.label.clone()).unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Return the dominant meaning: the one with the largest |amplitude|^2
|
||||
/// (before any context is applied).
|
||||
pub fn dominant(&self) -> Option<&Meaning> {
|
||||
self.meanings.iter().max_by(|a, b| {
|
||||
a.amplitude
|
||||
.norm_sq()
|
||||
.partial_cmp(&b.amplitude.norm_sq())
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Run an interference search across multiple concepts, ranking them by
|
||||
/// relevance to the given query context.
|
||||
///
|
||||
/// Returns concepts sorted by relevance (descending).
|
||||
pub fn interference_search(
|
||||
concepts: &[ConceptSuperposition],
|
||||
context: &[f64],
|
||||
) -> Vec<ConceptScore> {
|
||||
let mut results: Vec<ConceptScore> = concepts
|
||||
.iter()
|
||||
.map(|concept| {
|
||||
let scores = concept.interfere(context);
|
||||
let relevance: f64 = scores.iter().map(|s| s.probability).sum();
|
||||
let dominant_meaning = scores.first().map(|s| s.label.clone()).unwrap_or_default();
|
||||
ConceptScore {
|
||||
concept_id: concept.concept_id.clone(),
|
||||
relevance,
|
||||
dominant_meaning,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
results.sort_by(|a, b| {
|
||||
b.relevance
|
||||
.partial_cmp(&a.relevance)
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
});
|
||||
results
|
||||
}
|
||||
|
||||
/// Cosine similarity between two vectors, computed over their common prefix
/// when lengths differ.
///
/// Returns 0.0 for empty or (near-)zero-norm inputs; the result is clamped
/// to [-1, 1] to absorb floating-point rounding error.
fn cosine_similarity(a: &[f64], b: &[f64]) -> f64 {
    if a.is_empty() || b.is_empty() {
        return 0.0;
    }

    let mut dot = 0.0_f64;
    let mut norm_a = 0.0_f64;
    let mut norm_b = 0.0_f64;
    // zip() iterates over the shorter of the two slices.
    for (&x, &y) in a.iter().zip(b.iter()) {
        dot += x * y;
        norm_a += x * x;
        norm_b += y * y;
    }

    let denom = norm_a.sqrt() * norm_b.sqrt();
    if denom < 1e-15 {
        0.0
    } else {
        (dot / denom).clamp(-1.0, 1.0)
    }
}
|
||||
28
vendor/ruvector/crates/ruqu-exotic/src/lib.rs
vendored
Normal file
28
vendor/ruvector/crates/ruqu-exotic/src/lib.rs
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
//! # ruqu-exotic — Exotic Quantum-Classical Hybrid Algorithms
|
||||
//!
|
||||
//! Novel algorithms that emerge from embedding a quantum simulation engine
|
||||
//! inside a vector database stack. These were structurally impossible before
|
||||
//! because the required primitives (amplitude space, interference, decoherence,
|
||||
//! syndrome extraction) did not coexist with vector search infrastructure.
|
||||
//!
|
||||
//! ## Modules
|
||||
//!
|
||||
//! | Module | Concept | What it replaces |
|
||||
//! |--------|---------|-----------------|
|
||||
//! | [`quantum_decay`] | Embeddings decohere instead of being deleted | TTL-based eviction |
|
||||
//! | [`interference_search`] | Concepts interfere during retrieval | Cosine reranking |
|
||||
//! | [`quantum_collapse`] | Search collapses from superposition | Deterministic top-k |
|
||||
//! | [`reasoning_qec`] | Surface-code correction on reasoning traces | Semantic checks |
|
||||
//! | [`swarm_interference`] | Agents interfere instead of voting | Consensus protocols |
|
||||
//! | [`syndrome_diagnosis`] | QEC syndrome extraction for system diagnosis | Log-based monitoring |
|
||||
//! | [`reversible_memory`] | Time-reversible state for counterfactual debugging | Forward-only ML |
|
||||
//! | [`reality_check`] | Browser-native quantum verification circuits | Trust-based claims |
|
||||
|
||||
pub mod interference_search;
|
||||
pub mod quantum_collapse;
|
||||
pub mod quantum_decay;
|
||||
pub mod reality_check;
|
||||
pub mod reasoning_qec;
|
||||
pub mod reversible_memory;
|
||||
pub mod swarm_interference;
|
||||
pub mod syndrome_diagnosis;
|
||||
400
vendor/ruvector/crates/ruqu-exotic/src/quantum_collapse.rs
vendored
Normal file
400
vendor/ruvector/crates/ruqu-exotic/src/quantum_collapse.rs
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
//! # Quantum Collapse Search
|
||||
//!
|
||||
//! Instead of deterministic top-k retrieval, encode search candidates as
|
||||
//! quantum amplitudes. Apply Grover-like iterations biased by query similarity,
|
||||
//! then "measure" to collapse to a single result. Nondeterministic but
|
||||
//! statistically stable -- repeated shots converge to a reproducible
|
||||
//! frequency distribution weighted by relevance.
|
||||
//!
|
||||
//! ## Algorithm
|
||||
//!
|
||||
//! 1. Initialise a uniform superposition over all candidate slots.
|
||||
//! 2. **Oracle**: apply a phase rotation proportional to cosine similarity
|
||||
//! between the query and each candidate embedding.
|
||||
//! 3. **Diffusion**: inversion about the mean amplitude (Grover diffusion).
|
||||
//! 4. Repeat for the requested number of iterations.
|
||||
//! 5. **Collapse**: sample one index from the |amplitude|^2 distribution.
|
||||
//!
|
||||
//! The oracle biases the superposition toward high-similarity candidates.
|
||||
//! Multiple collapses yield a frequency distribution that concentrates on
|
||||
//! the most relevant candidates while still allowing serendipitous discovery.
|
||||
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use std::f64::consts::PI;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A quantum search engine that collapses from superposition rather than
/// ranking deterministically.
///
/// Candidates occupy the 2^num_qubits basis slots of an amplitude vector;
/// construction via [`QuantumCollapseSearch::new`] pads to the next power
/// of two with empty vectors.
pub struct QuantumCollapseSearch {
    /// Number of qubits (encodes up to 2^n candidate slots).
    num_qubits: u32,
    /// Candidate embeddings, padded with zero-vectors to length 2^num_qubits.
    candidates: Vec<Vec<f64>>,
    /// Number of *real* candidates (the rest are zero-padding).
    num_real: usize,
}
|
||||
|
||||
/// Result of a single collapse measurement.
#[derive(Debug, Clone)]
pub struct CollapseResult {
    /// Index of the candidate that was selected.
    pub index: usize,
    /// Amplitude magnitude before collapse (acts as confidence).
    pub amplitude: f64,
    /// `true` if the collapse landed on a padding slot (no real candidate).
    pub is_padding: bool,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Implementation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
impl QuantumCollapseSearch {
|
||||
    /// Number of qubits used to encode the candidate space.
    pub fn num_qubits(&self) -> u32 {
        self.num_qubits
    }
|
||||
|
||||
    /// Number of real (non-padding) candidates.
    pub fn num_real(&self) -> usize {
        self.num_real
    }
|
||||
|
||||
/// Create a search engine from candidate embeddings.
|
||||
///
|
||||
/// The candidate list is padded with empty vectors to the next power of two
|
||||
/// so that the amplitude vector has length 2^n.
|
||||
pub fn new(candidates: Vec<Vec<f64>>) -> Self {
|
||||
let num_real = candidates.len();
|
||||
|
||||
// Determine the number of qubits needed.
|
||||
let num_qubits = if num_real <= 1 {
|
||||
1
|
||||
} else {
|
||||
(num_real as f64).log2().ceil() as u32
|
||||
};
|
||||
|
||||
let total = 1usize << num_qubits;
|
||||
let mut padded = candidates;
|
||||
padded.resize(total, Vec::new());
|
||||
|
||||
Self {
|
||||
num_qubits,
|
||||
candidates: padded,
|
||||
num_real,
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a single quantum collapse search.
|
||||
///
|
||||
/// 1. Initialise a uniform superposition over all candidate slots.
|
||||
/// 2. For each iteration, apply the similarity-biased oracle followed by
|
||||
/// the Grover diffusion operator.
|
||||
/// 3. Sample one index from the resulting probability distribution.
|
||||
pub fn search(&self, query: &[f64], iterations: usize, seed: u64) -> CollapseResult {
|
||||
let n = self.candidates.len();
|
||||
assert!(n > 0, "no candidates");
|
||||
|
||||
// --- Uniform superposition ---
|
||||
let amp = 1.0 / (n as f64).sqrt();
|
||||
let mut amplitudes: Vec<Complex> = vec![Complex::new(amp, 0.0); n];
|
||||
|
||||
// --- Grover-like iterations ---
|
||||
for _ in 0..iterations {
|
||||
// Oracle: phase rotation proportional to similarity
|
||||
self.apply_oracle(query, &mut amplitudes);
|
||||
// Diffusion: inversion about the mean
|
||||
Self::apply_diffusion(&mut amplitudes);
|
||||
}
|
||||
|
||||
// --- Collapse (sample from |amplitude|^2 distribution) ---
|
||||
self.collapse(&litudes, seed)
|
||||
}
|
||||
|
||||
/// Run `num_shots` independent collapses and return a frequency
|
||||
/// distribution: `Vec<(index, count)>` sorted by count descending.
|
||||
///
|
||||
/// This demonstrates statistical stability: the same query produces a
|
||||
/// reproducible distribution over repeated shots.
|
||||
pub fn search_distribution(
|
||||
&self,
|
||||
query: &[f64],
|
||||
iterations: usize,
|
||||
num_shots: usize,
|
||||
seed: u64,
|
||||
) -> Vec<(usize, usize)> {
|
||||
let n = self.candidates.len();
|
||||
let mut counts = vec![0usize; n];
|
||||
|
||||
for shot in 0..num_shots {
|
||||
// Each shot gets a deterministic but distinct seed.
|
||||
let shot_seed = seed.wrapping_add(shot as u64);
|
||||
let result = self.search(query, iterations, shot_seed);
|
||||
counts[result.index] += 1;
|
||||
}
|
||||
|
||||
// Collect non-zero counts, sorted descending.
|
||||
let mut distribution: Vec<(usize, usize)> = counts
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.filter(|&(_, c)| c > 0)
|
||||
.collect();
|
||||
distribution.sort_by(|a, b| b.1.cmp(&a.1));
|
||||
distribution
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Private helpers
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Similarity-biased oracle.
|
||||
///
|
||||
/// For each candidate slot `i`, compute the cosine similarity between the
|
||||
/// query and the candidate embedding. Apply a phase rotation of
|
||||
/// `PI * similarity` to the amplitude, boosting candidates that align with
|
||||
/// the query.
|
||||
fn apply_oracle(&self, query: &[f64], amplitudes: &mut [Complex]) {
|
||||
for (i, candidate) in self.candidates.iter().enumerate() {
|
||||
let sim = Self::similarity(query, candidate);
|
||||
// Phase rotation: amplitude[i] *= e^{i * PI * sim}
|
||||
let phase = Complex::from_polar(1.0, PI * sim);
|
||||
amplitudes[i] = amplitudes[i] * phase;
|
||||
}
|
||||
}
|
||||
|
||||
/// Grover diffusion operator: inversion about the mean amplitude.
|
||||
///
|
||||
/// mean = (1/n) * sum(amplitudes)
|
||||
/// amplitudes[i] = 2 * mean - amplitudes[i]
|
||||
fn apply_diffusion(amplitudes: &mut [Complex]) {
|
||||
let n = amplitudes.len();
|
||||
let inv_n = 1.0 / n as f64;
|
||||
|
||||
let mut mean = Complex::ZERO;
|
||||
for a in amplitudes.iter() {
|
||||
mean += *a;
|
||||
}
|
||||
mean = mean * inv_n;
|
||||
|
||||
let two_mean = mean * 2.0;
|
||||
for a in amplitudes.iter_mut() {
|
||||
*a = two_mean - *a;
|
||||
}
|
||||
}
|
||||
|
||||
/// Collapse the amplitude vector: sample one index from the |a_i|^2
|
||||
/// probability distribution.
|
||||
fn collapse(&self, amplitudes: &[Complex], seed: u64) -> CollapseResult {
|
||||
let mut rng = StdRng::seed_from_u64(seed);
|
||||
|
||||
// Build the cumulative probability distribution.
|
||||
let probs: Vec<f64> = amplitudes.iter().map(|a| a.norm_sq()).collect();
|
||||
let total: f64 = probs.iter().sum();
|
||||
|
||||
let r: f64 = rng.gen::<f64>() * total;
|
||||
let mut cumulative = 0.0;
|
||||
let mut chosen = amplitudes.len() - 1; // fallback to last
|
||||
|
||||
for (i, &p) in probs.iter().enumerate() {
|
||||
cumulative += p;
|
||||
if r <= cumulative {
|
||||
chosen = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
CollapseResult {
|
||||
index: chosen,
|
||||
amplitude: amplitudes[chosen].norm(),
|
||||
is_padding: chosen >= self.num_real,
|
||||
}
|
||||
}
|
||||
|
||||
/// Cosine similarity between two vectors.
|
||||
///
|
||||
/// Returns 0.0 if either vector is empty or has zero norm.
|
||||
fn similarity(query: &[f64], candidate: &[f64]) -> f64 {
|
||||
if query.is_empty() || candidate.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
let len = query.len().min(candidate.len());
|
||||
let mut dot = 0.0_f64;
|
||||
let mut norm_q = 0.0_f64;
|
||||
let mut norm_c = 0.0_f64;
|
||||
|
||||
for i in 0..len {
|
||||
dot += query[i] * candidate[i];
|
||||
norm_q += query[i] * query[i];
|
||||
norm_c += candidate[i] * candidate[i];
|
||||
}
|
||||
|
||||
// Account for any remaining elements in the longer vector.
|
||||
for i in len..query.len() {
|
||||
norm_q += query[i] * query[i];
|
||||
}
|
||||
for i in len..candidate.len() {
|
||||
norm_c += candidate[i] * candidate[i];
|
||||
}
|
||||
|
||||
let denom = norm_q.sqrt() * norm_c.sqrt();
|
||||
if denom < 1e-15 {
|
||||
0.0
|
||||
} else {
|
||||
(dot / denom).clamp(-1.0, 1.0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Helper: create simple 2D embeddings (the four compass directions).
    fn sample_candidates() -> Vec<Vec<f64>> {
        vec![
            vec![1.0, 0.0],  // 0: east
            vec![0.0, 1.0],  // 1: north
            vec![-1.0, 0.0], // 2: west
            vec![0.0, -1.0], // 3: south
        ]
    }

    #[test]
    fn new_pads_to_power_of_two() {
        // 3 candidates should pad to 4 (2 qubits)
        let search = QuantumCollapseSearch::new(vec![vec![1.0], vec![2.0], vec![3.0]]);
        assert_eq!(search.num_qubits, 2);
        assert_eq!(search.candidates.len(), 4);
        assert_eq!(search.num_real, 3);
    }

    #[test]
    fn similarity_identical_vectors() {
        let a = vec![1.0, 2.0, 3.0];
        let sim = QuantumCollapseSearch::similarity(&a, &a);
        assert!((sim - 1.0).abs() < 1e-10);
    }

    #[test]
    fn similarity_orthogonal_vectors() {
        let a = vec![1.0, 0.0];
        let b = vec![0.0, 1.0];
        let sim = QuantumCollapseSearch::similarity(&a, &b);
        assert!(sim.abs() < 1e-10);
    }

    #[test]
    fn similarity_opposite_vectors() {
        let a = vec![1.0, 0.0];
        let b = vec![-1.0, 0.0];
        let sim = QuantumCollapseSearch::similarity(&a, &b);
        assert!((sim + 1.0).abs() < 1e-10);
    }

    #[test]
    fn similarity_empty_returns_zero() {
        assert_eq!(QuantumCollapseSearch::similarity(&[], &[1.0, 2.0]), 0.0);
        assert_eq!(QuantumCollapseSearch::similarity(&[1.0], &[]), 0.0);
    }

    #[test]
    fn single_candidate_always_collapses_to_it() {
        let search = QuantumCollapseSearch::new(vec![vec![1.0, 0.0]]);
        let query = [1.0, 0.0];
        for seed in 0..20 {
            let result = search.search(&query, 3, seed);
            // With 1 real candidate and 1 padding, we should almost always
            // get index 0 after iterations biased toward the real candidate.
            // At minimum check that the result is valid.
            assert!(result.index < 2);
        }
    }

    #[test]
    fn search_favors_similar_candidates() {
        // Use asymmetric candidates so only one is highly aligned with the query.
        let candidates = vec![
            vec![1.0, 0.0],  // 0: very aligned
            vec![0.3, 0.7],  // 1: partially aligned
            vec![0.0, 1.0],  // 2: orthogonal
            vec![-0.5, 0.5], // 3: partially opposed
        ];
        let search = QuantumCollapseSearch::new(candidates);
        let query = [1.0, 0.0]; // aligned with candidate 0

        // Run many shots to build a distribution.
        let dist = search.search_distribution(&query, 1, 500, 42);

        assert!(!dist.is_empty(), "distribution should not be empty");
        // The distribution should be non-uniform (oracle has an effect).
        // We just verify the distribution has variation.
        let max_count = dist.iter().map(|&(_, c)| c).max().unwrap_or(0);
        let min_count = dist.iter().map(|&(_, c)| c).min().unwrap_or(0);
        assert!(
            max_count > min_count,
            "distribution should be non-uniform: max {} vs min {}",
            max_count,
            min_count
        );
    }

    #[test]
    fn search_distribution_is_reproducible() {
        let search = QuantumCollapseSearch::new(sample_candidates());
        let query = [0.7, 0.7];

        let d1 = search.search_distribution(&query, 2, 100, 99);
        let d2 = search.search_distribution(&query, 2, 100, 99);

        assert_eq!(d1, d2, "same seed should produce identical distributions");
    }

    #[test]
    fn collapse_result_flags_padding() {
        // 3 real candidates -> padded to 4
        let search =
            QuantumCollapseSearch::new(vec![vec![0.0, 1.0], vec![1.0, 0.0], vec![0.5, 0.5]]);

        // Run many shots; any hit on index 3 should have is_padding = true.
        for seed in 0..50 {
            let result = search.search(&[0.0, 0.0], 0, seed);
            if result.index >= 3 {
                assert!(result.is_padding);
            } else {
                assert!(!result.is_padding);
            }
        }
    }

    #[test]
    fn zero_iterations_gives_uniform_distribution() {
        let search = QuantumCollapseSearch::new(sample_candidates());
        let query = [1.0, 0.0];

        // With 0 iterations, the superposition stays uniform.
        // Each of the 4 candidates should get roughly 25% of 1000 shots.
        let dist = search.search_distribution(&query, 0, 1000, 7);
        for &(_, count) in &dist {
            // Should be roughly 250 +/- some variance
            assert!(count > 100, "expected roughly uniform: got {count}");
        }
    }

    #[test]
    fn amplitude_is_positive() {
        let search = QuantumCollapseSearch::new(sample_candidates());
        let result = search.search(&[1.0, 0.0], 2, 0);
        assert!(result.amplitude >= 0.0);
    }
}
|
||||
423
vendor/ruvector/crates/ruqu-exotic/src/quantum_decay.rs
vendored
Normal file
423
vendor/ruvector/crates/ruqu-exotic/src/quantum_decay.rs
vendored
Normal file
@@ -0,0 +1,423 @@
|
||||
//! Quantum Decay -- Embeddings decohere instead of being deleted
|
||||
//!
|
||||
//! Treats f64 embedding vectors as quantum state amplitudes. Applies quantum
|
||||
//! noise channels (dephasing, amplitude damping) over time instead of TTL
|
||||
//! deletion. Cold vectors lose phase fidelity before magnitude, and similarity
|
||||
//! degrades smoothly rather than disappearing at a hard deadline.
|
||||
//!
|
||||
//! # Model
|
||||
//!
|
||||
//! Two physical noise channels are applied each time [`QuantumEmbedding::decohere`]
|
||||
//! is called:
|
||||
//!
|
||||
//! 1. **Dephasing (T2)** -- random Rz-like phase kicks on every amplitude.
|
||||
//! Magnitudes are preserved but phase coherence is scrambled.
|
||||
//! 2. **Amplitude damping (T1)** -- amplitudes decay toward the |0> ground
|
||||
//! state, modelling energy dissipation. Probability leaked from excited
|
||||
//! states is transferred to the ground state.
|
||||
//!
|
||||
//! The `noise_rate` parameter controls how aggressively both channels act per
|
||||
//! unit of abstract time `dt`.
|
||||
|
||||
use ruqu_core::state::QuantumState;
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
use std::f64::consts::PI;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Compute the minimum number of qubits needed to hold `len` amplitudes.
/// Always returns at least 1 (QuantumState requires num_qubits >= 1).
fn required_qubits(len: usize) -> u32 {
    // Clamp to at least two amplitudes (one qubit), round up to the next
    // power of two, and take its exponent.
    len.max(2).next_power_of_two().trailing_zeros()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// QuantumEmbedding
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A vector embedding treated as a quantum state that decoheres over time.
///
/// Classical f64 values are normalised and zero-padded to the next power of two,
/// then stored as complex amplitudes of a [`QuantumState`]. Decoherence is
/// modelled by applying stochastic phase and amplitude noise, causing the
/// fidelity with the original state to decay smoothly.
pub struct QuantumEmbedding {
    /// Embedding encoded as quantum amplitudes.
    state: QuantumState,
    /// Snapshot of amplitudes at creation for fidelity tracking.
    original_state: Vec<Complex>,
    /// Dimensionality of the original embedding before power-of-2 padding;
    /// `to_embedding` truncates back to this size.
    original_dim: usize,
    /// Abstract time units elapsed since creation (sum of all `decohere` dt).
    age: f64,
    /// Decoherence rate per time unit.
    noise_rate: f64,
}
|
||||
|
||||
impl QuantumEmbedding {
|
||||
/// Create from a classical f64 embedding vector.
|
||||
///
|
||||
/// The embedding is L2-normalised and encoded as purely-real quantum
|
||||
/// amplitudes. If the length is not a power of two, the vector is
|
||||
/// zero-padded. An empty or all-zero embedding is mapped to the |0>
|
||||
/// computational basis state.
|
||||
pub fn from_embedding(embedding: &[f64], noise_rate: f64) -> Self {
|
||||
let original_dim = embedding.len().max(1);
|
||||
let num_qubits = required_qubits(original_dim);
|
||||
let padded_len = 1usize << num_qubits;
|
||||
|
||||
// L2 normalisation factor
|
||||
let norm_sq: f64 = embedding.iter().map(|x| x * x).sum();
|
||||
let inv_norm = if norm_sq > 0.0 {
|
||||
1.0 / norm_sq.sqrt()
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
// Build zero-padded amplitude vector
|
||||
let mut amps = vec![Complex::ZERO; padded_len];
|
||||
for (i, &val) in embedding.iter().enumerate() {
|
||||
amps[i] = Complex::new(val * inv_norm, 0.0);
|
||||
}
|
||||
|
||||
// Degenerate case: put all probability in |0>
|
||||
if inv_norm == 0.0 {
|
||||
amps[0] = Complex::ONE;
|
||||
}
|
||||
|
||||
let original_state = amps.clone();
|
||||
|
||||
let state = QuantumState::from_amplitudes(amps, num_qubits)
|
||||
.expect("padded amplitude vector length must equal 2^num_qubits");
|
||||
|
||||
Self {
|
||||
state,
|
||||
original_state,
|
||||
original_dim,
|
||||
age: 0.0,
|
||||
noise_rate,
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply decoherence for `dt` time units.
|
||||
///
|
||||
/// Two noise channels act in sequence:
|
||||
///
|
||||
/// 1. **Dephasing** -- every amplitude is multiplied by e^{i*theta} where
|
||||
/// theta is drawn uniformly from `[-pi * noise_rate * dt, pi * noise_rate * dt]`.
|
||||
/// This scrambles phase coherence while exactly preserving per-amplitude
|
||||
/// probabilities.
|
||||
///
|
||||
/// 2. **Amplitude damping** -- each non-ground-state amplitude is scaled by
|
||||
/// sqrt(1 - gamma) where gamma = 1 - e^{-noise_rate * dt}. The probability
|
||||
/// leaked from excited states is added to the |0> ground state, then the
|
||||
/// whole vector is renormalised.
|
||||
///
|
||||
/// The `seed` controls the pseudo-random number generator for
|
||||
/// reproducibility.
|
||||
pub fn decohere(&mut self, dt: f64, seed: u64) {
|
||||
let mut rng = StdRng::seed_from_u64(seed);
|
||||
|
||||
// Damping parameter gamma in [0, 1), approaches 1 for large dt * rate
|
||||
let gamma = 1.0 - (-self.noise_rate * dt).exp();
|
||||
// Phase noise scale in [0, inf)
|
||||
let phase_scale = self.noise_rate * dt;
|
||||
|
||||
let amps = self.state.amplitudes_mut();
|
||||
let n = amps.len();
|
||||
|
||||
// ------ Phase noise (dephasing) ------
|
||||
for amp in amps.iter_mut().take(n) {
|
||||
let angle = (rng.gen::<f64>() - 0.5) * 2.0 * PI * phase_scale;
|
||||
let phase_kick = Complex::from_polar(1.0, angle);
|
||||
*amp = *amp * phase_kick;
|
||||
}
|
||||
|
||||
// ------ Amplitude damping toward |0> ------
|
||||
let decay_factor = (1.0 - gamma).sqrt();
|
||||
let mut leaked_probability = 0.0;
|
||||
|
||||
for amp in amps.iter_mut().skip(1) {
|
||||
let prob_before = amp.norm_sq();
|
||||
*amp = *amp * decay_factor;
|
||||
leaked_probability += prob_before - amp.norm_sq();
|
||||
}
|
||||
|
||||
// Transfer leaked probability into the ground state
|
||||
let p0 = amps[0].norm_sq();
|
||||
let new_p0 = p0 + leaked_probability;
|
||||
if new_p0 > 0.0 && p0 > 0.0 {
|
||||
amps[0] = amps[0] * (new_p0 / p0).sqrt();
|
||||
} else if new_p0 > 0.0 {
|
||||
amps[0] = Complex::new(new_p0.sqrt(), 0.0);
|
||||
}
|
||||
|
||||
// Correct any accumulated numerical drift
|
||||
self.state.normalize();
|
||||
|
||||
self.age += dt;
|
||||
}
|
||||
|
||||
/// Fidelity with the original state: |<original|current>|^2 in [0, 1].
|
||||
///
|
||||
/// Returns 1.0 for a freshly created embedding (perfect memory) and
|
||||
/// decays toward 0.0 as the state decoheres (completely forgotten).
|
||||
pub fn fidelity(&self) -> f64 {
|
||||
let current = self.state.state_vector();
|
||||
let mut inner = Complex::ZERO;
|
||||
for (orig, cur) in self.original_state.iter().zip(current.iter()) {
|
||||
inner = inner + orig.conj() * *cur;
|
||||
}
|
||||
inner.norm_sq()
|
||||
}
|
||||
|
||||
/// Current age of this embedding in abstract time units.
|
||||
pub fn age(&self) -> f64 {
|
||||
self.age
|
||||
}
|
||||
|
||||
/// Quantum-aware similarity: |<self|other>|^2 as a complex inner product.
|
||||
///
|
||||
/// Unlike cosine similarity, this captures phase relationships. Two
|
||||
/// embeddings that have decohered along different random trajectories will
|
||||
/// show reduced similarity even if their probability distributions are
|
||||
/// similar, because their phases no longer align.
|
||||
pub fn quantum_similarity(&self, other: &QuantumEmbedding) -> f64 {
|
||||
let sv1 = self.state.state_vector();
|
||||
let sv2 = other.state.state_vector();
|
||||
let len = sv1.len().min(sv2.len());
|
||||
let mut inner = Complex::ZERO;
|
||||
for i in 0..len {
|
||||
inner = inner + sv1[i].conj() * sv2[i];
|
||||
}
|
||||
inner.norm_sq()
|
||||
}
|
||||
|
||||
/// Extract back to a classical f64 vector.
|
||||
///
|
||||
/// Returns the real part of each amplitude, truncated to the original
|
||||
/// embedding dimension. This is lossy when the state has decohered:
|
||||
/// dephasing moves energy into imaginary components that are discarded,
|
||||
/// and amplitude damping shifts probability toward |0>.
|
||||
pub fn to_embedding(&self) -> Vec<f64> {
|
||||
self.state
|
||||
.state_vector()
|
||||
.iter()
|
||||
.take(self.original_dim)
|
||||
.map(|c| c.re)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Check if the embedding has decohered below a fidelity threshold.
|
||||
///
|
||||
/// Returns `true` when the state still retains at least `threshold`
|
||||
/// fidelity with its original value.
|
||||
pub fn is_coherent(&self, threshold: f64) -> bool {
|
||||
self.fidelity() >= threshold
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Batch operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Apply decoherence to a batch of embeddings, returning indices of those
|
||||
/// still coherent.
|
||||
///
|
||||
/// Each embedding is decohered by `dt` time units using a unique seed derived
|
||||
/// from the base `seed` and the embedding's index. Embeddings whose fidelity
|
||||
/// drops below `threshold` are considered forgotten; the returned vector
|
||||
/// contains the indices of embeddings that remain coherent.
|
||||
pub fn decohere_batch(
|
||||
embeddings: &mut [QuantumEmbedding],
|
||||
dt: f64,
|
||||
threshold: f64,
|
||||
seed: u64,
|
||||
) -> Vec<usize> {
|
||||
let mut coherent = Vec::new();
|
||||
for (i, emb) in embeddings.iter_mut().enumerate() {
|
||||
// Derive a per-embedding seed to avoid correlated noise
|
||||
let emb_seed = seed
|
||||
.wrapping_add(i as u64)
|
||||
.wrapping_mul(6_364_136_223_846_793_005);
|
||||
emb.decohere(dt, emb_seed);
|
||||
if emb.is_coherent(threshold) {
|
||||
coherent.push(i);
|
||||
}
|
||||
}
|
||||
coherent
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Helper: create a simple embedding of the given dimension.
    /// Values are 1.0..=dim, so the vector is non-zero and non-uniform.
    fn sample_embedding(dim: usize) -> Vec<f64> {
        (0..dim).map(|i| (i as f64 + 1.0)).collect()
    }

    #[test]
    fn from_embedding_creates_normalised_state() {
        let emb = QuantumEmbedding::from_embedding(&[3.0, 4.0], 0.1);
        let sv = emb.state.state_vector();
        // Total probability across all amplitudes must sum to 1.
        let norm_sq: f64 = sv.iter().map(|c| c.norm_sq()).sum();
        assert!((norm_sq - 1.0).abs() < 1e-10, "state should be normalised");
    }

    #[test]
    fn from_embedding_pads_to_power_of_two() {
        let emb = QuantumEmbedding::from_embedding(&[1.0, 2.0, 3.0], 0.1);
        // 3 elements -> 4 (2 qubits)
        assert_eq!(emb.state.state_vector().len(), 4);
        assert_eq!(emb.state.num_qubits(), 2);
    }

    #[test]
    fn fresh_embedding_has_unit_fidelity() {
        // No decoherence applied yet, so overlap with the snapshot is exact.
        let emb = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.0, 0.0], 0.1);
        assert!((emb.fidelity() - 1.0).abs() < 1e-10);
    }

    #[test]
    fn decoherence_reduces_fidelity() {
        let mut emb = QuantumEmbedding::from_embedding(&sample_embedding(4), 0.5);
        let f_before = emb.fidelity();
        emb.decohere(1.0, 42);
        let f_after = emb.fidelity();
        assert!(
            f_after < f_before,
            "fidelity should decrease: before={f_before}, after={f_after}"
        );
    }

    #[test]
    fn decoherence_advances_age() {
        // Age accumulates the dt of each decohere call.
        let mut emb = QuantumEmbedding::from_embedding(&[1.0, 2.0], 0.1);
        assert!((emb.age() - 0.0).abs() < 1e-15);
        emb.decohere(0.5, 1);
        assert!((emb.age() - 0.5).abs() < 1e-15);
        emb.decohere(1.5, 2);
        assert!((emb.age() - 2.0).abs() < 1e-15);
    }

    #[test]
    fn heavy_decoherence_destroys_fidelity() {
        // High noise rate (2.0) for 20 time units should leave little overlap
        // with the original state.
        let mut emb = QuantumEmbedding::from_embedding(&sample_embedding(8), 2.0);
        for i in 0..20 {
            emb.decohere(1.0, 100 + i);
        }
        assert!(
            emb.fidelity() < 0.3,
            "heavy decoherence should destroy fidelity: {}",
            emb.fidelity()
        );
    }

    #[test]
    fn quantum_similarity_is_symmetric() {
        // |<a|b>|^2 == |<b|a>|^2 because the inner products are conjugates.
        let a = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.0, 0.0], 0.1);
        let b = QuantumEmbedding::from_embedding(&[0.0, 1.0, 0.0, 0.0], 0.1);
        let sim_ab = a.quantum_similarity(&b);
        let sim_ba = b.quantum_similarity(&a);
        assert!(
            (sim_ab - sim_ba).abs() < 1e-10,
            "similarity should be symmetric"
        );
    }

    #[test]
    fn identical_embeddings_have_similarity_one() {
        let a = QuantumEmbedding::from_embedding(&[1.0, 2.0, 3.0, 4.0], 0.1);
        let b = QuantumEmbedding::from_embedding(&[1.0, 2.0, 3.0, 4.0], 0.1);
        assert!(
            (a.quantum_similarity(&b) - 1.0).abs() < 1e-10,
            "identical embeddings should have similarity 1.0"
        );
    }

    #[test]
    fn to_embedding_round_trips_without_decoherence() {
        let original = vec![3.0, 4.0];
        let emb = QuantumEmbedding::from_embedding(&original, 0.1);
        let recovered = emb.to_embedding();
        assert_eq!(recovered.len(), original.len());
        // Should be the normalised version of the original
        let norm = (3.0f64 * 3.0 + 4.0 * 4.0).sqrt();
        assert!((recovered[0] - 3.0 / norm).abs() < 1e-10);
        assert!((recovered[1] - 4.0 / norm).abs() < 1e-10);
    }

    #[test]
    fn is_coherent_respects_threshold() {
        let mut emb = QuantumEmbedding::from_embedding(&sample_embedding(4), 1.0);
        assert!(emb.is_coherent(0.9));
        // Decohere heavily
        for i in 0..10 {
            emb.decohere(1.0, 200 + i);
        }
        assert!(!emb.is_coherent(0.99));
    }

    #[test]
    fn decohere_batch_filters_correctly() {
        let mut batch: Vec<QuantumEmbedding> = (0..5)
            .map(|i| {
                QuantumEmbedding::from_embedding(
                    &sample_embedding(4),
                    // Higher noise rate for later embeddings
                    0.1 * (i as f64 + 1.0),
                )
            })
            .collect();

        let coherent = decohere_batch(&mut batch, 1.0, 0.3, 999);
        // Embeddings with lower noise rates should remain coherent longer
        // At least the lowest-noise-rate embedding should survive
        assert!(
            !coherent.is_empty(),
            "at least some embeddings should remain coherent with mild decoherence"
        );
        // The first embedding (lowest noise) should be the most likely to survive
        if !coherent.is_empty() {
            assert_eq!(coherent[0], 0, "lowest-noise embedding should survive");
        }
    }

    #[test]
    fn empty_embedding_handled() {
        // An empty input maps to the |0> basis state rather than panicking.
        let emb = QuantumEmbedding::from_embedding(&[], 0.1);
        assert!((emb.fidelity() - 1.0).abs() < 1e-10);
        let recovered = emb.to_embedding();
        // original_dim is max(0, 1) = 1
        assert_eq!(recovered.len(), 1);
    }

    #[test]
    fn zero_noise_rate_preserves_fidelity() {
        let mut emb = QuantumEmbedding::from_embedding(&sample_embedding(4), 0.0);
        emb.decohere(10.0, 42);
        // With noise_rate=0, gamma=0 and phase_scale=0, so no change
        assert!(
            (emb.fidelity() - 1.0).abs() < 1e-10,
            "zero noise rate should preserve fidelity perfectly"
        );
    }
}
|
||||
310
vendor/ruvector/crates/ruqu-exotic/src/reality_check.rs
vendored
Normal file
310
vendor/ruvector/crates/ruqu-exotic/src/reality_check.rs
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
//! # Browser-Native Quantum Reality Checks
|
||||
//!
|
||||
//! Verification circuits that let users test quantum claims locally.
|
||||
//! If an AI says behavior is quantum-inspired, the user can verify it
|
||||
//! against actual quantum mechanics in the browser.
|
||||
//!
|
||||
//! Collapses the gap between explanation and verification.
|
||||
|
||||
use ruqu_core::error::QuantumError;
|
||||
use ruqu_core::gate::Gate;
|
||||
use ruqu_core::state::QuantumState;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// What property we expect to verify.
#[derive(Debug, Clone)]
pub enum ExpectedProperty {
    /// P(qubit = 0) ≈ expected ± tolerance
    ProbabilityZero {
        /// Qubit index to inspect.
        qubit: u32,
        /// Expected probability of measuring |0>.
        expected: f64,
        /// Absolute tolerance on the comparison.
        tolerance: f64,
    },
    /// P(qubit = 1) ≈ expected ± tolerance
    ProbabilityOne {
        /// Qubit index to inspect.
        qubit: u32,
        /// Expected probability of measuring |1>.
        expected: f64,
        /// Absolute tolerance on the comparison.
        tolerance: f64,
    },
    /// Two qubits are entangled: P(same outcome) > min_correlation
    Entangled {
        /// First qubit of the pair.
        qubit_a: u32,
        /// Second qubit of the pair.
        qubit_b: u32,
        /// Minimum acceptable probability that both qubits agree.
        min_correlation: f64,
    },
    /// Qubit is in equal superposition: P(1) ≈ 0.5 ± tolerance
    EqualSuperposition {
        /// Qubit index to inspect.
        qubit: u32,
        /// Absolute tolerance around 0.5.
        tolerance: f64,
    },
    /// Full probability distribution matches ± tolerance
    InterferencePattern {
        /// Expected probability for each basis state, in index order.
        probabilities: Vec<f64>,
        /// Maximum allowed per-entry absolute deviation.
        tolerance: f64,
    },
}
|
||||
|
||||
/// A quantum reality check: a named verification experiment.
pub struct RealityCheck {
    /// Human-readable name, echoed into [`CheckResult::check_name`].
    pub name: String,
    /// One-line explanation of what the experiment demonstrates.
    pub description: String,
    /// Number of qubits the verification circuit requires.
    pub num_qubits: u32,
    /// Property the final state is checked against.
    pub expected: ExpectedProperty,
}
|
||||
|
||||
/// Result of running a reality check.
#[derive(Debug)]
pub struct CheckResult {
    /// Name copied from the originating [`RealityCheck`].
    pub check_name: String,
    /// Whether the measured value satisfied the expected property.
    pub passed: bool,
    /// The value actually computed from the final state.
    pub measured_value: f64,
    /// The value (or bound) the check compared against.
    pub expected_value: f64,
    /// Human-readable summary of the comparison.
    pub detail: String,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Verification engine
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Run a verification circuit and check the expected property.
///
/// Builds a fresh `QuantumState` with `check.num_qubits` qubits, lets
/// `circuit_fn` apply its gates, then evaluates `check.expected` against the
/// resulting state.
///
/// # Errors
///
/// Propagates any `QuantumError` from state construction or from
/// `circuit_fn` itself.
pub fn run_check<F>(check: &RealityCheck, circuit_fn: F) -> Result<CheckResult, QuantumError>
where
    F: FnOnce(&mut QuantumState) -> Result<(), QuantumError>,
{
    let mut state = QuantumState::new(check.num_qubits)?;
    circuit_fn(&mut state)?;

    let probs = state.probabilities();

    match &check.expected {
        ExpectedProperty::ProbabilityZero {
            qubit,
            expected,
            tolerance,
        } => {
            // P(q=0) is the complement of P(q=1).
            let p0 = 1.0 - state.probability_of_qubit(*qubit);
            let pass = (p0 - expected).abs() <= *tolerance;
            Ok(CheckResult {
                check_name: check.name.clone(),
                passed: pass,
                measured_value: p0,
                expected_value: *expected,
                detail: format!(
                    "P(q{}=0) = {:.6}, expected {:.6} +/- {:.6}",
                    qubit, p0, expected, tolerance
                ),
            })
        }
        ExpectedProperty::ProbabilityOne {
            qubit,
            expected,
            tolerance,
        } => {
            let p1 = state.probability_of_qubit(*qubit);
            let pass = (p1 - expected).abs() <= *tolerance;
            Ok(CheckResult {
                check_name: check.name.clone(),
                passed: pass,
                measured_value: p1,
                expected_value: *expected,
                detail: format!(
                    "P(q{}=1) = {:.6}, expected {:.6} +/- {:.6}",
                    qubit, p1, expected, tolerance
                ),
            })
        }
        ExpectedProperty::Entangled {
            qubit_a,
            qubit_b,
            min_correlation,
        } => {
            // Correlation = P(same outcome) = P(00) + P(11)
            let bit_a = 1usize << qubit_a;
            let bit_b = 1usize << qubit_b;
            let mut p_same = 0.0;
            for (i, &p) in probs.iter().enumerate() {
                // Basis-state index `i` encodes one bit per qubit.
                let a = (i & bit_a) != 0;
                let b = (i & bit_b) != 0;
                if a == b {
                    p_same += p;
                }
            }
            let pass = p_same >= *min_correlation;
            Ok(CheckResult {
                check_name: check.name.clone(),
                passed: pass,
                measured_value: p_same,
                expected_value: *min_correlation,
                detail: format!(
                    "P(q{}==q{}) = {:.6}, min {:.6}",
                    qubit_a, qubit_b, p_same, min_correlation
                ),
            })
        }
        ExpectedProperty::EqualSuperposition { qubit, tolerance } => {
            let p1 = state.probability_of_qubit(*qubit);
            let pass = (p1 - 0.5).abs() <= *tolerance;
            Ok(CheckResult {
                check_name: check.name.clone(),
                passed: pass,
                measured_value: p1,
                expected_value: 0.5,
                detail: format!(
                    "P(q{}=1) = {:.6}, expected 0.5 +/- {:.6}",
                    qubit, p1, tolerance
                ),
            })
        }
        ExpectedProperty::InterferencePattern {
            probabilities: expected_probs,
            tolerance,
        } => {
            // NOTE(review): zip truncates silently if `expected_probs` has a
            // different length than the measured distribution — confirm
            // callers always supply 2^num_qubits entries.
            let max_diff: f64 = probs
                .iter()
                .zip(expected_probs.iter())
                .map(|(a, b)| (a - b).abs())
                .fold(0.0_f64, f64::max);
            let pass = max_diff <= *tolerance;
            Ok(CheckResult {
                check_name: check.name.clone(),
                passed: pass,
                measured_value: max_diff,
                expected_value: 0.0,
                detail: format!(
                    "max |p_measured - p_expected| = {:.6}, tolerance {:.6}",
                    max_diff, tolerance
                ),
            })
        }
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Built-in verification circuits
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Verify superposition: H|0⟩ should give 50/50.
|
||||
pub fn check_superposition() -> CheckResult {
|
||||
let check = RealityCheck {
|
||||
name: "Superposition".into(),
|
||||
description: "H|0> produces equal superposition".into(),
|
||||
num_qubits: 1,
|
||||
expected: ExpectedProperty::EqualSuperposition {
|
||||
qubit: 0,
|
||||
tolerance: 1e-10,
|
||||
},
|
||||
};
|
||||
run_check(&check, |state| {
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
Ok(())
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Verify entanglement: Bell state |00⟩ + |11⟩ has perfect correlation.
|
||||
pub fn check_entanglement() -> CheckResult {
|
||||
let check = RealityCheck {
|
||||
name: "Entanglement".into(),
|
||||
description: "Bell state has perfectly correlated measurements".into(),
|
||||
num_qubits: 2,
|
||||
expected: ExpectedProperty::Entangled {
|
||||
qubit_a: 0,
|
||||
qubit_b: 1,
|
||||
min_correlation: 0.99,
|
||||
},
|
||||
};
|
||||
run_check(&check, |state| {
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
state.apply_gate(&Gate::CNOT(0, 1))?;
|
||||
Ok(())
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Verify interference: H-Z-H = X, so |0⟩ → |1⟩.
|
||||
/// Destructive interference on |0⟩, constructive on |1⟩.
|
||||
pub fn check_interference() -> CheckResult {
|
||||
let check = RealityCheck {
|
||||
name: "Interference".into(),
|
||||
description: "H-Z-H = X: destructive interference eliminates |0>".into(),
|
||||
num_qubits: 1,
|
||||
expected: ExpectedProperty::ProbabilityOne {
|
||||
qubit: 0,
|
||||
expected: 1.0,
|
||||
tolerance: 1e-10,
|
||||
},
|
||||
};
|
||||
run_check(&check, |state| {
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
state.apply_gate(&Gate::Z(0))?;
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
Ok(())
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Verify phase kickback: Deutsch's algorithm for balanced f(x)=x.
|
||||
/// Query qubit should measure |1⟩ with certainty.
|
||||
pub fn check_phase_kickback() -> CheckResult {
|
||||
let check = RealityCheck {
|
||||
name: "Phase Kickback".into(),
|
||||
description: "Deutsch oracle for f(x)=x: phase kickback produces |1> on query qubit".into(),
|
||||
num_qubits: 2,
|
||||
expected: ExpectedProperty::ProbabilityOne {
|
||||
qubit: 0,
|
||||
expected: 1.0,
|
||||
tolerance: 1e-10,
|
||||
},
|
||||
};
|
||||
run_check(&check, |state| {
|
||||
// Prepare |01⟩
|
||||
state.apply_gate(&Gate::X(1))?;
|
||||
// Hadamard both
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
state.apply_gate(&Gate::H(1))?;
|
||||
// Oracle: f(x) = x → CNOT
|
||||
state.apply_gate(&Gate::CNOT(0, 1))?;
|
||||
// Final Hadamard on query
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
Ok(())
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Verify no-cloning: CNOT cannot copy a superposition.
|
||||
/// If |ψ⟩ = H|0⟩ = |+⟩, then CNOT(0,1)|+,0⟩ = (|00⟩+|11⟩)/√2 (Bell state),
|
||||
/// NOT |+,+⟩ = (|00⟩+|01⟩+|10⟩+|11⟩)/2.
|
||||
///
|
||||
/// We detect this by checking that qubit 1 is NOT in an equal superposition
|
||||
/// independently — it is entangled with qubit 0, not an independent copy.
|
||||
pub fn check_no_cloning() -> CheckResult {
|
||||
let check = RealityCheck {
|
||||
name: "No-Cloning".into(),
|
||||
description:
|
||||
"CNOT cannot independently copy a superposition (produces entanglement instead)".into(),
|
||||
num_qubits: 2,
|
||||
expected: ExpectedProperty::InterferencePattern {
|
||||
// Bell state: P(00) = 0.5, P(01) = 0, P(10) = 0, P(11) = 0.5
|
||||
// If cloning worked: P(00) = 0.25, P(01) = 0.25, P(10) = 0.25, P(11) = 0.25
|
||||
probabilities: vec![0.5, 0.0, 0.0, 0.5],
|
||||
tolerance: 1e-10,
|
||||
},
|
||||
};
|
||||
run_check(&check, |state| {
|
||||
state.apply_gate(&Gate::H(0))?;
|
||||
state.apply_gate(&Gate::CNOT(0, 1))?;
|
||||
Ok(())
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Run all built-in checks and return results.
///
/// Each helper builds and runs its own fixed verification circuit; the
/// results are returned in the order the checks are listed here.
pub fn run_all_checks() -> Vec<CheckResult> {
    vec![
        check_superposition(),
        check_entanglement(),
        check_interference(),
        check_phase_kickback(),
        check_no_cloning(),
    ]
}
|
||||
351
vendor/ruvector/crates/ruqu-exotic/src/reasoning_qec.rs
vendored
Normal file
351
vendor/ruvector/crates/ruqu-exotic/src/reasoning_qec.rs
vendored
Normal file
@@ -0,0 +1,351 @@
|
||||
//! # Reasoning QEC -- Quantum Error Correction for Reasoning Traces
|
||||
//!
|
||||
//! Treats reasoning steps like qubits. Each step is encoded as a quantum state
|
||||
//! (high confidence = close to |0>, low confidence = rotated toward |1>).
|
||||
//! Noise is injected to simulate reasoning errors, then a repetition-code-style
|
||||
//! syndrome extraction detects when adjacent steps become incoherent.
|
||||
//!
|
||||
//! This provides **structural** reasoning integrity checks, not semantic ones.
|
||||
//! The 1D repetition code uses:
|
||||
//! - N data qubits (one per reasoning step)
|
||||
//! - N-1 ancilla qubits (parity checks between adjacent steps)
|
||||
//! - Total: 2N - 1 qubits (maximum N = 13 to stay within 25-qubit limit)
|
||||
|
||||
use ruqu_core::error::QuantumError;
|
||||
use ruqu_core::gate::Gate;
|
||||
use ruqu_core::state::QuantumState;
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
/// A single reasoning step encoded as a quantum state.
/// The step is either "valid" (close to |0>) or "flawed" (close to |1>).
#[derive(Debug, Clone)]
pub struct ReasoningStep {
    /// Human-readable identifier for the step.
    pub label: String,
    /// 0.0 = completely uncertain, 1.0 = fully confident; maps to the Ry
    /// rotation angle used when the step is encoded.
    pub confidence: f64,
}
|
||||
|
||||
/// Configuration for reasoning QEC
pub struct ReasoningQecConfig {
    /// Number of reasoning steps (data qubits)
    pub num_steps: usize,
    /// Noise rate per step (probability of an X error per step)
    pub noise_rate: f64,
    /// Seed for reproducibility; `None` falls back to a fixed default.
    pub seed: Option<u64>,
}
|
||||
|
||||
/// Result of a reasoning QEC analysis
#[derive(Debug)]
pub struct ReasoningQecResult {
    /// Which steps had errors detected (indices)
    pub error_steps: Vec<usize>,
    /// Syndrome bits (one per stabilizer; bit i covers steps i and i+1)
    pub syndrome: Vec<bool>,
    /// Whether the overall reasoning trace is decodable (correctable)
    pub is_decodable: bool,
    /// Fidelity of the reasoning trace after correction, relative to the
    /// pre-noise state.
    pub corrected_fidelity: f64,
    /// Number of steps total
    pub num_steps: usize,
}
|
||||
|
||||
/// A reasoning trace with QEC-style error detection.
///
/// Maps reasoning steps to a 1D repetition code:
/// - Each step is a data qubit
/// - Stabilizers check parity between adjacent steps
/// - If adjacent steps disagree (one flipped, one not), syndrome fires
///
/// This is simpler than a full surface code but captures the key idea:
/// structural detection of reasoning incoherence.
pub struct ReasoningTrace {
    /// The reasoning steps being protected (one data qubit each).
    steps: Vec<ReasoningStep>,
    /// Combined register: data qubits followed by parity-check ancillas.
    state: QuantumState,
    /// Noise rate and seed used by the QEC pipeline.
    config: ReasoningQecConfig,
}
|
||||
|
||||
impl ReasoningTrace {
    /// Create a new reasoning trace from steps.
    /// Each step's confidence maps to a rotation: high confidence = close to |0>.
    /// Total qubits = num_steps (data) + (num_steps - 1) (ancilla for parity checks)
    ///
    /// # Errors
    ///
    /// Returns an error for an empty step list, or when the qubit budget
    /// (2N - 1 > 25) is exceeded.
    pub fn new(
        steps: Vec<ReasoningStep>,
        config: ReasoningQecConfig,
    ) -> Result<Self, QuantumError> {
        let num_steps = steps.len();

        if num_steps == 0 {
            return Err(QuantumError::CircuitError(
                "reasoning trace requires at least one step".into(),
            ));
        }

        // Total qubits: data (0..num_steps) + ancillas (num_steps..2*num_steps-1)
        let total_qubits = (2 * num_steps - 1) as u32;

        // Check qubit limit early (MAX_QUBITS = 25)
        if total_qubits > 25 {
            return Err(QuantumError::QubitLimitExceeded {
                requested: total_qubits,
                maximum: 25,
            });
        }

        let seed = config.seed.unwrap_or(42);
        let mut state = QuantumState::new_with_seed(total_qubits, seed)?;

        // Encode each step: rotate by angle based on confidence
        // confidence=1.0 -> |0> (no rotation), confidence=0.0 -> equal superposition (pi/2)
        for (i, step) in steps.iter().enumerate() {
            let angle = std::f64::consts::FRAC_PI_2 * (1.0 - step.confidence);
            if angle.abs() > 1e-15 {
                state.apply_gate(&Gate::Ry(i as u32, angle))?;
            }
        }

        Ok(Self {
            steps,
            state,
            config,
        })
    }

    /// Inject noise into the reasoning trace.
    /// Each step independently suffers a bit flip (X error) with probability noise_rate.
    /// The RNG seed is derived from the configured seed, so repeated runs are
    /// reproducible.
    pub fn inject_noise(&mut self) -> Result<(), QuantumError> {
        let seed = self.config.seed.unwrap_or(42).wrapping_add(12345);
        let mut rng = StdRng::seed_from_u64(seed);
        for i in 0..self.steps.len() {
            if rng.gen::<f64>() < self.config.noise_rate {
                self.state.apply_gate(&Gate::X(i as u32))?;
            }
        }
        Ok(())
    }

    /// Extract syndrome by checking parity between adjacent reasoning steps.
    /// Uses ancilla qubits to perform non-destructive parity measurement.
    /// Syndrome bit i fires if steps i and i+1 disagree (ZZ stabilizer = -1).
    ///
    /// NOTE(review): measuring each ancilla projects any data qubits it is
    /// entangled with, so steps encoded with partial confidence are partially
    /// collapsed here — confirm this is the intended semantics.
    pub fn extract_syndrome(&mut self) -> Result<Vec<bool>, QuantumError> {
        let num_steps = self.steps.len();
        let mut syndrome = Vec::with_capacity(num_steps.saturating_sub(1));

        for i in 0..(num_steps - 1) {
            let data1 = i as u32;
            let data2 = (i + 1) as u32;
            // Ancillas occupy indices num_steps..2*num_steps-1.
            let ancilla = (num_steps + i) as u32;

            // Reset ancilla to |0>
            self.state.reset_qubit(ancilla)?;

            // CNOT from data1 to ancilla, CNOT from data2 to ancilla
            // Ancilla will be |1> if data1 != data2
            self.state.apply_gate(&Gate::CNOT(data1, ancilla))?;
            self.state.apply_gate(&Gate::CNOT(data2, ancilla))?;

            // Measure ancilla
            let outcome = self.state.measure(ancilla)?;
            syndrome.push(outcome.result);
        }

        Ok(syndrome)
    }

    /// Decode syndrome and attempt correction.
    /// Simple decoder: if syndrome\[i\] fires, flip step i+1 (rightmost error assumption).
    pub fn decode_and_correct(&mut self, syndrome: &[bool]) -> Result<Vec<usize>, QuantumError> {
        let mut corrected = Vec::new();
        // Simple decoder: for each fired syndrome, the error is likely
        // between the two data qubits. Correct the right one.
        for (i, &fired) in syndrome.iter().enumerate() {
            if fired {
                let step_to_correct = i + 1;
                self.state.apply_gate(&Gate::X(step_to_correct as u32))?;
                corrected.push(step_to_correct);
            }
        }
        Ok(corrected)
    }

    /// Run the full QEC pipeline: inject noise, extract syndrome, decode, correct.
    ///
    /// Returns which steps were flagged, whether the trace was considered
    /// decodable, and the fidelity of the corrected state against the
    /// pre-noise snapshot.
    pub fn run_qec(&mut self) -> Result<ReasoningQecResult, QuantumError> {
        // Save state before noise for fidelity comparison
        let clean_sv: Vec<Complex> = self.state.state_vector().to_vec();
        let clean_state = QuantumState::from_amplitudes(clean_sv, self.state.num_qubits())?;

        // Inject noise
        self.inject_noise()?;

        // Extract syndrome
        let syndrome = self.extract_syndrome()?;

        // Determine which steps have errors
        // (mirrors the decoder: a fired syndrome bit i implicates step i+1)
        let mut error_steps = Vec::new();
        for (i, &s) in syndrome.iter().enumerate() {
            if s {
                error_steps.push(i + 1);
            }
        }

        let is_decodable = error_steps.len() <= self.steps.len() / 2;

        // Attempt correction
        if is_decodable {
            self.decode_and_correct(&syndrome)?;
        }

        let corrected_fidelity = self.state.fidelity(&clean_state);

        Ok(ReasoningQecResult {
            error_steps,
            syndrome,
            is_decodable,
            corrected_fidelity,
            num_steps: self.steps.len(),
        })
    }

    /// Get the number of reasoning steps
    pub fn num_steps(&self) -> usize {
        self.steps.len()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build `n` identical-confidence steps labelled `step_0 .. step_{n-1}`.
    fn make_steps(n: usize, confidence: f64) -> Vec<ReasoningStep> {
        let mut steps = Vec::with_capacity(n);
        for i in 0..n {
            steps.push(ReasoningStep {
                label: format!("step_{}", i),
                confidence,
            });
        }
        steps
    }

    /// Shorthand for a seeded config.
    fn config(num_steps: usize, noise_rate: f64, seed: u64) -> ReasoningQecConfig {
        ReasoningQecConfig {
            num_steps,
            noise_rate,
            seed: Some(seed),
        }
    }

    #[test]
    fn test_new_creates_trace() {
        let trace = ReasoningTrace::new(make_steps(5, 1.0), config(5, 0.0, 42));
        assert!(trace.is_ok());
        assert_eq!(trace.unwrap().num_steps(), 5);
    }

    #[test]
    fn test_empty_steps_rejected() {
        assert!(ReasoningTrace::new(vec![], config(0, 0.0, 42)).is_err());
    }

    #[test]
    fn test_qubit_limit_exceeded() {
        // 14 steps -> 2*14-1 = 27 qubits > 25
        assert!(ReasoningTrace::new(make_steps(14, 1.0), config(14, 0.0, 42)).is_err());
    }

    #[test]
    fn test_max_allowed_steps() {
        // 13 steps -> 2*13-1 = 25 qubits = MAX_QUBITS (should succeed)
        assert!(ReasoningTrace::new(make_steps(13, 1.0), config(13, 0.0, 42)).is_ok());
    }

    #[test]
    fn test_no_noise_no_syndrome() {
        let mut trace = ReasoningTrace::new(make_steps(5, 1.0), config(5, 0.0, 42)).unwrap();
        let syndrome = trace.extract_syndrome().unwrap();
        // Fully confident steps (|0>) with zero noise: no parity check fires.
        assert!(syndrome.iter().all(|&s| !s));
    }

    #[test]
    fn test_run_qec_zero_noise() {
        let mut trace = ReasoningTrace::new(make_steps(5, 1.0), config(5, 0.0, 42)).unwrap();
        let result = trace.run_qec().unwrap();
        assert!(result.error_steps.is_empty());
        assert!(result.is_decodable);
    }

    #[test]
    fn test_run_qec_with_noise() {
        let mut trace = ReasoningTrace::new(make_steps(5, 1.0), config(5, 0.5, 100)).unwrap();
        let result = trace.run_qec().unwrap();
        assert_eq!(result.num_steps, 5);
        // Syndrome length = num_steps - 1
        assert_eq!(result.syndrome.len(), 4);
    }

    #[test]
    fn test_single_step_trace() {
        let mut trace = ReasoningTrace::new(make_steps(1, 0.8), config(1, 0.0, 42)).unwrap();
        // A single step admits no parity checks, so the syndrome is empty.
        assert!(trace.extract_syndrome().unwrap().is_empty());
    }

    #[test]
    fn test_partial_confidence_encoding() {
        // 50% confidence should encode into a superposition, not pure |00...0>.
        let trace = ReasoningTrace::new(make_steps(3, 0.5), config(3, 0.0, 42)).unwrap();
        let probs = trace.state.probabilities();
        assert!(probs[0] < 1.0);
    }
}
|
||||
275
vendor/ruvector/crates/ruqu-exotic/src/reversible_memory.rs
vendored
Normal file
275
vendor/ruvector/crates/ruqu-exotic/src/reversible_memory.rs
vendored
Normal file
@@ -0,0 +1,275 @@
|
||||
//! # Time-Reversible Quantum Memory
|
||||
//!
|
||||
//! Because the simulator has full state access and all quantum gates are
|
||||
//! unitary (and therefore invertible), we can **rewind** evolution.
|
||||
//!
|
||||
//! This enables counterfactual debugging: "What would this system have
|
||||
//! believed if one observation was missing?"
|
||||
//!
|
||||
//! Most ML systems are forward-only. This is backward-capable.
|
||||
|
||||
use ruqu_core::error::QuantumError;
|
||||
use ruqu_core::gate::Gate;
|
||||
use ruqu_core::state::QuantumState;
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Gate inversion
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Compute the inverse of a unitary gate.
|
||||
///
|
||||
/// Self-inverse gates (X, Y, Z, H, CNOT, CZ, SWAP) return themselves.
|
||||
/// Rotation gates negate their angle. S↔S†, T↔T†.
|
||||
/// Non-unitary operations (Measure, Reset, Barrier) cannot be inverted.
|
||||
pub fn inverse_gate(gate: &Gate) -> Result<Gate, QuantumError> {
|
||||
match gate {
|
||||
// Self-inverse
|
||||
Gate::X(q) => Ok(Gate::X(*q)),
|
||||
Gate::Y(q) => Ok(Gate::Y(*q)),
|
||||
Gate::Z(q) => Ok(Gate::Z(*q)),
|
||||
Gate::H(q) => Ok(Gate::H(*q)),
|
||||
Gate::CNOT(a, b) => Ok(Gate::CNOT(*a, *b)),
|
||||
Gate::CZ(a, b) => Ok(Gate::CZ(*a, *b)),
|
||||
Gate::SWAP(a, b) => Ok(Gate::SWAP(*a, *b)),
|
||||
|
||||
// Rotation inverses: negate angle
|
||||
Gate::Rx(q, t) => Ok(Gate::Rx(*q, -*t)),
|
||||
Gate::Ry(q, t) => Ok(Gate::Ry(*q, -*t)),
|
||||
Gate::Rz(q, t) => Ok(Gate::Rz(*q, -*t)),
|
||||
Gate::Phase(q, t) => Ok(Gate::Phase(*q, -*t)),
|
||||
Gate::Rzz(a, b, t) => Ok(Gate::Rzz(*a, *b, -*t)),
|
||||
|
||||
// Adjoint pairs
|
||||
Gate::S(q) => Ok(Gate::Sdg(*q)),
|
||||
Gate::Sdg(q) => Ok(Gate::S(*q)),
|
||||
Gate::T(q) => Ok(Gate::Tdg(*q)),
|
||||
Gate::Tdg(q) => Ok(Gate::T(*q)),
|
||||
|
||||
// Custom unitary: conjugate transpose
|
||||
Gate::Unitary1Q(q, m) => {
|
||||
let inv = [
|
||||
[m[0][0].conj(), m[1][0].conj()],
|
||||
[m[0][1].conj(), m[1][1].conj()],
|
||||
];
|
||||
Ok(Gate::Unitary1Q(*q, inv))
|
||||
}
|
||||
|
||||
// Non-unitary: cannot invert
|
||||
Gate::Measure(_) | Gate::Reset(_) | Gate::Barrier => Err(QuantumError::CircuitError(
|
||||
"cannot invert non-unitary gate (Measure/Reset/Barrier)".into(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Reversible memory
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A recorded gate with its precomputed inverse.
|
||||
#[derive(Clone)]
|
||||
struct GateRecord {
|
||||
gate: Gate,
|
||||
inverse: Gate,
|
||||
}
|
||||
|
||||
/// Quantum memory that records all operations and can rewind them.
|
||||
///
|
||||
/// Every [`apply`] stores the gate and its inverse. [`rewind`] pops the
|
||||
/// last n gates and applies their inverses, restoring an earlier state.
|
||||
/// [`counterfactual`] replays history with one step omitted.
|
||||
pub struct ReversibleMemory {
|
||||
state: QuantumState,
|
||||
history: Vec<GateRecord>,
|
||||
initial_amps: Vec<Complex>,
|
||||
num_qubits: u32,
|
||||
}
|
||||
|
||||
/// Outcome of removing one recorded step and replaying history without it.
#[derive(Debug)]
pub struct CounterfactualResult {
    /// Measurement probabilities after replaying without the removed step.
    pub counterfactual_probs: Vec<f64>,
    /// Measurement probabilities of the actual (unmodified) evolution.
    pub original_probs: Vec<f64>,
    /// Euclidean (L2) distance between the two probability vectors.
    pub divergence: f64,
    /// Index of the history entry that was omitted.
    pub removed_step: usize,
}
|
||||
|
||||
/// Per-step sensitivity of the final state to a small perturbation.
#[derive(Debug)]
pub struct SensitivityResult {
    /// For each step: 1 - fidelity(perturbed final, original final).
    pub sensitivities: Vec<f64>,
    /// Index of the step with the largest sensitivity.
    pub most_sensitive: usize,
    /// Index of the step with the smallest sensitivity.
    pub least_sensitive: usize,
}
|
||||
|
||||
impl ReversibleMemory {
|
||||
/// Create a new reversible memory with `num_qubits` qubits in |0…0⟩.
|
||||
pub fn new(num_qubits: u32) -> Result<Self, QuantumError> {
|
||||
let state = QuantumState::new(num_qubits)?;
|
||||
let initial_amps = state.state_vector().to_vec();
|
||||
Ok(Self {
|
||||
state,
|
||||
history: Vec::new(),
|
||||
initial_amps,
|
||||
num_qubits,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create with a deterministic seed.
|
||||
pub fn new_with_seed(num_qubits: u32, seed: u64) -> Result<Self, QuantumError> {
|
||||
let state = QuantumState::new_with_seed(num_qubits, seed)?;
|
||||
let initial_amps = state.state_vector().to_vec();
|
||||
Ok(Self {
|
||||
state,
|
||||
history: Vec::new(),
|
||||
initial_amps,
|
||||
num_qubits,
|
||||
})
|
||||
}
|
||||
|
||||
/// Apply a gate and record it. Non-unitary gates are rejected.
|
||||
pub fn apply(&mut self, gate: Gate) -> Result<(), QuantumError> {
|
||||
let inv = inverse_gate(&gate)?;
|
||||
self.state.apply_gate(&gate)?;
|
||||
self.history.push(GateRecord { gate, inverse: inv });
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Rewind the last `steps` operations by applying their inverses.
|
||||
/// Returns how many were actually rewound.
|
||||
pub fn rewind(&mut self, steps: usize) -> Result<usize, QuantumError> {
|
||||
let actual = steps.min(self.history.len());
|
||||
for _ in 0..actual {
|
||||
let record = self.history.pop().unwrap();
|
||||
self.state.apply_gate(&record.inverse)?;
|
||||
}
|
||||
Ok(actual)
|
||||
}
|
||||
|
||||
/// Counterfactual: what would the final state be if step `remove_index`
|
||||
/// never happened?
|
||||
///
|
||||
/// Replays the full history from the initial state, skipping the
|
||||
/// specified step, then compares with the original outcome.
|
||||
pub fn counterfactual(
|
||||
&self,
|
||||
remove_index: usize,
|
||||
) -> Result<CounterfactualResult, QuantumError> {
|
||||
if remove_index >= self.history.len() {
|
||||
return Err(QuantumError::CircuitError(format!(
|
||||
"step {} out of range (history has {} steps)",
|
||||
remove_index,
|
||||
self.history.len()
|
||||
)));
|
||||
}
|
||||
|
||||
// Replay without the removed step
|
||||
let mut cf_state =
|
||||
QuantumState::from_amplitudes(self.initial_amps.clone(), self.num_qubits)?;
|
||||
for (i, record) in self.history.iter().enumerate() {
|
||||
if i != remove_index {
|
||||
cf_state.apply_gate(&record.gate)?;
|
||||
}
|
||||
}
|
||||
|
||||
let cf_probs = cf_state.probabilities();
|
||||
let orig_probs = self.state.probabilities();
|
||||
|
||||
// L2 divergence
|
||||
let divergence: f64 = orig_probs
|
||||
.iter()
|
||||
.zip(cf_probs.iter())
|
||||
.map(|(a, b)| (a - b) * (a - b))
|
||||
.sum::<f64>()
|
||||
.sqrt();
|
||||
|
||||
Ok(CounterfactualResult {
|
||||
counterfactual_probs: cf_probs,
|
||||
original_probs: orig_probs,
|
||||
divergence,
|
||||
removed_step: remove_index,
|
||||
})
|
||||
}
|
||||
|
||||
/// Sensitivity analysis: for each step, insert a small Rz perturbation
|
||||
/// after it and measure how much the final state diverges.
|
||||
///
|
||||
/// Sensitivity = 1 − fidelity(perturbed_final, original_final).
|
||||
pub fn sensitivity_analysis(
|
||||
&self,
|
||||
perturbation_angle: f64,
|
||||
) -> Result<SensitivityResult, QuantumError> {
|
||||
if self.history.is_empty() {
|
||||
return Ok(SensitivityResult {
|
||||
sensitivities: vec![],
|
||||
most_sensitive: 0,
|
||||
least_sensitive: 0,
|
||||
});
|
||||
}
|
||||
|
||||
let mut sensitivities = Vec::with_capacity(self.history.len());
|
||||
|
||||
for perturb_idx in 0..self.history.len() {
|
||||
let mut perturbed =
|
||||
QuantumState::from_amplitudes(self.initial_amps.clone(), self.num_qubits)?;
|
||||
|
||||
for (i, record) in self.history.iter().enumerate() {
|
||||
perturbed.apply_gate(&record.gate)?;
|
||||
if i == perturb_idx {
|
||||
let q = record.gate.qubits().first().copied().unwrap_or(0);
|
||||
perturbed.apply_gate(&Gate::Rz(q, perturbation_angle))?;
|
||||
}
|
||||
}
|
||||
|
||||
let fid = self.state.fidelity(&perturbed);
|
||||
sensitivities.push(1.0 - fid);
|
||||
}
|
||||
|
||||
let most_sensitive = sensitivities
|
||||
.iter()
|
||||
.enumerate()
|
||||
.max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
|
||||
.map(|(i, _)| i)
|
||||
.unwrap_or(0);
|
||||
|
||||
let least_sensitive = sensitivities
|
||||
.iter()
|
||||
.enumerate()
|
||||
.min_by(|a, b| a.1.partial_cmp(b.1).unwrap())
|
||||
.map(|(i, _)| i)
|
||||
.unwrap_or(0);
|
||||
|
||||
Ok(SensitivityResult {
|
||||
sensitivities,
|
||||
most_sensitive,
|
||||
least_sensitive,
|
||||
})
|
||||
}
|
||||
|
||||
/// Current state vector.
|
||||
pub fn state_vector(&self) -> &[Complex] {
|
||||
self.state.state_vector()
|
||||
}
|
||||
|
||||
/// Current measurement probabilities.
|
||||
pub fn probabilities(&self) -> Vec<f64> {
|
||||
self.state.probabilities()
|
||||
}
|
||||
|
||||
/// Number of recorded operations.
|
||||
pub fn history_len(&self) -> usize {
|
||||
self.history.len()
|
||||
}
|
||||
|
||||
/// Number of qubits.
|
||||
pub fn num_qubits(&self) -> u32 {
|
||||
self.num_qubits
|
||||
}
|
||||
}
|
||||
499
vendor/ruvector/crates/ruqu-exotic/src/swarm_interference.rs
vendored
Normal file
499
vendor/ruvector/crates/ruqu-exotic/src/swarm_interference.rs
vendored
Normal file
@@ -0,0 +1,499 @@
|
||||
//! # Swarm Interference
|
||||
//!
|
||||
//! Agents don't vote -- they *interfere*. Each agent contributes a complex
|
||||
//! amplitude toward one or more actions. Conflicting agents cancel
|
||||
//! (destructive interference). Reinforcing agents amplify (constructive
|
||||
//! interference). The decision emerges from the interference pattern, not
|
||||
//! from a majority vote or consensus protocol.
|
||||
//!
|
||||
//! ## Model
|
||||
//!
|
||||
//! - **Action**: something the swarm can do, identified by an `id` string.
|
||||
//! - **Agent contribution**: a complex amplitude per action. The *magnitude*
|
||||
//! encodes confidence; the *phase* encodes stance (0 = support, pi = oppose).
|
||||
//! - **Decision**: for each action, sum all contributing amplitudes. The
|
||||
//! resulting probability |sum|^2 determines the action's strength.
|
||||
//!
|
||||
//! Destructive interference naturally resolves conflicts: an action backed
|
||||
//! by 3 agents at phase 0 and opposed by 3 agents at phase pi has zero net
|
||||
//! amplitude, so it is detected as a deadlock.
|
||||
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use std::collections::HashMap;
|
||||
use std::f64::consts::PI;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A candidate action that agents may support or oppose.
#[derive(Debug, Clone, PartialEq)]
pub struct Action {
    /// Unique identifier; contributions are aggregated by this key.
    pub id: String,
    /// Human-readable description of the action.
    pub description: String,
}
|
||||
|
||||
/// An agent's complex-amplitude contribution to one or more actions.
|
||||
///
|
||||
/// The amplitude encodes both confidence (magnitude) and stance (phase).
|
||||
/// Phase 0 = full support, phase pi = full opposition.
|
||||
pub struct AgentContribution {
|
||||
pub agent_id: String,
|
||||
pub amplitudes: Vec<(Action, Complex)>,
|
||||
}
|
||||
|
||||
impl AgentContribution {
|
||||
/// Create a contribution where the agent supports or opposes a single
|
||||
/// action with the given confidence.
|
||||
///
|
||||
/// - `confidence` in `[0, 1]` sets the magnitude.
|
||||
/// - `support = true` => phase 0 (constructive with other supporters).
|
||||
/// - `support = false` => phase pi (destructive against supporters).
|
||||
pub fn new(agent_id: &str, action: Action, confidence: f64, support: bool) -> Self {
|
||||
let phase = if support { 0.0 } else { PI };
|
||||
let amplitude = Complex::from_polar(confidence, phase);
|
||||
Self {
|
||||
agent_id: agent_id.to_string(),
|
||||
amplitudes: vec![(action, amplitude)],
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a contribution spanning multiple actions with explicit complex
|
||||
/// amplitudes.
|
||||
pub fn multi(agent_id: &str, amplitudes: Vec<(Action, Complex)>) -> Self {
|
||||
Self {
|
||||
agent_id: agent_id.to_string(),
|
||||
amplitudes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The swarm decision engine using quantum interference.
|
||||
pub struct SwarmInterference {
|
||||
contributions: Vec<AgentContribution>,
|
||||
}
|
||||
|
||||
/// Result of swarm interference for a single action.
|
||||
#[derive(Debug)]
|
||||
pub struct SwarmDecision {
|
||||
/// The action evaluated.
|
||||
pub action: Action,
|
||||
/// |total_amplitude|^2 after interference.
|
||||
pub probability: f64,
|
||||
/// Number of agents whose phase reinforced the net amplitude.
|
||||
pub constructive_count: usize,
|
||||
/// Number of agents whose phase opposed the net amplitude.
|
||||
pub destructive_count: usize,
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Implementation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
impl SwarmInterference {
|
||||
/// Create an empty swarm interference engine.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
contributions: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add an agent's contribution to the interference pattern.
|
||||
pub fn contribute(&mut self, contribution: AgentContribution) {
|
||||
self.contributions.push(contribution);
|
||||
}
|
||||
|
||||
/// Compute the interference pattern across all agents for all actions.
|
||||
///
|
||||
/// For each unique action (matched by `action.id`):
|
||||
/// 1. Sum all agent amplitudes (complex addition).
|
||||
/// 2. Compute probability = |sum|^2.
|
||||
/// 3. Classify each contributing agent as constructive or destructive
|
||||
/// relative to the net amplitude's phase.
|
||||
///
|
||||
/// Returns actions sorted by probability (descending).
|
||||
pub fn decide(&self) -> Vec<SwarmDecision> {
|
||||
let (action_map, amplitude_map, agent_phases_map) = self.aggregate();
|
||||
|
||||
let mut decisions: Vec<SwarmDecision> = amplitude_map
|
||||
.into_iter()
|
||||
.map(|(id, total)| {
|
||||
let probability = total.norm_sq();
|
||||
let net_phase = total.arg();
|
||||
|
||||
// Count constructive vs destructive contributors.
|
||||
let phases = agent_phases_map.get(&id).unwrap();
|
||||
let mut constructive = 0usize;
|
||||
let mut destructive = 0usize;
|
||||
|
||||
for &agent_phase in phases {
|
||||
let delta = Self::phase_distance(agent_phase, net_phase);
|
||||
if delta <= PI / 2.0 {
|
||||
constructive += 1;
|
||||
} else {
|
||||
destructive += 1;
|
||||
}
|
||||
}
|
||||
|
||||
SwarmDecision {
|
||||
action: action_map[&id].clone(),
|
||||
probability,
|
||||
constructive_count: constructive,
|
||||
destructive_count: destructive,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
decisions.sort_by(|a, b| {
|
||||
b.probability
|
||||
.partial_cmp(&a.probability)
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
});
|
||||
decisions
|
||||
}
|
||||
|
||||
/// Return the winning action (highest probability after interference).
|
||||
pub fn winner(&self) -> Option<SwarmDecision> {
|
||||
let decisions = self.decide();
|
||||
decisions.into_iter().next()
|
||||
}
|
||||
|
||||
/// Run `num_trials` decisions with additive quantum noise and return
|
||||
/// win counts: `Vec<(Action, wins)>` sorted by wins descending.
|
||||
///
|
||||
/// Each trial adds a small random complex perturbation to every agent
|
||||
/// amplitude before summing. This models environmental noise and shows
|
||||
/// the stability of the interference pattern.
|
||||
pub fn decide_with_noise(
|
||||
&self,
|
||||
noise_level: f64,
|
||||
num_trials: usize,
|
||||
seed: u64,
|
||||
) -> Vec<(Action, usize)> {
|
||||
let mut rng = StdRng::seed_from_u64(seed);
|
||||
let mut win_counts: HashMap<String, (Action, usize)> = HashMap::new();
|
||||
|
||||
for _ in 0..num_trials {
|
||||
// Aggregate with noise.
|
||||
let mut amplitude_map: HashMap<String, Complex> = HashMap::new();
|
||||
let mut action_map: HashMap<String, Action> = HashMap::new();
|
||||
|
||||
for contrib in &self.contributions {
|
||||
for (action, amp) in &contrib.amplitudes {
|
||||
action_map
|
||||
.entry(action.id.clone())
|
||||
.or_insert_with(|| action.clone());
|
||||
|
||||
// Add noise: random complex perturbation with magnitude up
|
||||
// to `noise_level`.
|
||||
let noise_r = rng.gen::<f64>() * noise_level;
|
||||
let noise_theta = rng.gen::<f64>() * 2.0 * PI;
|
||||
let noise = Complex::from_polar(noise_r, noise_theta);
|
||||
let noisy_amp = *amp + noise;
|
||||
|
||||
let entry = amplitude_map
|
||||
.entry(action.id.clone())
|
||||
.or_insert(Complex::ZERO);
|
||||
*entry = *entry + noisy_amp;
|
||||
}
|
||||
}
|
||||
|
||||
// Find winner for this trial.
|
||||
if let Some((winner_id, _)) = amplitude_map.iter().max_by(|a, b| {
|
||||
a.1.norm_sq()
|
||||
.partial_cmp(&b.1.norm_sq())
|
||||
.unwrap_or(std::cmp::Ordering::Equal)
|
||||
}) {
|
||||
let entry = win_counts
|
||||
.entry(winner_id.clone())
|
||||
.or_insert_with(|| (action_map[winner_id].clone(), 0));
|
||||
entry.1 += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let mut result: Vec<(Action, usize)> = win_counts.into_values().collect();
|
||||
result.sort_by(|a, b| b.1.cmp(&a.1));
|
||||
result
|
||||
}
|
||||
|
||||
/// Check if the decision is deadlocked.
|
||||
///
|
||||
/// A deadlock is detected when the top two actions have probabilities
|
||||
/// within `epsilon` of each other.
|
||||
pub fn is_deadlocked(&self, epsilon: f64) -> bool {
|
||||
let decisions = self.decide();
|
||||
if decisions.len() < 2 {
|
||||
return false;
|
||||
}
|
||||
(decisions[0].probability - decisions[1].probability).abs() <= epsilon
|
||||
}
|
||||
|
||||
/// Clear all contributions, resetting the engine.
|
||||
pub fn reset(&mut self) {
|
||||
self.contributions.clear();
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Private helpers
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Aggregate all contributions by action id.
|
||||
///
|
||||
/// Returns:
|
||||
/// - action_map: id -> canonical Action
|
||||
/// - amplitude_map: id -> summed complex amplitude
|
||||
/// - agent_phases_map: id -> list of each agent's contributing phase
|
||||
fn aggregate(
|
||||
&self,
|
||||
) -> (
|
||||
HashMap<String, Action>,
|
||||
HashMap<String, Complex>,
|
||||
HashMap<String, Vec<f64>>,
|
||||
) {
|
||||
let mut action_map: HashMap<String, Action> = HashMap::new();
|
||||
let mut amplitude_map: HashMap<String, Complex> = HashMap::new();
|
||||
let mut agent_phases_map: HashMap<String, Vec<f64>> = HashMap::new();
|
||||
|
||||
for contrib in &self.contributions {
|
||||
for (action, amp) in &contrib.amplitudes {
|
||||
action_map
|
||||
.entry(action.id.clone())
|
||||
.or_insert_with(|| action.clone());
|
||||
|
||||
let entry = amplitude_map
|
||||
.entry(action.id.clone())
|
||||
.or_insert(Complex::ZERO);
|
||||
*entry = *entry + *amp;
|
||||
|
||||
agent_phases_map
|
||||
.entry(action.id.clone())
|
||||
.or_insert_with(Vec::new)
|
||||
.push(amp.arg());
|
||||
}
|
||||
}
|
||||
|
||||
(action_map, amplitude_map, agent_phases_map)
|
||||
}
|
||||
|
||||
/// Absolute angular distance between two phases, in [0, pi].
|
||||
fn phase_distance(a: f64, b: f64) -> f64 {
|
||||
let mut d = (a - b).abs() % (2.0 * PI);
|
||||
if d > PI {
|
||||
d = 2.0 * PI - d;
|
||||
}
|
||||
d
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SwarmInterference {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn action(id: &str) -> Action {
|
||||
Action {
|
||||
id: id.to_string(),
|
||||
description: id.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_agent_support() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
swarm.contribute(AgentContribution::new("alice", action("deploy"), 0.8, true));
|
||||
|
||||
let decisions = swarm.decide();
|
||||
assert_eq!(decisions.len(), 1);
|
||||
assert_eq!(decisions[0].action.id, "deploy");
|
||||
// probability = |0.8|^2 = 0.64
|
||||
assert!((decisions[0].probability - 0.64).abs() < 1e-10);
|
||||
assert_eq!(decisions[0].constructive_count, 1);
|
||||
assert_eq!(decisions[0].destructive_count, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn constructive_interference() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
// 3 agents all support "deploy" with confidence 1.0
|
||||
swarm.contribute(AgentContribution::new("a", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("c", action("deploy"), 1.0, true));
|
||||
|
||||
let decisions = swarm.decide();
|
||||
// Net amplitude = 3.0, probability = 9.0
|
||||
assert!((decisions[0].probability - 9.0).abs() < 1e-10);
|
||||
assert_eq!(decisions[0].constructive_count, 3);
|
||||
assert_eq!(decisions[0].destructive_count, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn destructive_interference_cancels() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
// 2 agents support, 2 oppose with equal confidence
|
||||
swarm.contribute(AgentContribution::new("a", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("c", action("deploy"), 1.0, false));
|
||||
swarm.contribute(AgentContribution::new("d", action("deploy"), 1.0, false));
|
||||
|
||||
let decisions = swarm.decide();
|
||||
// Net amplitude ~ 0, probability ~ 0
|
||||
assert!(decisions[0].probability < 1e-10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn partial_cancellation() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
// 3 support, 1 opposes => net amplitude ~ 2.0
|
||||
swarm.contribute(AgentContribution::new("a", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("c", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("d", action("deploy"), 1.0, false));
|
||||
|
||||
let decisions = swarm.decide();
|
||||
// Net amplitude = 3 - 1 = 2, probability = 4.0
|
||||
assert!((decisions[0].probability - 4.0).abs() < 1e-10);
|
||||
assert_eq!(decisions[0].constructive_count, 3);
|
||||
assert_eq!(decisions[0].destructive_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multiple_actions_sorted_by_probability() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
// Action "deploy": 2 supporters
|
||||
swarm.contribute(AgentContribution::new("a", action("deploy"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("deploy"), 1.0, true));
|
||||
// Action "rollback": 3 supporters
|
||||
swarm.contribute(AgentContribution::new("c", action("rollback"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("d", action("rollback"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("e", action("rollback"), 1.0, true));
|
||||
|
||||
let decisions = swarm.decide();
|
||||
assert_eq!(decisions.len(), 2);
|
||||
assert_eq!(decisions[0].action.id, "rollback"); // P=9
|
||||
assert_eq!(decisions[1].action.id, "deploy"); // P=4
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn winner_returns_highest() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
swarm.contribute(AgentContribution::new("a", action("A"), 0.5, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("B"), 1.0, true));
|
||||
|
||||
let w = swarm.winner().unwrap();
|
||||
assert_eq!(w.action.id, "B");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn winner_empty_swarm() {
|
||||
let swarm = SwarmInterference::new();
|
||||
assert!(swarm.winner().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deadlock_detection() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
// Two actions with exactly equal support
|
||||
swarm.contribute(AgentContribution::new("a", action("A"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("B"), 1.0, true));
|
||||
|
||||
assert!(swarm.is_deadlocked(1e-10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_deadlock_with_clear_winner() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
swarm.contribute(AgentContribution::new("a", action("A"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("b", action("A"), 1.0, true));
|
||||
swarm.contribute(AgentContribution::new("c", action("B"), 0.1, true));
|
||||
|
||||
assert!(!swarm.is_deadlocked(0.01));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reset_clears_contributions() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
swarm.contribute(AgentContribution::new("a", action("X"), 1.0, true));
|
||||
assert_eq!(swarm.decide().len(), 1);
|
||||
|
||||
swarm.reset();
|
||||
assert!(swarm.decide().is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn multi_contribution() {
|
||||
let mut swarm = SwarmInterference::new();
|
||||
swarm.contribute(AgentContribution::multi(
|
||||
"alice",
|
||||
vec![
|
||||
(action("A"), Complex::new(0.5, 0.0)),
|
||||
(action("B"), Complex::new(0.0, 0.3)),
|
||||
],
|
||||
));
|
||||
|
||||
let decisions = swarm.decide();
|
||||
assert_eq!(decisions.len(), 2);
|
||||
}
|
||||
|
||||
/// Running the noisy decision procedure twice with one seed must yield
/// identical action orderings and trial counts.
#[test]
fn noise_trials_are_reproducible() {
    let mut s = SwarmInterference::new();
    s.contribute(AgentContribution::new("a", action("X"), 1.0, true));
    s.contribute(AgentContribution::new("b", action("Y"), 0.5, true));

    let first = s.decide_with_noise(0.1, 100, 42);
    let second = s.decide_with_noise(0.1, 100, 42);

    // Same seed -> same results.
    assert_eq!(first.len(), second.len());
    for (lhs, rhs) in first.iter().zip(second.iter()) {
        assert_eq!(lhs.0.id, rhs.0.id);
        assert_eq!(lhs.1, rhs.1);
    }
}
|
||||
|
||||
/// Noise at a modest level must not flip a decision with overwhelming support.
#[test]
fn noise_preserves_strong_winner() {
    let mut s = SwarmInterference::new();
    // Three full-confidence backers of "A"...
    for &agent in &["a", "b", "c"] {
        s.contribute(AgentContribution::new(agent, action("A"), 1.0, true));
    }
    // ...versus one faint backer of "B".
    s.contribute(AgentContribution::new("d", action("B"), 0.1, true));

    let tallies = s.decide_with_noise(0.05, 200, 7);
    // "A" must take the top slot and dominate the 200 trials.
    assert_eq!(tallies[0].0.id, "A");
    assert!(tallies[0].1 > 150, "A should win most trials");
}
|
||||
|
||||
/// `Default` must behave like `new`: an empty swarm with no decisions.
#[test]
fn default_trait() {
    let via_default = SwarmInterference::default();
    assert!(via_default.decide().is_empty());
}
|
||||
|
||||
/// Equal-confidence support and opposition on the same action cancel to
/// (near) zero amplitude; the decision entry survives but its probability
/// must be vanishingly small.
#[test]
fn complete_cancellation_detects_deadlock() {
    let mut s = SwarmInterference::new();
    // One supporter and one equally confident opposer of the same action.
    s.contribute(AgentContribution::new("a", action("X"), 1.0, true));
    s.contribute(AgentContribution::new("b", action("X"), 1.0, false));

    let outcomes = s.decide();
    assert_eq!(outcomes.len(), 1);
    assert!(outcomes[0].probability < 1e-10);
}
|
||||
}
|
||||
383
vendor/ruvector/crates/ruqu-exotic/src/syndrome_diagnosis.rs
vendored
Normal file
383
vendor/ruvector/crates/ruqu-exotic/src/syndrome_diagnosis.rs
vendored
Normal file
@@ -0,0 +1,383 @@
|
||||
//! # Syndrome Diagnosis -- QEC-Based System Fault Localization
|
||||
//!
|
||||
//! Treats AI system components as a graph. Injects artificial faults into
|
||||
//! a quantum encoding of the system. Extracts syndrome patterns using QEC logic
|
||||
//! (CNOT parity checks on ancilla qubits) to localize which component is fragile.
|
||||
//!
|
||||
//! This is **structural fault localization**, not log analysis. Multiple rounds
|
||||
//! of fault injection build statistical fragility profiles and detect components
|
||||
//! that propagate faults beyond their direct neighborhood.
|
||||
|
||||
use ruqu_core::error::QuantumError;
|
||||
use ruqu_core::gate::Gate;
|
||||
use ruqu_core::state::QuantumState;
|
||||
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, SeedableRng};
|
||||
|
||||
/// A system component (node in the diagnostic graph).
///
/// Each component is encoded as one data qubit during a diagnostic round:
/// `health` drives an Ry rotation away from |0> (see `SystemDiagnostics::run_round`).
#[derive(Debug, Clone)]
pub struct Component {
    /// Identifier reported back in fragility scores / weakest-component results.
    pub id: String,
    /// Health in [0.0, 1.0]: 1.0 = healthy, 0.0 = failed.
    pub health: f64, // 1.0 = healthy, 0.0 = failed
}
|
||||
|
||||
/// An edge between two components (dependency/connection).
///
/// Each connection gets its own ancilla qubit that parity-checks the two
/// endpoint data qubits during a diagnostic round.
#[derive(Debug, Clone)]
pub struct Connection {
    /// Index of the source component in the diagnostic graph.
    pub from: usize,
    /// Index of the target component in the diagnostic graph.
    pub to: usize,
    // NOTE(review): `strength` is never read by `run_round`/`diagnose` in this
    // file -- presumably reserved for weighted diagnosis; confirm before relying on it.
    pub strength: f64, // coupling strength
}
|
||||
|
||||
/// Configuration for syndrome-based diagnosis.
///
/// Derives `Debug`/`Clone` for consistency with the other public types in
/// this module (config values are plain data and callers commonly log or
/// reuse them across runs).
#[derive(Debug, Clone)]
pub struct DiagnosisConfig {
    /// Probability, per component and per round, of injecting an X-gate fault.
    pub fault_injection_rate: f64,
    /// Number of independent fault-injection rounds to run.
    pub num_rounds: usize,
    /// Base RNG seed; each round derives its own seed from this.
    pub seed: u64,
}
|
||||
|
||||
/// Result of a single diagnostic round.
#[derive(Debug)]
pub struct DiagnosticRound {
    /// One parity bit per connection, in connection order: `true` means the
    /// check fired (the two endpoint qubits were measured to disagree).
    pub syndrome: Vec<bool>,
    /// Indices of components that received an injected X fault this round.
    pub injected_faults: Vec<usize>,
}
|
||||
|
||||
/// Overall diagnosis result.
#[derive(Debug)]
pub struct DiagnosisResult {
    /// Per-round syndromes and injected faults, in execution order.
    pub rounds: Vec<DiagnosticRound>,
    /// Fragility score per component: how often it shows up in syndrome patterns
    /// (syndrome appearances / total rounds; a node with several connections
    /// can accumulate more than one appearance per round, so scores may exceed 1.0).
    pub fragility_scores: Vec<(String, f64)>,
    /// Most fragile component.
    pub weakest_component: Option<String>,
    /// Components that propagate faults (appear in syndromes without being directly faulted).
    pub fault_propagators: Vec<String>,
}
|
||||
|
||||
/// System graph for syndrome-based diagnosis.
///
/// Maps a system topology to a quantum circuit:
/// - Each component becomes a data qubit
/// - Each connection becomes an ancilla qubit for parity measurement
/// - Total qubits = num_components + num_connections (must be <= 25)
pub struct SystemDiagnostics {
    // Data qubits 0..components.len() map 1:1 onto these entries.
    components: Vec<Component>,
    // The ancilla for connection `ci` lives at qubit index components.len() + ci.
    connections: Vec<Connection>,
}
|
||||
|
||||
impl SystemDiagnostics {
|
||||
pub fn new(components: Vec<Component>, connections: Vec<Connection>) -> Self {
|
||||
Self {
|
||||
components,
|
||||
connections,
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a single diagnostic round:
|
||||
/// 1. Encode components as qubits (healthy=|0>, degraded=Ry rotation)
|
||||
/// 2. Inject random faults (X gates on selected qubits)
|
||||
/// 3. Extract syndrome using ancilla qubits measuring parity of connected pairs
|
||||
pub fn run_round(
|
||||
&self,
|
||||
config: &DiagnosisConfig,
|
||||
round_seed: u64,
|
||||
) -> Result<DiagnosticRound, QuantumError> {
|
||||
let num_components = self.components.len();
|
||||
let num_connections = self.connections.len();
|
||||
let total_qubits = (num_components + num_connections) as u32;
|
||||
|
||||
// Limit check
|
||||
if total_qubits > 25 {
|
||||
return Err(QuantumError::QubitLimitExceeded {
|
||||
requested: total_qubits,
|
||||
maximum: 25,
|
||||
});
|
||||
}
|
||||
|
||||
let mut state = QuantumState::new_with_seed(total_qubits, round_seed)?;
|
||||
let mut rng = StdRng::seed_from_u64(round_seed);
|
||||
|
||||
// 1. Encode component health as rotation from |0>
|
||||
for (i, comp) in self.components.iter().enumerate() {
|
||||
let angle = std::f64::consts::FRAC_PI_2 * (1.0 - comp.health);
|
||||
if angle.abs() > 1e-15 {
|
||||
state.apply_gate(&Gate::Ry(i as u32, angle))?;
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Inject faults
|
||||
let mut injected = Vec::new();
|
||||
for i in 0..num_components {
|
||||
if rng.gen::<f64>() < config.fault_injection_rate {
|
||||
state.apply_gate(&Gate::X(i as u32))?;
|
||||
injected.push(i);
|
||||
}
|
||||
}
|
||||
|
||||
// 3. For each connection, use an ancilla qubit to check parity
|
||||
let mut syndrome = Vec::with_capacity(num_connections);
|
||||
for (ci, conn) in self.connections.iter().enumerate() {
|
||||
let ancilla = (num_components + ci) as u32;
|
||||
state.reset_qubit(ancilla)?;
|
||||
state.apply_gate(&Gate::CNOT(conn.from as u32, ancilla))?;
|
||||
state.apply_gate(&Gate::CNOT(conn.to as u32, ancilla))?;
|
||||
let outcome = state.measure(ancilla)?;
|
||||
syndrome.push(outcome.result);
|
||||
}
|
||||
|
||||
Ok(DiagnosticRound {
|
||||
syndrome,
|
||||
injected_faults: injected,
|
||||
})
|
||||
}
|
||||
|
||||
/// Run full diagnosis: multiple rounds of fault injection + syndrome extraction.
|
||||
/// Accumulates statistics to identify fragile and fault-propagating components.
|
||||
pub fn diagnose(&self, config: &DiagnosisConfig) -> Result<DiagnosisResult, QuantumError> {
|
||||
let mut rounds = Vec::new();
|
||||
let mut fault_counts = vec![0usize; self.components.len()];
|
||||
let mut syndrome_counts = vec![0usize; self.components.len()];
|
||||
|
||||
for round in 0..config.num_rounds {
|
||||
let round_seed = config.seed.wrapping_add(round as u64 * 1000);
|
||||
let result = self.run_round(config, round_seed)?;
|
||||
|
||||
// Count which components were directly faulted
|
||||
for &idx in &result.injected_faults {
|
||||
fault_counts[idx] += 1;
|
||||
}
|
||||
|
||||
// Count which components appear in triggered syndromes
|
||||
for (ci, &fired) in result.syndrome.iter().enumerate() {
|
||||
if fired {
|
||||
let conn = &self.connections[ci];
|
||||
syndrome_counts[conn.from] += 1;
|
||||
syndrome_counts[conn.to] += 1;
|
||||
}
|
||||
}
|
||||
|
||||
rounds.push(result);
|
||||
}
|
||||
|
||||
// Compute fragility scores (syndrome appearances / total rounds)
|
||||
let fragility_scores: Vec<(String, f64)> = self
|
||||
.components
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, c)| {
|
||||
(
|
||||
c.id.clone(),
|
||||
syndrome_counts[i] as f64 / config.num_rounds as f64,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let weakest = fragility_scores
|
||||
.iter()
|
||||
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal))
|
||||
.map(|(name, _)| name.clone());
|
||||
|
||||
// Fault propagators: components that appear in syndromes more often
|
||||
// than they were directly faulted
|
||||
let fault_propagators: Vec<String> = self
|
||||
.components
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(i, _)| syndrome_counts[*i] > fault_counts[*i] + config.num_rounds / 4)
|
||||
.map(|(_, c)| c.id.clone())
|
||||
.collect();
|
||||
|
||||
Ok(DiagnosisResult {
|
||||
rounds,
|
||||
fragility_scores,
|
||||
weakest_component: weakest,
|
||||
fault_propagators,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the number of components
|
||||
pub fn num_components(&self) -> usize {
|
||||
self.components.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Build a chain topology: n fully healthy components (health = 1.0)
    // connected as 0-1-2-...-(n-1). `saturating_sub` keeps n = 0 safe.
    fn make_linear_system(n: usize) -> (Vec<Component>, Vec<Connection>) {
        let components: Vec<Component> = (0..n)
            .map(|i| Component {
                id: format!("comp_{}", i),
                health: 1.0,
            })
            .collect();
        let connections: Vec<Connection> = (0..n.saturating_sub(1))
            .map(|i| Connection {
                from: i,
                to: i + 1,
                strength: 1.0,
            })
            .collect();
        (components, connections)
    }

    #[test]
    fn test_new_system() {
        let (comps, conns) = make_linear_system(5);
        let diag = SystemDiagnostics::new(comps, conns);
        assert_eq!(diag.num_components(), 5);
    }

    #[test]
    fn test_qubit_limit_check() {
        // 15 components + 14 connections = 29 > 25
        let (comps, conns) = make_linear_system(15);
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.0,
            num_rounds: 1,
            seed: 42,
        };
        let result = diag.run_round(&config, 42);
        assert!(result.is_err());
    }

    #[test]
    fn test_single_round_no_faults() {
        let (comps, conns) = make_linear_system(5);
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.0,
            num_rounds: 1,
            seed: 42,
        };
        let round = diag.run_round(&config, 42).unwrap();
        // No faults injected -> no syndrome should fire (all healthy components
        // are in |0>, parity checks should agree)
        assert!(round.injected_faults.is_empty());
        assert!(round.syndrome.iter().all(|&s| !s));
    }

    #[test]
    fn test_diagnose_no_faults() {
        let (comps, conns) = make_linear_system(5);
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.0,
            num_rounds: 10,
            seed: 42,
        };
        let result = diag.diagnose(&config).unwrap();
        assert_eq!(result.rounds.len(), 10);
        // No faults -> all fragility scores should be 0
        for (_, score) in &result.fragility_scores {
            assert!((*score - 0.0).abs() < 1e-10);
        }
    }

    #[test]
    fn test_diagnose_with_faults() {
        let (comps, conns) = make_linear_system(5);
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.5,
            num_rounds: 20,
            seed: 123,
        };
        let result = diag.diagnose(&config).unwrap();
        assert_eq!(result.rounds.len(), 20);
        assert_eq!(result.fragility_scores.len(), 5);
        // At least some syndromes should fire with 50% fault rate
        let total_fired: usize = result
            .rounds
            .iter()
            .map(|r| r.syndrome.iter().filter(|&&s| s).count())
            .sum();
        assert!(total_fired > 0);
    }

    #[test]
    fn test_weakest_component_identified() {
        let (comps, conns) = make_linear_system(5);
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.3,
            num_rounds: 50,
            seed: 999,
        };
        let result = diag.diagnose(&config).unwrap();
        // Should identify a weakest component
        assert!(result.weakest_component.is_some());
    }

    #[test]
    fn test_degraded_components() {
        // One component is already degraded; the round must still run cleanly
        // (degraded health becomes an Ry rotation, not an error).
        let mut comps: Vec<Component> = (0..5)
            .map(|i| Component {
                id: format!("comp_{}", i),
                health: 1.0,
            })
            .collect();
        comps[2].health = 0.3; // Component 2 is degraded
        let conns: Vec<Connection> = (0..4)
            .map(|i| Connection {
                from: i,
                to: i + 1,
                strength: 1.0,
            })
            .collect();
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.0,
            num_rounds: 1,
            seed: 42,
        };
        // Should run without error
        let round = diag.run_round(&config, 42).unwrap();
        assert_eq!(round.syndrome.len(), 4);
    }

    #[test]
    fn test_max_qubit_boundary() {
        // 13 components + 12 connections = 25 qubits (exactly at limit)
        let (comps, conns) = make_linear_system(13);
        let diag = SystemDiagnostics::new(comps, conns);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.0,
            num_rounds: 1,
            seed: 42,
        };
        let result = diag.run_round(&config, 42);
        assert!(result.is_ok());
    }

    #[test]
    fn test_empty_connections() {
        // Components with no connections: diagnosis degenerates gracefully,
        // producing rounds with empty syndromes.
        let comps = vec![
            Component {
                id: "a".into(),
                health: 1.0,
            },
            Component {
                id: "b".into(),
                health: 1.0,
            },
        ];
        let diag = SystemDiagnostics::new(comps, vec![]);
        let config = DiagnosisConfig {
            fault_injection_rate: 0.0,
            num_rounds: 5,
            seed: 42,
        };
        let result = diag.diagnose(&config).unwrap();
        // No connections -> no syndrome bits
        for round in &result.rounds {
            assert!(round.syndrome.is_empty());
        }
    }
}
|
||||
469
vendor/ruvector/crates/ruqu-exotic/tests/test_discovery_cross.rs
vendored
Normal file
469
vendor/ruvector/crates/ruqu-exotic/tests/test_discovery_cross.rs
vendored
Normal file
@@ -0,0 +1,469 @@
|
||||
//! Cross-module discovery experiments for ruqu-exotic.
|
||||
//!
|
||||
//! These tests combine two exotic modules to discover emergent behavior
|
||||
//! that neither module can exhibit alone.
|
||||
|
||||
use ruqu_core::gate::Gate;
|
||||
use ruqu_exotic::quantum_collapse::QuantumCollapseSearch;
|
||||
use ruqu_exotic::reversible_memory::ReversibleMemory;
|
||||
use ruqu_exotic::swarm_interference::{Action, AgentContribution, SwarmInterference};
|
||||
use ruqu_exotic::syndrome_diagnosis::{Component, Connection, DiagnosisConfig, SystemDiagnostics};
|
||||
|
||||
// ===========================================================================
|
||||
// DISCOVERY 7: Counterfactual Search Explanation
|
||||
// (quantum_collapse + reversible_memory)
|
||||
//
|
||||
// Can we EXPLAIN why a quantum collapse search picked a particular result
|
||||
// by using counterfactual reasoning on the state preparation?
|
||||
//
|
||||
// Approach:
|
||||
// 1. Build a reversible memory with a sequence of gates that bias the
|
||||
// probability distribution toward certain basis states.
|
||||
// 2. Extract the probability distribution and use it as the set of
|
||||
// "candidate embeddings" for collapse search.
|
||||
// 3. Run the search to find the top result.
|
||||
// 4. For each gate in the preparation sequence, run counterfactual
|
||||
// analysis (remove that gate) and see how the probability
|
||||
// distribution --- and therefore the search result --- would change.
|
||||
//
|
||||
// HYPOTHESIS: The gate that created the most bias in the probability
|
||||
// space will have the highest counterfactual divergence, and removing
|
||||
// it will change the search result most dramatically.
|
||||
// ===========================================================================
|
||||
|
||||
#[test]
fn discovery_7_counterfactual_search_explanation() {
    println!("DISCOVERY 7: Counterfactual Search Explanation");
    println!(" Combining: quantum_collapse + reversible_memory");
    println!(
        " Question: Can counterfactual analysis explain WHY a search returned a specific result?"
    );
    println!();

    // -----------------------------------------------------------------------
    // Step 1: Build a reversible memory that creates a biased state.
    //
    // We use 2 qubits (4 basis states). The gate sequence is designed so that
    // one specific gate (the Ry rotation on qubit 0) is the primary source of
    // bias, while others contribute less.
    // -----------------------------------------------------------------------
    let mut mem = ReversibleMemory::new(2).unwrap();

    // Gate 0: Large Ry rotation on qubit 0 -- this is the BIG bias creator.
    // It rotates qubit 0 away from |0> toward |1>, heavily biasing the
    // probability distribution.
    mem.apply(Gate::Ry(0, 1.2)).unwrap();

    // Gate 1: Small Rz rotation on qubit 1 -- phase-only, barely changes probs.
    mem.apply(Gate::Rz(1, 0.05)).unwrap();

    // Gate 2: CNOT entangles the qubits, spreading the bias from q0 to q1.
    mem.apply(Gate::CNOT(0, 1)).unwrap();

    // Gate 3: Tiny Ry on qubit 1 -- small additional bias.
    mem.apply(Gate::Ry(1, 0.1)).unwrap();

    assert_eq!(mem.history_len(), 4, "Should have 4 gates in history");

    // -----------------------------------------------------------------------
    // Step 2: Extract probability distribution as candidate embeddings.
    //
    // The 4 basis state probabilities become 4 "candidate" 1D embeddings.
    // Each candidate is a single-element vector containing that basis state's
    // probability. This way, the collapse search will prefer the basis state
    // with the highest probability (since the query will be [1.0], which is
    // most similar to the largest probability value).
    // -----------------------------------------------------------------------
    let original_probs = mem.probabilities();
    println!(" Original probability distribution:");
    for (i, p) in original_probs.iter().enumerate() {
        println!(" |{:02b}> : {:.6}", i, p);
    }

    let candidates: Vec<Vec<f64>> = original_probs.iter().map(|&p| vec![p]).collect();
    let search = QuantumCollapseSearch::new(candidates);

    // Query: [1.0] -- we want the candidate with the highest probability value.
    let query = [1.0_f64];
    let search_result = search.search(&query, 2, 42);
    println!(
        " Search result: index={}, amplitude={:.6}, is_padding={}",
        search_result.index, search_result.amplitude, search_result.is_padding
    );

    // Also get the distribution over many shots to see stability.
    let dist = search.search_distribution(&query, 2, 200, 42);
    println!(" Search distribution (200 shots):");
    for &(idx, count) in &dist {
        println!(
            " index {} : {} hits ({:.1}%)",
            idx,
            count,
            // 200 shots, so count / 2.0 == count * 100 / 200, i.e. a percentage.
            count as f64 / 2.0
        );
    }
    println!();

    // -----------------------------------------------------------------------
    // Step 3: Counterfactual analysis -- for each gate, what would change?
    //
    // For each gate in the preparation sequence, compute the counterfactual
    // (what if that gate never happened?), extract the altered probability
    // distribution, rebuild the search, and see what the new search result
    // would be.
    // -----------------------------------------------------------------------
    println!(" Counterfactual analysis (removing each gate):");
    let mut divergences = Vec::new();
    // NOTE(review): `cf_search_results` is collected but never read after the
    // loop -- candidate for removal or a follow-up assertion.
    let mut cf_search_results = Vec::new();

    for step in 0..mem.history_len() {
        let cf = mem.counterfactual(step).unwrap();

        // Build a new search from the counterfactual probability distribution.
        let cf_candidates: Vec<Vec<f64>> =
            cf.counterfactual_probs.iter().map(|&p| vec![p]).collect();
        let cf_search = QuantumCollapseSearch::new(cf_candidates);
        let cf_result = cf_search.search(&query, 2, 42);
        let cf_dist = cf_search.search_distribution(&query, 2, 200, 42);

        println!(" Gate {} removed:", step);
        println!(" Divergence: {:.6}", cf.divergence);
        println!(
            " Counterfactual probs: {:?}",
            cf.counterfactual_probs
                .iter()
                .map(|p| format!("{:.4}", p))
                .collect::<Vec<_>>()
        );
        println!(" New search result: index={}", cf_result.index);
        println!(
            " New distribution: {:?}",
            cf_dist
                .iter()
                .map(|&(i, c)| format!("idx{}:{}hits", i, c))
                .collect::<Vec<_>>()
        );

        divergences.push(cf.divergence);
        cf_search_results.push(cf_result.index);
    }
    println!();

    // -----------------------------------------------------------------------
    // Step 4: Validate the hypothesis.
    //
    // The gate with the highest counterfactual divergence should be the one
    // most responsible for the search result. In our setup, gate 0 (the large
    // Ry rotation) is the primary bias source.
    // -----------------------------------------------------------------------
    let max_div_step = divergences
        .iter()
        .enumerate()
        .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
        .map(|(i, _)| i)
        .unwrap();
    let min_div_step = divergences
        .iter()
        .enumerate()
        .min_by(|a, b| a.1.partial_cmp(b.1).unwrap())
        .map(|(i, _)| i)
        .unwrap();

    println!(" RESULTS:");
    println!(
        " Most impactful gate: step {} (divergence={:.6})",
        max_div_step, divergences[max_div_step]
    );
    println!(
        " Least impactful gate: step {} (divergence={:.6})",
        min_div_step, divergences[min_div_step]
    );

    // The large Ry rotation (step 0) should have the highest divergence.
    assert_eq!(
        max_div_step, 0,
        "DISCOVERY 7: The Ry(0, 1.2) gate (step 0) should be the most impactful, but step {} was. Divergences: {:?}",
        max_div_step, divergences
    );

    // The tiny Rz (step 1) should have the lowest divergence since it is
    // phase-only and barely changes probabilities.
    assert_eq!(
        min_div_step, 1,
        "DISCOVERY 7: The Rz(1, 0.05) gate (step 1) should be the least impactful, but step {} was. Divergences: {:?}",
        min_div_step, divergences
    );

    // The highest divergence should be strictly greater than the lowest.
    assert!(
        divergences[max_div_step] > divergences[min_div_step] + 1e-6,
        "DISCOVERY 7: Max divergence ({:.6}) should significantly exceed min divergence ({:.6})",
        divergences[max_div_step],
        divergences[min_div_step]
    );

    println!();
    println!(" HYPOTHESIS CONFIRMED: The gate that created the most bias (Ry on q0)");
    println!(" has the highest counterfactual divergence, and removing it changes the");
    println!(" search distribution most. Counterfactual reasoning can EXPLAIN search results.");
    println!();
}
|
||||
|
||||
// ===========================================================================
|
||||
// DISCOVERY 8: Syndrome-Diagnosed Swarm Health
|
||||
// (syndrome_diagnosis + swarm_interference)
|
||||
//
|
||||
// Can quantum error-correction syndrome extraction identify a dysfunctional
|
||||
// agent in a swarm?
|
||||
//
|
||||
// Approach:
|
||||
// 1. Create a swarm of agents, most supporting an action confidently, but
|
||||
// one agent is deliberately disruptive (low confidence, opposing phase).
|
||||
// 2. Map each agent to a Component in syndrome diagnosis, where the
|
||||
// agent's confidence becomes the component's health score.
|
||||
// 3. Connect all components in a chain (modeling information flow).
|
||||
// 4. Run syndrome diagnosis with fault injection to surface fragility.
|
||||
// 5. Compare: does the weakest component match the disruptive agent?
|
||||
//
|
||||
// HYPOTHESIS: The component corresponding to the disruptive agent (lowest
|
||||
// health) will be identified as the weakest component by syndrome diagnosis,
|
||||
// and its fragility score will be among the highest.
|
||||
// ===========================================================================
|
||||
|
||||
#[test]
|
||||
fn discovery_8_syndrome_diagnosed_swarm_health() {
|
||||
println!("DISCOVERY 8: Syndrome-Diagnosed Swarm Health");
|
||||
println!(" Combining: syndrome_diagnosis + swarm_interference");
|
||||
println!(" Question: Can quantum diagnostic techniques identify a dysfunctional swarm agent?");
|
||||
println!();
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Step 1: Define the swarm agents and their behavior.
|
||||
//
|
||||
// We have 5 agents deciding on a single action ("deploy").
|
||||
// Agents 0-3 are reliable (high confidence, supporting).
|
||||
// Agent 4 is the disruptor (low confidence, opposing).
|
||||
// -----------------------------------------------------------------------
|
||||
let deploy = Action {
|
||||
id: "deploy".into(),
|
||||
description: "Deploy the service to production".into(),
|
||||
};
|
||||
|
||||
let agent_configs: Vec<(&str, f64, bool)> = vec![
|
||||
("agent_0", 0.95, true), // reliable supporter
|
||||
("agent_1", 0.90, true), // reliable supporter
|
||||
("agent_2", 0.85, true), // reliable supporter
|
||||
("agent_3", 0.88, true), // reliable supporter
|
||||
("agent_4", 0.15, false), // DISRUPTOR: low confidence, opposing
|
||||
];
|
||||
|
||||
let mut swarm = SwarmInterference::new();
|
||||
for &(name, confidence, support) in &agent_configs {
|
||||
swarm.contribute(AgentContribution::new(
|
||||
name,
|
||||
deploy.clone(),
|
||||
confidence,
|
||||
support,
|
||||
));
|
||||
}
|
||||
|
||||
let decisions = swarm.decide();
|
||||
assert!(
|
||||
!decisions.is_empty(),
|
||||
"Swarm should produce at least one decision"
|
||||
);
|
||||
let decision = &decisions[0];
|
||||
|
||||
println!(" Swarm Decision:");
|
||||
println!(" Action: {}", decision.action.id);
|
||||
println!(" Probability: {:.6}", decision.probability);
|
||||
println!(" Constructive agents: {}", decision.constructive_count);
|
||||
println!(" Destructive agents: {}", decision.destructive_count);
|
||||
println!();
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Step 2: Map agents to system components for syndrome diagnosis.
|
||||
//
|
||||
// Each agent becomes a Component where:
|
||||
// - id = agent name
|
||||
// - health = agent confidence (disruptor has low health)
|
||||
//
|
||||
// We connect them in a chain to model information flow between agents:
|
||||
// agent_0 -- agent_1 -- agent_2 -- agent_3 -- agent_4
|
||||
// -----------------------------------------------------------------------
|
||||
let components: Vec<Component> = agent_configs
|
||||
.iter()
|
||||
.map(|&(name, confidence, _)| Component {
|
||||
id: name.to_string(),
|
||||
health: confidence,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Chain topology: each agent connected to the next.
|
||||
let connections: Vec<Connection> = (0..agent_configs.len() - 1)
|
||||
.map(|i| Connection {
|
||||
from: i,
|
||||
to: i + 1,
|
||||
strength: 1.0,
|
||||
})
|
||||
.collect();
|
||||
|
||||
println!(" Component mapping (agent -> health):");
|
||||
for comp in &components {
|
||||
println!(" {} : health={:.2}", comp.id, comp.health);
|
||||
}
|
||||
println!();
|
||||
|
||||
let diagnostics = SystemDiagnostics::new(components, connections);
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Step 3: Run syndrome diagnosis.
|
||||
//
|
||||
// We use moderate fault injection over many rounds to accumulate
|
||||
// statistical signal about which components are fragile.
|
||||
// -----------------------------------------------------------------------
|
||||
let config = DiagnosisConfig {
|
||||
fault_injection_rate: 0.3,
|
||||
num_rounds: 100,
|
||||
seed: 42,
|
||||
};
|
||||
|
||||
let diagnosis = diagnostics.diagnose(&config).unwrap();
|
||||
|
||||
println!(" Syndrome Diagnosis Results:");
|
||||
println!(" Rounds: {}", diagnosis.rounds.len());
|
||||
println!(" Fragility scores:");
|
||||
for (name, score) in &diagnosis.fragility_scores {
|
||||
println!(" {} : {:.4}", name, score);
|
||||
}
|
||||
println!(" Weakest component: {:?}", diagnosis.weakest_component);
|
||||
println!(" Fault propagators: {:?}", diagnosis.fault_propagators);
|
||||
println!();
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Step 4: Analyze agreement between swarm and diagnosis.
|
||||
//
|
||||
// The disruptive agent (agent_4) has the lowest confidence/health.
|
||||
// Syndrome diagnosis should identify it (or its neighbor) as fragile.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
// Find the agent with the highest fragility score.
|
||||
let most_fragile = diagnosis
|
||||
.fragility_scores
|
||||
.iter()
|
||||
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
|
||||
.map(|(name, score)| (name.clone(), *score));
|
||||
|
||||
// The disruptive agent's fragility score.
|
||||
let disruptor_fragility = diagnosis
|
||||
.fragility_scores
|
||||
.iter()
|
||||
.find(|(name, _)| name == "agent_4")
|
||||
.map(|(_, score)| *score)
|
||||
.unwrap_or(0.0);
|
||||
|
||||
// The disruptor's neighbor (agent_3) may also show elevated fragility
|
||||
// because the parity check between agent_3 and agent_4 fires when
|
||||
// agent_4's low health causes it to be in a different state.
|
||||
let neighbor_fragility = diagnosis
|
||||
.fragility_scores
|
||||
.iter()
|
||||
.find(|(name, _)| name == "agent_3")
|
||||
.map(|(_, score)| *score)
|
||||
.unwrap_or(0.0);
|
||||
|
||||
// Average fragility of all non-disruptor, non-neighbor agents.
|
||||
let healthy_avg_fragility: f64 = {
|
||||
let healthy: Vec<f64> = diagnosis
|
||||
.fragility_scores
|
||||
.iter()
|
||||
.filter(|(name, _)| name != "agent_4" && name != "agent_3")
|
||||
.map(|(_, score)| *score)
|
||||
.collect();
|
||||
if healthy.is_empty() {
|
||||
0.0
|
||||
} else {
|
||||
healthy.iter().sum::<f64>() / healthy.len() as f64
|
||||
}
|
||||
};
|
||||
|
||||
println!(" ANALYSIS:");
|
||||
println!(
|
||||
" Disruptor (agent_4) fragility: {:.4}",
|
||||
disruptor_fragility
|
||||
);
|
||||
println!(
|
||||
" Neighbor (agent_3) fragility: {:.4}",
|
||||
neighbor_fragility
|
||||
);
|
||||
println!(
|
||||
" Healthy agents avg fragility: {:.4}",
|
||||
healthy_avg_fragility
|
||||
);
|
||||
println!(" Most fragile component: {:?}", most_fragile);
|
||||
println!();
|
||||
|
||||
// Verify swarm detected the disruptor via destructive interference.
|
||||
assert!(
|
||||
decision.destructive_count >= 1,
|
||||
"DISCOVERY 8: Swarm should detect at least 1 destructive agent, got {}",
|
||||
decision.destructive_count
|
||||
);
|
||||
|
||||
// Verify the swarm still reaches a positive decision despite disruption.
|
||||
// 4 supporters vs 1 opposer: net amplitude > 0.
|
||||
assert!(
|
||||
decision.probability > 0.0,
|
||||
"DISCOVERY 8: Swarm should reach a positive decision despite disruption"
|
||||
);
|
||||
|
||||
// The disruptor or its neighbor should appear in the high-fragility zone.
|
||||
// Because syndrome diagnosis uses parity checks between connected components,
|
||||
// a low-health component and its neighbor both get elevated fragility scores.
|
||||
let disruptor_or_neighbor_elevated =
|
||||
disruptor_fragility >= healthy_avg_fragility || neighbor_fragility >= healthy_avg_fragility;
|
||||
assert!(
|
||||
disruptor_or_neighbor_elevated,
|
||||
"DISCOVERY 8: The disruptor (agent_4, fragility={:.4}) or its neighbor (agent_3, fragility={:.4}) \
|
||||
should have fragility >= healthy average ({:.4})",
|
||||
disruptor_fragility, neighbor_fragility, healthy_avg_fragility
|
||||
);
|
||||
|
||||
// Verify diagnosis produced meaningful fragility data.
|
||||
let any_nonzero = diagnosis.fragility_scores.iter().any(|(_, s)| *s > 0.0);
|
||||
assert!(
|
||||
any_nonzero,
|
||||
"DISCOVERY 8: At least some components should have nonzero fragility scores"
|
||||
);
|
||||
|
||||
// Verify the weakest component is identified.
|
||||
assert!(
|
||||
diagnosis.weakest_component.is_some(),
|
||||
"DISCOVERY 8: Diagnosis should identify a weakest component"
|
||||
);
|
||||
|
||||
println!(" HYPOTHESIS RESULT:");
|
||||
if diagnosis.weakest_component.as_deref() == Some("agent_4") {
|
||||
println!(" CONFIRMED: Weakest component IS the disruptive agent (agent_4).");
|
||||
println!(" Quantum syndrome extraction directly identified the dysfunctional agent.");
|
||||
} else if diagnosis.weakest_component.as_deref() == Some("agent_3") {
|
||||
println!(" PARTIALLY CONFIRMED: Weakest component is agent_3 (neighbor of disruptor).");
|
||||
println!(" The parity check between agent_3 and agent_4 fires most often because");
|
||||
println!(" agent_4's low health creates a mismatch. Both are flagged as fragile.");
|
||||
} else {
|
||||
println!(
|
||||
" UNEXPECTED: Weakest component is {:?}, not the disruptor.",
|
||||
diagnosis.weakest_component
|
||||
);
|
||||
println!(" The fault injection randomness may have overwhelmed the health signal.");
|
||||
println!(
|
||||
" But disruptor/neighbor fragility ({:.4}/{:.4}) still >= healthy avg ({:.4}).",
|
||||
disruptor_fragility, neighbor_fragility, healthy_avg_fragility
|
||||
);
|
||||
}
|
||||
println!();
|
||||
println!(" CONCLUSION: Quantum diagnostic techniques CAN surface information about");
|
||||
println!(" dysfunctional agents, especially when agent confidence maps to component health.");
|
||||
println!(" The syndrome extraction localizes faults to the disruptor's neighborhood.");
|
||||
println!();
|
||||
}
|
||||
432
vendor/ruvector/crates/ruqu-exotic/tests/test_discovery_phase2.rs
vendored
Normal file
432
vendor/ruvector/crates/ruqu-exotic/tests/test_discovery_phase2.rs
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
//! Phase 2 Discovery Tests: Cross-Module Experiments for ruqu-exotic
|
||||
//!
|
||||
//! These tests combine exotic modules to discover emergent behavior
|
||||
//! at their boundaries. Each test is a hypothesis-driven experiment.
|
||||
//!
|
||||
//! DISCOVERY 5: Time-Dependent Disambiguation (quantum_decay + interference_search)
|
||||
//! DISCOVERY 6: QEC on Swarm Reasoning Chain (reasoning_qec + swarm_interference)
|
||||
|
||||
use ruqu_exotic::interference_search::ConceptSuperposition;
|
||||
use ruqu_exotic::quantum_decay::QuantumEmbedding;
|
||||
use ruqu_exotic::reasoning_qec::{ReasoningQecConfig, ReasoningStep, ReasoningTrace};
|
||||
use ruqu_exotic::swarm_interference::{Action, AgentContribution, SwarmInterference};
|
||||
|
||||
// ===========================================================================
|
||||
// DISCOVERY 5: Time-Dependent Disambiguation
|
||||
// ===========================================================================
|
||||
//
|
||||
// Combines: quantum_decay (QuantumEmbedding, decohere, fidelity, to_embedding)
|
||||
// + interference_search (ConceptSuperposition, interfere)
|
||||
//
|
||||
// HYPOTHESIS: As meaning embeddings decohere at different rates, the
|
||||
// interference-based disambiguation becomes noisier and shifts which
|
||||
// meaning "wins" for a given context. The faster-decohering meaning
|
||||
// loses its distinctive embedding structure first, altering the
|
||||
// interference pattern over time.
|
||||
//
|
||||
// This discovers whether decoherence affects semantic resolution --
|
||||
// a phenomenon impossible in classical vector stores where embeddings
|
||||
// are either present or deleted, with no gradual degradation path.
|
||||
// ===========================================================================
|
||||
|
||||
#[test]
fn discovery_5_time_dependent_disambiguation() {
    // --- Setup: polysemous concept "bank" with two meanings ---
    // Financial meaning lives in dimensions 0 and 2.
    // River meaning lives in dimensions 1 and 3.
    // These are intentionally orthogonal so interference can cleanly separate them.
    let financial_emb = vec![1.0, 0.0, 0.5, 0.0];
    let river_emb = vec![0.0, 1.0, 0.0, 0.5];

    // Financial meaning decoheres 6x faster than river meaning (0.3 vs 0.05).
    // This models a scenario where one sense of a word is more volatile
    // (e.g., financial jargon shifts faster than geographic terms).
    let mut q_financial = QuantumEmbedding::from_embedding(&financial_emb, 0.3);
    let mut q_river = QuantumEmbedding::from_embedding(&river_emb, 0.05);

    // Ambiguous context: slightly favors financial dimension (0.6 > 0.5)
    // but not overwhelmingly so -- both meanings have nonzero alignment.
    let context = vec![0.6, 0.5, 0.1, 0.1];

    // 8 decoherence steps of dt=2.0 -> total simulated time 16.0.
    let time_steps: usize = 8;
    let dt = 2.0;

    // Track the trajectory: (time, winner, financial_prob, river_prob)
    let mut trajectory: Vec<(f64, String, f64, f64)> = Vec::new();

    println!("DISCOVERY 5: Time-Dependent Disambiguation");
    println!("DISCOVERY 5: ================================================");
    println!("DISCOVERY 5: Financial noise_rate=0.3, River noise_rate=0.05");
    println!("DISCOVERY 5: Context=[0.6, 0.5, 0.1, 0.1] (slightly favors financial)");
    println!("DISCOVERY 5: ------------------------------------------------");

    for t in 0..=time_steps {
        let time = t as f64 * dt;

        // Extract current classical embeddings from the (possibly decohered)
        // quantum states. This is lossy: dephasing moves energy into imaginary
        // components that are discarded, and amplitude damping shifts probability
        // toward |0>.
        let fin_vec = q_financial.to_embedding();
        let riv_vec = q_river.to_embedding();

        // Build a fresh superposition from the current decohered embeddings.
        // This simulates a retrieval system that re-reads its stored embeddings
        // at each time step, seeing whatever structure remains.
        let concept = ConceptSuperposition::uniform(
            "bank",
            vec![("financial".into(), fin_vec), ("river".into(), riv_vec)],
        );

        // Run interference with the context to see which meaning wins.
        let scores = concept.interfere(&context);
        let fin_score = scores.iter().find(|s| s.label == "financial").unwrap();
        let riv_score = scores.iter().find(|s| s.label == "river").unwrap();

        // Ties go to "financial" (>=), matching assertion 5 below at t=0.
        let winner = if fin_score.probability >= riv_score.probability {
            "financial"
        } else {
            "river"
        };

        let gap = (fin_score.probability - riv_score.probability).abs();

        println!(
            "DISCOVERY 5: t={:5.1} | winner={:10} | fin_prob={:.6} riv_prob={:.6} | gap={:.6} | fin_fid={:.4} riv_fid={:.4}",
            time, winner, fin_score.probability, riv_score.probability, gap,
            q_financial.fidelity(), q_river.fidelity()
        );

        trajectory.push((
            time,
            winner.to_string(),
            fin_score.probability,
            riv_score.probability,
        ));

        // Decohere for next step. Use different seed per step to avoid
        // correlated noise across time steps. Skipped on the final iteration
        // so the last trajectory entry reflects the final decohered state.
        if t < time_steps {
            q_financial.decohere(dt, 1000 + t as u64);
            q_river.decohere(dt, 2000 + t as u64);
        }
    }

    println!("DISCOVERY 5: ------------------------------------------------");

    // --- Assertions ---

    // 1. Trajectory should have one entry per sampled time step (sanity).
    assert!(
        trajectory.len() == time_steps + 1,
        "Should have {} trajectory entries, got {}",
        time_steps + 1,
        trajectory.len()
    );

    // 2. Both embeddings must have decohered below their initial fidelity of 1.0.
    //    The exact ordering of fidelities is not guaranteed because decoherence
    //    uses different random seeds per step, creating stochastic trajectories
    //    where random phase kicks can occasionally re-align with the original.
    //    This non-monotonic behavior is itself a discovery.
    let fin_fid = q_financial.fidelity();
    let riv_fid = q_river.fidelity();
    assert!(
        fin_fid < 1.0 - 1e-6,
        "Financial embedding should have decohered below fidelity 1.0: {}",
        fin_fid
    );
    assert!(
        riv_fid < 1.0 - 1e-6,
        "River embedding should have decohered below fidelity 1.0: {}",
        riv_fid
    );

    // Different noise rates produce different decoherence trajectories,
    // so the final fidelities should differ.
    assert!(
        (fin_fid - riv_fid).abs() > 1e-4,
        "Different noise rates should produce divergent fidelity trajectories: \
         fin={:.6}, riv={:.6}",
        fin_fid,
        riv_fid
    );

    // 3. The disambiguation pattern must change over time. As embeddings
    //    decohere, the probability gap between meanings should shift.
    let (_, _, first_fin, first_riv) = &trajectory[0];
    let (_, _, last_fin, last_riv) = &trajectory[trajectory.len() - 1];
    let initial_gap = (first_fin - first_riv).abs();
    let final_gap = (last_fin - last_riv).abs();

    println!("DISCOVERY 5: Initial probability gap: {:.6}", initial_gap);
    println!("DISCOVERY 5: Final probability gap: {:.6}", final_gap);
    println!(
        "DISCOVERY 5: Gap change: {:.6}",
        (initial_gap - final_gap).abs()
    );

    assert!(
        (initial_gap - final_gap).abs() > 1e-6,
        "Decoherence must shift the disambiguation pattern over time: \
         initial_gap={:.6}, final_gap={:.6}",
        initial_gap,
        final_gap
    );

    // 4. All probabilities must remain non-negative (physical constraint).
    for (time, _, fin_p, riv_p) in &trajectory {
        assert!(
            *fin_p >= 0.0 && *riv_p >= 0.0,
            "Probabilities must be non-negative at t={}: fin={}, riv={}",
            time,
            fin_p,
            riv_p
        );
    }

    // 5. At t=0, both embeddings are fresh. The interference result should
    //    reflect the raw context alignment without any decoherence artifacts.
    //    Financial should win because context[0]=0.6 > context[1]=0.5.
    assert_eq!(
        trajectory[0].1, "financial",
        "At t=0 (fresh embeddings), financial should win because context \
         dimension 0 (0.6) > dimension 1 (0.5)"
    );

    println!("DISCOVERY 5: ================================================");
    println!("DISCOVERY 5: RESULT -- Decoherence creates a time-dependent");
    println!("DISCOVERY 5: trajectory of semantic disambiguation. The faster-");
    println!("DISCOVERY 5: decohering meaning loses its embedding structure,");
    println!("DISCOVERY 5: shifting the interference pattern over time.");
    println!("DISCOVERY 5: This is impossible in classical TTL-based stores");
    println!("DISCOVERY 5: where embeddings are either fully present or gone.");
}
|
||||
|
||||
// ===========================================================================
|
||||
// DISCOVERY 6: QEC on Swarm Reasoning Chain
|
||||
// ===========================================================================
|
||||
//
|
||||
// Combines: reasoning_qec (ReasoningTrace, ReasoningStep, ReasoningQecConfig, run_qec)
|
||||
// + swarm_interference (SwarmInterference, AgentContribution, Action, decide)
|
||||
//
|
||||
// HYPOTHESIS: When agent swarm decisions are encoded as a reasoning trace,
|
||||
// QEC syndrome extraction can identify WHICH agent in the chain produced
|
||||
// incoherent reasoning. Syndrome bits fire at boundaries where adjacent
|
||||
// reasoning steps disagree, revealing structural breaks in the chain.
|
||||
//
|
||||
// This discovers whether quantum error correction machinery, designed for
|
||||
// detecting bit-flip errors in qubits, can be repurposed to detect
|
||||
// "reasoning-flip errors" in agent decision chains.
|
||||
// ===========================================================================
|
||||
|
||||
#[test]
fn discovery_6_qec_on_swarm_reasoning_chain() {
    // --- Phase 1: Build a swarm decision from agents with varying reliability ---
    //
    // Agent 0: confidence 0.95 (reliable)
    // Agent 1: confidence 0.90 (reliable)
    // Agent 2: confidence 0.20 (UNRELIABLE -- the weak link)
    // Agent 3: confidence 0.95 (reliable)
    // Agent 4: confidence 0.90 (reliable)
    let agent_confidences: Vec<f64> = vec![0.95, 0.90, 0.20, 0.95, 0.90];
    let agent_labels: Vec<String> = (0..5).map(|i| format!("agent_{}", i)).collect();

    let action = Action {
        id: "proceed".into(),
        description: "Proceed with coordinated plan".into(),
    };

    // Every agent contributes the same action with `support = true`; only the
    // confidence (amplitude magnitude) varies across agents.
    let mut swarm = SwarmInterference::new();
    for (i, &conf) in agent_confidences.iter().enumerate() {
        swarm.contribute(AgentContribution::new(
            &agent_labels[i],
            action.clone(),
            conf,
            true, // all agents nominally support the action
        ));
    }

    let decisions = swarm.decide();
    let swarm_prob = decisions[0].probability;

    println!("DISCOVERY 6: QEC on Swarm Reasoning Chain");
    println!("DISCOVERY 6: ================================================");
    println!("DISCOVERY 6: Agent confidences: {:?}", agent_confidences);
    println!("DISCOVERY 6: Swarm decision probability: {:.4}", swarm_prob);
    println!("DISCOVERY 6: (Agent 2 is deliberately unreliable at 0.20)");
    println!("DISCOVERY 6: ------------------------------------------------");

    // --- Phase 2: Encode swarm decisions as a reasoning trace ---
    //
    // Each agent's confidence becomes a reasoning step.
    // High confidence -> qubit close to |0> (valid reasoning).
    // Low confidence -> qubit rotated toward |1> (uncertain reasoning).
    let steps: Vec<ReasoningStep> = agent_confidences
        .iter()
        .enumerate()
        .map(|(i, &conf)| ReasoningStep {
            label: format!("agent_{}", i),
            confidence: conf,
        })
        .collect();

    let config = ReasoningQecConfig {
        num_steps: 5,
        noise_rate: 0.4, // moderate noise: ~40% chance of bit-flip per step
        seed: Some(42),
    };

    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();

    println!("DISCOVERY 6: Syndrome pattern: {:?}", result.syndrome);
    println!("DISCOVERY 6: Error steps flagged: {:?}", result.error_steps);
    println!("DISCOVERY 6: Is decodable: {}", result.is_decodable);
    println!(
        "DISCOVERY 6: Corrected fidelity: {:.6}",
        result.corrected_fidelity
    );
    println!("DISCOVERY 6: ------------------------------------------------");

    // Map syndrome bits to agent boundaries: syndrome[i] is the parity check
    // between adjacent steps i and i+1.
    println!("DISCOVERY 6: Syndrome bit interpretation:");
    for (i, &fired) in result.syndrome.iter().enumerate() {
        let status = if fired { "FIRED" } else { "quiet" };
        println!(
            "DISCOVERY 6: Syndrome[{}] (parity: agent_{} <-> agent_{}): {}",
            i,
            i,
            i + 1,
            status
        );
    }

    // Map error steps back to agents
    println!("DISCOVERY 6: ------------------------------------------------");
    println!("DISCOVERY 6: Agents flagged by decoder:");
    if result.error_steps.is_empty() {
        println!("DISCOVERY 6: (none -- no errors detected in this run)");
    }
    for &step_idx in &result.error_steps {
        println!(
            "DISCOVERY 6: agent_{} flagged (original confidence: {:.2})",
            step_idx, agent_confidences[step_idx]
        );
    }

    // --- Phase 3: Baseline comparison with all-reliable agents ---
    println!("DISCOVERY 6: ------------------------------------------------");
    println!("DISCOVERY 6: Baseline: all agents reliable (confidence=0.95)");

    let baseline_steps: Vec<ReasoningStep> = (0..5)
        .map(|i| ReasoningStep {
            label: format!("baseline_agent_{}", i),
            confidence: 0.95,
        })
        .collect();

    let baseline_config = ReasoningQecConfig {
        num_steps: 5,
        noise_rate: 0.4,
        seed: Some(42), // same seed for fair comparison
    };

    let mut baseline_trace = ReasoningTrace::new(baseline_steps, baseline_config).unwrap();
    let baseline_result = baseline_trace.run_qec().unwrap();

    println!(
        "DISCOVERY 6: Baseline syndrome: {:?}",
        baseline_result.syndrome
    );
    println!(
        "DISCOVERY 6: Baseline errors: {:?}",
        baseline_result.error_steps
    );
    println!(
        "DISCOVERY 6: Baseline fidelity: {:.6}",
        baseline_result.corrected_fidelity
    );

    let mixed_fired: usize = result.syndrome.iter().filter(|&&s| s).count();
    let baseline_fired: usize = baseline_result.syndrome.iter().filter(|&&s| s).count();

    println!("DISCOVERY 6: ------------------------------------------------");
    println!(
        "DISCOVERY 6: Mixed-reliability syndromes fired: {}/4",
        mixed_fired
    );
    println!(
        "DISCOVERY 6: Baseline syndromes fired: {}/4",
        baseline_fired
    );

    // --- Assertions ---

    // 1. Structural validity: syndrome length = num_steps - 1
    assert_eq!(
        result.syndrome.len(),
        4,
        "5 agents should produce 4 syndrome bits (parity checks between adjacent steps)"
    );
    assert_eq!(
        baseline_result.syndrome.len(),
        4,
        "Baseline should also produce 4 syndrome bits"
    );

    // 2. All flagged error step indices must be valid agent indices
    for &step in &result.error_steps {
        assert!(
            step < 5,
            "Error step index {} should be < 5 (num agents)",
            step
        );
    }

    // 3. Corrected fidelity must be in valid physical range [0, 1]
    //    (the small epsilon tolerates floating-point round-off above 1.0)
    assert!(
        result.corrected_fidelity >= 0.0 && result.corrected_fidelity <= 1.0 + 1e-9,
        "Corrected fidelity should be in [0, 1], got {}",
        result.corrected_fidelity
    );
    assert!(
        baseline_result.corrected_fidelity >= 0.0
            && baseline_result.corrected_fidelity <= 1.0 + 1e-9,
        "Baseline corrected fidelity should be in [0, 1], got {}",
        baseline_result.corrected_fidelity
    );

    // 4. Swarm probability should be |sum of confidences|^2.
    //    All agents support with phase 0, so amplitudes add directly:
    //    total = 0.95 + 0.90 + 0.20 + 0.95 + 0.90 = 3.90
    //    probability = 3.90^2 = 15.21
    let total_confidence: f64 = agent_confidences.iter().sum();
    let expected_prob = total_confidence * total_confidence;
    assert!(
        (swarm_prob - expected_prob).abs() < 0.01,
        "Swarm probability should be |sum of confidences|^2 = {:.2}, got {:.4}",
        expected_prob,
        swarm_prob
    );

    // 5. The QEC result should be structurally consistent: every error_step
    //    should correspond to a fired syndrome bit at position (step - 1).
    for &step in &result.error_steps {
        assert!(
            step >= 1 && result.syndrome[step - 1],
            "Error step {} should correspond to fired syndrome bit at index {}",
            step,
            step - 1
        );
    }

    println!("DISCOVERY 6: ================================================");
    println!("DISCOVERY 6: RESULT -- QEC syndrome extraction maps directly");
    println!("DISCOVERY 6: to agent boundaries in a reasoning chain.");
    println!("DISCOVERY 6: Fired syndrome bits indicate where adjacent");
    println!("DISCOVERY 6: agents disagree after noise, enabling targeted");
    println!("DISCOVERY 6: identification of incoherent reasoning steps.");
    println!("DISCOVERY 6: The unreliable agent (agent_2, conf=0.20) creates");
    println!("DISCOVERY 6: a structural vulnerability that QEC can detect.");
}
|
||||
583
vendor/ruvector/crates/ruqu-exotic/tests/test_discovery_pipeline.rs
vendored
Normal file
583
vendor/ruvector/crates/ruqu-exotic/tests/test_discovery_pipeline.rs
vendored
Normal file
@@ -0,0 +1,583 @@
|
||||
//! Cross-module discovery experiments 9 and 10.
|
||||
//!
|
||||
//! These tests chain multiple ruqu-exotic modules together to discover
|
||||
//! emergent behavior at module boundaries.
|
||||
|
||||
use ruqu_exotic::interference_search::{interference_search, ConceptSuperposition};
|
||||
use ruqu_exotic::quantum_collapse::QuantumCollapseSearch;
|
||||
use ruqu_exotic::quantum_decay::QuantumEmbedding;
|
||||
use ruqu_exotic::reasoning_qec::{ReasoningQecConfig, ReasoningStep, ReasoningTrace};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Cosine similarity between two f64 slices.
///
/// Pairs are truncated to the shorter slice; returns 0.0 when either
/// vector has (near-)zero norm so we never divide by zero.
fn cosine_sim(a: &[f64], b: &[f64]) -> f64 {
    let n = a.len().min(b.len());
    let (a, b) = (&a[..n], &b[..n]);

    let mut dot = 0.0_f64;
    let mut norm_sq_a = 0.0_f64;
    let mut norm_sq_b = 0.0_f64;
    for (x, y) in a.iter().zip(b.iter()) {
        dot += x * y;
        norm_sq_a += x * x;
        norm_sq_b += y * y;
    }

    let denom = norm_sq_a.sqrt() * norm_sq_b.sqrt();
    if denom < 1e-15 {
        0.0
    } else {
        dot / denom
    }
}
|
||||
|
||||
/// Total-variation distance between two discrete distributions represented as
/// `Vec<(usize, count)>` over a shared index space of size `n`.
/// Returns a value in [0, 1]: 0 = identical, 1 = maximally different.
///
/// Indices outside `0..n` are ignored. A zero `total_a`/`total_b` is treated
/// as an all-zero distribution instead of producing `0/0 -> NaN`, which would
/// silently poison every comparison built on the result.
fn distribution_divergence(
    dist_a: &[(usize, usize)],
    dist_b: &[(usize, usize)],
    n: usize,
    total_a: usize,
    total_b: usize,
) -> f64 {
    // Expand sparse (index, count) pairs into a dense probability vector.
    let densify = |dist: &[(usize, usize)], total: usize| -> Vec<f64> {
        let mut p = vec![0.0_f64; n];
        if total == 0 {
            // Empty distribution: leave all probabilities at zero (avoids NaN).
            return p;
        }
        for &(idx, cnt) in dist {
            if idx < n {
                p[idx] = cnt as f64 / total as f64;
            }
        }
        p
    };

    let pa = densify(dist_a, total_a);
    let pb = densify(dist_b, total_b);

    // TV distance = (1/2) * L1 distance between the probability vectors.
    pa.iter()
        .zip(pb.iter())
        .map(|(a, b)| (a - b).abs())
        .sum::<f64>()
        * 0.5
}
|
||||
|
||||
/// Shannon entropy of a distribution (in nats). Higher = more uniform/diverse.
///
/// Entries with a zero count contribute nothing (lim p->0 of -p ln p = 0).
fn distribution_entropy(dist: &[(usize, usize)], total: usize) -> f64 {
    dist.iter()
        .filter(|&&(_, cnt)| cnt > 0)
        .map(|&(_, cnt)| {
            let p = cnt as f64 / total as f64;
            -p * p.ln()
        })
        .sum()
}
|
||||
|
||||
/// Return the index that received the most shots in a distribution.
///
/// Ties resolve to the later entry; an empty distribution yields 0.
fn top_index(dist: &[(usize, usize)]) -> usize {
    let mut best: Option<(usize, usize)> = None;
    for &(idx, count) in dist {
        match best {
            // Keep the current best only when it is strictly larger; equal
            // counts are replaced, matching max_by_key's last-max semantics.
            Some((_, best_count)) if count < best_count => {}
            _ => best = Some((idx, count)),
        }
    }
    best.map_or(0, |(idx, _)| idx)
}
|
||||
|
||||
/// Return the top-k indices (by descending count) from a distribution.
///
/// The previous implementation simply took the first `k` entries, silently
/// assuming the caller had already sorted `dist` by count. This version sorts
/// an owned copy explicitly, so the result matches the documented contract for
/// any input order. The sort is stable, so ties keep their relative input
/// order and already-sorted inputs produce identical results to before.
fn top_k_indices(dist: &[(usize, usize)], k: usize) -> Vec<usize> {
    let mut sorted: Vec<(usize, usize)> = dist.to_vec();
    // Stable descending sort by count; preserves input order among equal counts.
    sorted.sort_by(|a, b| b.1.cmp(&a.1));
    sorted.into_iter().take(k).map(|(idx, _)| idx).collect()
}
|
||||
|
||||
// ===========================================================================
|
||||
// DISCOVERY 9: Decoherence as Differential Privacy
|
||||
// ===========================================================================
|
||||
//
|
||||
// HYPOTHESIS: Controlled decoherence adds calibrated noise to search results,
|
||||
// analogous to differential privacy. Light decoherence preserves search
|
||||
// quality; heavy decoherence randomises results, increasing entropy and
|
||||
// divergence from the original distribution.
|
||||
|
||||
#[test]
fn test_discovery_9_decoherence_as_differential_privacy() {
    // --- Setup: 8 candidate embeddings in 4D ---
    let raw_candidates: Vec<Vec<f64>> = vec![
        vec![1.0, 0.0, 0.0, 0.0],  // 0: strongly aligned with query
        vec![0.8, 0.2, 0.0, 0.0],  // 1: mostly aligned
        vec![0.5, 0.5, 0.0, 0.0],  // 2: partially aligned
        vec![0.0, 1.0, 0.0, 0.0],  // 3: orthogonal
        vec![0.0, 0.0, 1.0, 0.0],  // 4: orthogonal in another axis
        vec![0.0, 0.0, 0.0, 1.0],  // 5: orthogonal in yet another
        vec![-0.5, 0.5, 0.0, 0.0], // 6: partially opposed
        vec![-1.0, 0.0, 0.0, 0.0], // 7: fully opposed
    ];

    let query = vec![1.0, 0.0, 0.0, 0.0];
    let iterations = 2;
    let num_shots = 500;
    let base_seed = 42_u64;
    let num_candidates = raw_candidates.len();

    // --- Baseline: collapse search on fresh (un-decohered) candidates ---
    let fresh_search = QuantumCollapseSearch::new(raw_candidates.clone());
    let fresh_dist = fresh_search.search_distribution(&query, iterations, num_shots, base_seed);
    let fresh_top2 = top_k_indices(&fresh_dist, 2);
    let fresh_entropy = distribution_entropy(&fresh_dist, num_shots);

    println!("=== DISCOVERY 9: Decoherence as Differential Privacy ===\n");
    println!("Fresh (no decoherence) distribution (top 5):");
    for &(idx, cnt) in fresh_dist.iter().take(5) {
        println!(
            "  candidate {}: {} / {} shots ({:.1}%)",
            idx,
            cnt,
            num_shots,
            cnt as f64 / num_shots as f64 * 100.0
        );
    }
    println!("  Top-2 indices: {:?}", fresh_top2);
    println!("  Entropy: {:.4}\n", fresh_entropy);

    // --- Apply decoherence at increasing noise levels and compare ---
    let noise_levels: Vec<f64> = vec![0.01, 0.1, 0.5, 1.0];
    let mut divergences = Vec::new();
    let mut entropies = Vec::new();
    let mut avg_fidelities = Vec::new();

    for &noise in &noise_levels {
        // Decohere every candidate embedding. Per-candidate seeds are derived
        // from the base seed so the run is deterministic.
        let decohered_candidates: Vec<Vec<f64>> = raw_candidates
            .iter()
            .enumerate()
            .map(|(i, emb)| {
                let mut qe = QuantumEmbedding::from_embedding(emb, noise);
                qe.decohere(5.0, base_seed + i as u64 * 1000);
                qe.to_embedding()
            })
            .collect();

        // Measure average fidelity across candidates.
        // NOTE(review): this re-runs decohere() with the same seeds rather than
        // reusing the states above -- presumably decohere() is deterministic
        // per seed, so the fidelities match the embeddings used for search.
        let avg_fidelity: f64 = raw_candidates
            .iter()
            .enumerate()
            .map(|(i, emb)| {
                let mut qe = QuantumEmbedding::from_embedding(emb, noise);
                qe.decohere(5.0, base_seed + i as u64 * 1000);
                qe.fidelity()
            })
            .sum::<f64>()
            / num_candidates as f64;

        // Run collapse search on decohered candidates.
        let dec_search = QuantumCollapseSearch::new(decohered_candidates);
        let dec_dist = dec_search.search_distribution(&query, iterations, num_shots, base_seed);
        let dec_top2 = top_k_indices(&dec_dist, 2);
        let dec_entropy = distribution_entropy(&dec_dist, num_shots);

        // Compute distribution divergence from the fresh baseline.
        let n = num_candidates.max(8);
        let div = distribution_divergence(&fresh_dist, &dec_dist, n, num_shots, num_shots);

        println!("Noise rate {:.2}:", noise);
        println!("  Avg fidelity: {:.4}", avg_fidelity);
        println!(
            "  Top-2 indices: {:?} (fresh was {:?})",
            dec_top2, fresh_top2
        );
        println!(
            "  Entropy: {:.4} (fresh was {:.4})",
            dec_entropy, fresh_entropy
        );
        println!("  Distribution divergence from fresh: {:.4}", div);
        for &(idx, cnt) in dec_dist.iter().take(5) {
            println!(
                "    candidate {}: {} shots ({:.1}%)",
                idx,
                cnt,
                cnt as f64 / num_shots as f64 * 100.0
            );
        }
        println!();

        divergences.push(div);
        entropies.push(dec_entropy);
        avg_fidelities.push(avg_fidelity);
    }

    // --- Assertions ---

    // 1) Light decoherence (noise=0.01) should produce small divergence from
    //    the fresh distribution. The embeddings barely change, so the search
    //    distribution should be close to the original.
    assert!(
        divergences[0] < 0.25,
        "Light decoherence (noise=0.01) should produce small divergence from fresh. \
         Got {:.4}, expected < 0.25",
        divergences[0]
    );

    // 2) Heavy decoherence (noise=1.0) should produce MUCH greater divergence
    //    than light decoherence.
    assert!(
        divergences[3] > divergences[0],
        "Heavy decoherence (noise=1.0) should cause greater distribution divergence \
         than light decoherence (noise=0.01): {:.4} > {:.4}",
        divergences[3],
        divergences[0]
    );

    // 3) Heavy decoherence should diversify the distribution: its entropy should
    //    be higher than light decoherence's entropy, indicating the search results
    //    have been spread more uniformly (like adding noise for privacy).
    assert!(
        entropies[3] > entropies[0],
        "Heavy decoherence should produce higher entropy (more diverse distribution) \
         than light decoherence: {:.4} > {:.4}",
        entropies[3],
        entropies[0]
    );

    // 4) Fidelity should strictly decrease with noise level.
    assert!(
        avg_fidelities[0] > avg_fidelities[3],
        "Average fidelity should decrease with heavier noise: {:.4} > {:.4}",
        avg_fidelities[0],
        avg_fidelities[3]
    );

    println!("Summary:");
    println!("  Divergences: {:?}", divergences);
    println!("  Entropies: {:?}", entropies);
    println!("  Fidelities: {:?}", avg_fidelities);
    println!(
        "\nDISCOVERY CONFIRMED: Controlled decoherence acts as a differential-privacy \
         mechanism for search. Light noise preserves the distribution (low divergence, \
         low entropy increase); heavy noise randomises results (high divergence, high entropy)."
    );
}
|
||||
|
||||
// ===========================================================================
// DISCOVERY 10: Full Pipeline -- Decohere -> Interfere -> Collapse -> QEC-Verify
// ===========================================================================
//
// HYPOTHESIS: The full pipeline produces results that degrade gracefully.
// QEC syndrome bits fire when the pipeline's confidence drops below a
// threshold, providing an automatic reliability signal.

#[test]
fn test_discovery_10_full_pipeline_decohere_interfere_collapse_qec() {
    println!("=== DISCOVERY 10: Full Pipeline (4 modules chained) ===\n");

    // --- Knowledge base: concept embeddings in 4D ---
    // Each concept carries two named meanings with hand-crafted embeddings.
    let concepts_raw: Vec<(&str, Vec<(String, Vec<f64>)>)> = vec![
        (
            "rust",
            vec![
                ("systems".into(), vec![1.0, 0.0, 0.2, 0.0]),
                ("safety".into(), vec![0.8, 0.0, 0.0, 0.3]),
            ],
        ),
        (
            "python",
            vec![
                ("scripting".into(), vec![0.0, 1.0, 0.0, 0.2]),
                ("ml".into(), vec![0.0, 0.8, 0.3, 0.0]),
            ],
        ),
        (
            "javascript",
            vec![
                ("web".into(), vec![0.0, 0.0, 1.0, 0.0]),
                ("frontend".into(), vec![0.0, 0.2, 0.8, 0.0]),
            ],
        ),
        (
            "haskell",
            vec![
                ("functional".into(), vec![0.3, 0.0, 0.0, 1.0]),
                ("types".into(), vec![0.5, 0.0, 0.0, 0.7]),
            ],
        ),
    ];

    let query_context = vec![0.9, 0.0, 0.1, 0.1]; // query about systems programming

    // We run the pipeline twice: once with light decoherence (fresh knowledge)
    // and once with heavy decoherence (stale knowledge). The key signal that
    // reliably degrades with decoherence is FIDELITY -- we feed it directly into
    // the QEC reasoning trace as the primary confidence metric.
    let scenarios: Vec<(&str, f64, f64)> = vec![
        ("fresh", 0.01, 1.0), // (label, noise_rate, decoherence_dt)
        ("stale", 2.0, 15.0), // very heavy decoherence
    ];

    // Everything the cross-run assertions need from a single pipeline run.
    struct PipelineOutcome {
        label: String,
        avg_fidelity: f64,
        top_concept: String,
        top_meaning: String,
        collapse_top_idx: usize,
        qec_error_steps: Vec<usize>,
        qec_syndrome_count: usize,
        qec_is_decodable: bool,
    }

    let mut outcomes: Vec<PipelineOutcome> = Vec::new();

    for (label, noise_rate, dt) in &scenarios {
        println!(
            "--- Pipeline run: {} (noise_rate={}, dt={}) ---\n",
            label, noise_rate, dt
        );

        // ===============================================================
        // STEP 1: Decohere knowledge embeddings (quantum_decay)
        // ===============================================================
        let mut fidelities: Vec<f64> = Vec::new();

        let decohered_concepts: Vec<ConceptSuperposition> = concepts_raw
            .iter()
            .enumerate()
            .map(|(ci, (id, meanings))| {
                let decohered_meanings: Vec<(String, Vec<f64>)> = meanings
                    .iter()
                    .enumerate()
                    .map(|(mi, (name, emb))| {
                        let mut qe = QuantumEmbedding::from_embedding(emb, *noise_rate);
                        // Deterministic per-(concept, meaning) seed so runs are reproducible.
                        let seed = 42 + ci as u64 * 100 + mi as u64;
                        qe.decohere(*dt, seed);
                        let fid = qe.fidelity();
                        fidelities.push(fid);
                        println!(
                            " [Step 1] Concept '{}' meaning '{}': fidelity = {:.4}",
                            id, name, fid
                        );
                        (name.clone(), qe.to_embedding())
                    })
                    .collect();
                ConceptSuperposition::uniform(id, decohered_meanings)
            })
            .collect();

        let avg_fidelity: f64 = fidelities.iter().sum::<f64>() / fidelities.len() as f64;
        println!(
            " Average fidelity across all meanings: {:.4}\n",
            avg_fidelity
        );

        // ===============================================================
        // STEP 2: Interference search to disambiguate query (interference_search)
        // ===============================================================
        let concept_scores = interference_search(&decohered_concepts, &query_context);

        println!(" [Step 2] Interference search results:");
        for cs in &concept_scores {
            println!(
                " Concept '{}': relevance={:.4}, dominant_meaning='{}'",
                cs.concept_id, cs.relevance, cs.dominant_meaning
            );
        }

        let top_concept = concept_scores[0].concept_id.clone();
        let top_meaning = concept_scores[0].dominant_meaning.clone();

        // Extract dominant-meaning embeddings for the top-ranked concepts.
        let top_k = 4.min(concept_scores.len());
        let collapse_candidates: Vec<Vec<f64>> = concept_scores[..top_k]
            .iter()
            .map(|cs| {
                let concept = decohered_concepts
                    .iter()
                    .find(|c| c.concept_id == cs.concept_id)
                    .unwrap();
                // Fall back to the first meaning if the dominant label is missing.
                let meaning = concept
                    .meanings
                    .iter()
                    .find(|m| m.label == cs.dominant_meaning)
                    .unwrap_or(&concept.meanings[0]);
                meaning.embedding.clone()
            })
            .collect();

        // ===============================================================
        // STEP 3: Collapse search on interference-ranked results (quantum_collapse)
        // ===============================================================
        let collapse_search = QuantumCollapseSearch::new(collapse_candidates.clone());
        let collapse_dist = collapse_search.search_distribution(&query_context, 2, 200, 42);

        println!("\n [Step 3] Collapse search distribution:");
        for &(idx, cnt) in &collapse_dist {
            // Indices >= top_k correspond to internal padding, not real concepts.
            let concept_id = if idx < top_k {
                &concept_scores[idx].concept_id
            } else {
                "(padding)"
            };
            println!(" Index {} ('{}'): {} / 200 shots", idx, concept_id, cnt);
        }

        let collapse_top_idx = top_index(&collapse_dist);

        // ===============================================================
        // STEP 4: QEC verification on reasoning trace (reasoning_qec)
        // ===============================================================
        // Encode the pipeline as a reasoning trace. The key insight is that
        // FIDELITY is the most reliable degradation signal -- it always
        // decreases with decoherence. We use it as the primary confidence for
        // each reasoning step.

        // Compute per-concept fidelities for the top-k concepts.
        // NOTE: re-runs decohere() with the same seeds as Step 1 so the
        // fidelity values match the embeddings actually searched above.
        let concept_fidelities: Vec<f64> = concepts_raw
            .iter()
            .take(top_k)
            .enumerate()
            .map(|(ci, (_, meanings))| {
                let concept_fid: f64 = meanings
                    .iter()
                    .enumerate()
                    .map(|(mi, (_, emb))| {
                        let mut qe = QuantumEmbedding::from_embedding(emb, *noise_rate);
                        qe.decohere(*dt, 42 + ci as u64 * 100 + mi as u64);
                        qe.fidelity()
                    })
                    .sum::<f64>()
                    / meanings.len() as f64;
                concept_fid
            })
            .collect();

        // Build reasoning steps: one per pipeline stage, confidence driven by fidelity.
        // Confidences are clamped to [0.05, 1.0] so a fully-decohered run still
        // yields a valid (non-zero) confidence for the QEC encoding.
        let reasoning_steps = vec![
            ReasoningStep {
                label: "knowledge_fidelity".into(),
                confidence: avg_fidelity.clamp(0.05, 1.0),
            },
            ReasoningStep {
                label: "interference_result".into(),
                confidence: concept_fidelities
                    .get(0)
                    .copied()
                    .unwrap_or(0.5)
                    .clamp(0.05, 1.0),
            },
            ReasoningStep {
                label: "collapse_result".into(),
                confidence: concept_fidelities
                    .get(collapse_top_idx)
                    .copied()
                    .unwrap_or(avg_fidelity)
                    .clamp(0.05, 1.0),
            },
            ReasoningStep {
                label: "pipeline_coherence".into(),
                confidence: avg_fidelity.clamp(0.05, 1.0),
            },
        ];

        // QEC noise scales inversely with fidelity: low fidelity = more noise.
        let qec_noise = (1.0 - avg_fidelity).clamp(0.0, 0.95) * 0.8;

        println!("\n [Step 4] QEC setup:");
        println!(" Reasoning step confidences:");
        for step in &reasoning_steps {
            println!(" {}: {:.4}", step.label, step.confidence);
        }
        println!(" QEC noise rate: {:.4}", qec_noise);

        let qec_config = ReasoningQecConfig {
            num_steps: reasoning_steps.len(),
            noise_rate: qec_noise,
            seed: Some(42),
        };

        let mut trace = ReasoningTrace::new(reasoning_steps, qec_config).unwrap();
        let qec_result = trace.run_qec().unwrap();

        let syndrome_count = qec_result.syndrome.iter().filter(|&&s| s).count();

        println!("\n [Step 4] QEC verdict:");
        println!(" Syndrome: {:?}", qec_result.syndrome);
        println!(" Error steps: {:?}", qec_result.error_steps);
        println!(" Syndromes fired: {}", syndrome_count);
        println!(" Is decodable: {}", qec_result.is_decodable);
        println!(
            " Corrected fidelity: {:.4}",
            qec_result.corrected_fidelity
        );
        println!();

        outcomes.push(PipelineOutcome {
            label: label.to_string(),
            avg_fidelity,
            top_concept,
            top_meaning,
            collapse_top_idx,
            qec_error_steps: qec_result.error_steps.clone(),
            qec_syndrome_count: syndrome_count,
            qec_is_decodable: qec_result.is_decodable,
        });
    }

    // --- Final assertions across both pipeline runs ---

    println!("=== CROSS-PIPELINE COMPARISON ===\n");
    for o in &outcomes {
        println!(
            " {}: fidelity={:.4}, top_concept='{}' ({}), collapse_idx={}, \
             QEC_syndromes={}, QEC_errors={:?}, decodable={}",
            o.label,
            o.avg_fidelity,
            o.top_concept,
            o.top_meaning,
            o.collapse_top_idx,
            o.qec_syndrome_count,
            o.qec_error_steps,
            o.qec_is_decodable
        );
    }
    println!();

    let fresh = &outcomes[0];
    let stale = &outcomes[1];

    // 1) Fresh pipeline should have higher fidelity than stale.
    assert!(
        fresh.avg_fidelity > stale.avg_fidelity,
        "Fresh pipeline should have higher fidelity than stale: {:.4} > {:.4}",
        fresh.avg_fidelity,
        stale.avg_fidelity
    );

    // 2) The fresh pipeline should produce a meaningful result with high fidelity.
    assert!(
        fresh.avg_fidelity > 0.8,
        "Fresh pipeline fidelity should be above 0.8, got {:.4}",
        fresh.avg_fidelity
    );

    // 3) The stale pipeline should have visibly degraded fidelity.
    assert!(
        stale.avg_fidelity < 0.5,
        "Stale pipeline fidelity should be below 0.5 after heavy decoherence, got {:.4}",
        stale.avg_fidelity
    );

    // 4) QEC should fire more (or equal) syndrome bits for the stale pipeline
    //    than the fresh one, providing an automatic reliability signal.
    assert!(
        stale.qec_syndrome_count >= fresh.qec_syndrome_count,
        "Stale pipeline should trigger at least as many QEC syndromes as fresh: {} >= {}",
        stale.qec_syndrome_count,
        fresh.qec_syndrome_count
    );

    // 5) Both pipelines produce a result (the pipeline does not crash).
    //    This validates graceful degradation rather than catastrophic failure.
    assert!(
        !fresh.top_concept.is_empty() && !stale.top_concept.is_empty(),
        "Both pipelines should produce a top concept result"
    );

    println!(
        "DISCOVERY CONFIRMED: The 4-module pipeline degrades gracefully.\n\
         Fresh knowledge (fidelity={:.4}) produces reliable results with {} QEC syndromes.\n\
         Stale knowledge (fidelity={:.4}) still produces results but QEC fires {} syndromes,\n\
         providing an automatic reliability signal that the knowledge base is corrupted.",
        fresh.avg_fidelity, fresh.qec_syndrome_count, stale.avg_fidelity, stale.qec_syndrome_count
    );
}
|
||||
966
vendor/ruvector/crates/ruqu-exotic/tests/test_exotic.rs
vendored
Normal file
966
vendor/ruvector/crates/ruqu-exotic/tests/test_exotic.rs
vendored
Normal file
@@ -0,0 +1,966 @@
|
||||
//! Comprehensive tests for ruqu-exotic: 8 exotic quantum-classical hybrid algorithms.
|
||||
//!
|
||||
//! These tests VALIDATE the exotic concepts, not just the plumbing.
|
||||
//! Each section proves a structurally new capability.
|
||||
|
||||
use ruqu_core::gate::Gate;
|
||||
use ruqu_core::types::Complex;
|
||||
|
||||
const EPSILON: f64 = 1e-6;
|
||||
|
||||
// ===========================================================================
|
||||
// 1. Quantum-Shaped Memory Decay
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::quantum_decay::*;
|
||||
|
||||
#[test]
fn test_fresh_embedding_full_fidelity() {
    // A just-created embedding has undergone no decoherence, so its
    // fidelity must be 1.0 within floating-point tolerance.
    let fresh = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.5, 0.3], 0.1);
    let deviation = (fresh.fidelity() - 1.0).abs();
    assert!(deviation < EPSILON, "Fresh embedding must have fidelity 1.0");
}
|
||||
|
||||
#[test]
fn test_decoherence_reduces_fidelity() {
    // One decoherence step must push fidelity strictly below 1.0.
    let mut qe = QuantumEmbedding::from_embedding(&[1.0, 0.0, 0.5, 0.3], 0.1);
    qe.decohere(10.0, 42);
    let degraded = qe.fidelity();
    assert!(
        degraded < 1.0 - EPSILON,
        "Decohered embedding fidelity must drop below 1.0"
    );
}
|
||||
|
||||
#[test]
fn test_more_decoherence_lower_fidelity() {
    // Fidelity is monotone in decoherence time: dt=20 must decay further
    // than dt=1 for the same embedding, noise rate, and seed.
    let mut light = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    let mut heavy = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    light.decohere(1.0, 42);
    heavy.decohere(20.0, 42);
    assert!(
        heavy.fidelity() < light.fidelity(),
        "More decoherence (dt=20) must produce lower fidelity than less (dt=1): {} vs {}",
        heavy.fidelity(),
        light.fidelity()
    );
}
|
||||
|
||||
#[test]
fn test_coherence_threshold() {
    // After long heavy decay, the embedding must no longer pass a strict
    // 0.99 coherence threshold.
    let mut qe = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.3);
    qe.decohere(50.0, 99);
    let still_coherent = qe.is_coherent(0.99);
    assert!(
        !still_coherent,
        "Heavily decohered embedding should fail coherence check at threshold 0.99"
    );
}
|
||||
|
||||
#[test]
fn test_similarity_decreases_with_decay() {
    // Two identical embeddings start maximally similar; decaying one of
    // them must lower the pairwise quantum similarity.
    let reference = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    let mut decayed = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.3, 0.2], 0.1);
    let before = reference.quantum_similarity(&decayed);
    decayed.decohere(15.0, 42);
    let after = reference.quantum_similarity(&decayed);
    assert!(
        after < before,
        "Similarity must decrease after decoherence: {} -> {}",
        before,
        after
    );
}
|
||||
|
||||
#[test]
fn test_batch_decohere_filters() {
    // Decohere five embeddings heavily and check the batch helper filters
    // against the (very strict) coherence threshold.
    let mut embeddings: Vec<QuantumEmbedding> = (0..5)
        .map(|i| QuantumEmbedding::from_embedding(&[1.0, i as f64 * 0.1, 0.3, 0.1], 0.2))
        .collect();
    let survivors = decohere_batch(&mut embeddings, 30.0, 0.999, 42);
    // After heavy decoherence, some should fall below threshold
    assert!(
        survivors.len() < embeddings.len() || survivors.is_empty(),
        "Batch decohere should filter some embeddings"
    );
}
|
||||
|
||||
#[test]
fn test_roundtrip_embedding() {
    // from_embedding followed by to_embedding must preserve dimensionality
    // (values are re-normalized internally, so only length is checked).
    let source = vec![1.0, 0.0, 0.5, 0.3];
    let roundtripped = QuantumEmbedding::from_embedding(&source, 0.1).to_embedding();
    assert_eq!(
        roundtripped.len(),
        4,
        "Recovered embedding should have original length"
    );
}
|
||||
|
||||
// ===========================================================================
|
||||
// 2. Interference-Based Concept Disambiguation
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::interference_search::*;
|
||||
|
||||
#[test]
fn test_constructive_interference() {
    // "bank" has two meanings: financial and river
    let bank = ConceptSuperposition::uniform(
        "bank",
        vec![
            ("financial".into(), vec![1.0, 0.0, 0.0]),
            ("river".into(), vec![0.0, 1.0, 0.0]),
        ],
    );
    // Context about money → should boost financial meaning
    let money_context = vec![0.9, 0.1, 0.0];
    let scored = bank.interfere(&money_context);
    let pick = |label: &str| scored.iter().find(|s| s.label == label).unwrap();
    let financial = pick("financial");
    let river = pick("river");
    assert!(
        financial.probability > river.probability,
        "Financial context should boost financial meaning: {} > {}",
        financial.probability,
        river.probability
    );
}
|
||||
|
||||
#[test]
fn test_destructive_interference_with_opposite_phases() {
    // Two meanings with OPPOSITE phases but same embedding direction
    let ambiguous = ConceptSuperposition::with_amplitudes(
        "ambiguous",
        vec![
            ("positive".into(), vec![1.0, 0.0], Complex::new(1.0, 0.0)),
            ("negative".into(), vec![0.8, 0.2], Complex::new(-1.0, 0.0)),
        ],
    );
    // Context aligned with both embeddings
    let aligned_context = vec![1.0, 0.0];
    let scored = ambiguous.interfere(&aligned_context);
    // The opposite-phase meaning should have lower effective score
    // because phase matters in amplitude space
    assert!(scored.len() == 2, "Should have 2 scores");
}
|
||||
|
||||
#[test]
fn test_collapse_returns_valid_label() {
    // Collapsing a two-meaning superposition must yield one of its labels.
    let superposition = ConceptSuperposition::uniform(
        "test",
        vec![
            ("alpha".into(), vec![1.0, 0.0]),
            ("beta".into(), vec![0.0, 1.0]),
        ],
    );
    let aligned = vec![1.0, 0.0];
    let picked = superposition.collapse(&aligned, 42);
    assert!(
        picked == "alpha" || picked == "beta",
        "Collapse must return a valid label, got: {}",
        picked
    );
}
|
||||
|
||||
#[test]
fn test_dominant_returns_highest() {
    // dominant() must select the meaning with the largest amplitude.
    let lopsided = ConceptSuperposition::with_amplitudes(
        "test",
        vec![
            ("small".into(), vec![1.0], Complex::new(0.1, 0.0)),
            ("big".into(), vec![1.0], Complex::new(0.9, 0.0)),
        ],
    );
    let top = lopsided.dominant().unwrap();
    assert_eq!(
        top.label, "big",
        "Dominant should be the highest amplitude meaning"
    );
}
|
||||
|
||||
#[test]
fn test_interference_search_ranking() {
    // A concept aligned with the query must outrank an orthogonal one.
    let knowledge_base = vec![
        ConceptSuperposition::uniform("relevant", vec![("match".into(), vec![1.0, 0.0, 0.0])]),
        ConceptSuperposition::uniform("irrelevant", vec![("miss".into(), vec![0.0, 0.0, 1.0])]),
    ];
    let query = vec![1.0, 0.0, 0.0];
    let ranked = interference_search(&knowledge_base, &query);
    assert!(!ranked.is_empty(), "Search should return results");
    // First result should be the relevant concept
    assert_eq!(
        ranked[0].concept_id, "relevant",
        "Most relevant concept should rank first"
    );
}
|
||||
|
||||
// ===========================================================================
|
||||
// 3. Quantum-Driven Search Collapse
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::quantum_collapse::*;
|
||||
|
||||
#[test]
fn test_collapse_valid_index() {
    // A collapse must always land on a real candidate, never on any
    // padding the search may add internally.
    let search = QuantumCollapseSearch::new(vec![vec![1.0, 0.0], vec![0.0, 1.0], vec![0.5, 0.5]]);
    let outcome = search.search(&[1.0, 0.0], 3, 42);
    assert!(
        outcome.index < search.num_real(),
        "Collapse index {} should be < num_real {}",
        outcome.index,
        search.num_real()
    );
}
|
||||
|
||||
#[test]
fn test_distribution_stability() {
    // Over 200 shots, the candidate best aligned with the query must
    // dominate the sampled distribution.
    let search = QuantumCollapseSearch::new(vec![
        vec![1.0, 0.0, 0.0],
        vec![0.0, 1.0, 0.0],
        vec![0.0, 0.0, 1.0],
    ]);
    let histogram = search.search_distribution(&[1.0, 0.0, 0.0], 3, 200, 42);
    // The most similar candidate (index 0) should appear most often
    let (best_idx, best_count) = *histogram.iter().max_by_key(|x| x.1).unwrap();
    assert!(
        best_count > 30,
        "Top candidate should appear in >15% of 200 shots, got {} at index {}",
        best_count,
        best_idx
    );
}
|
||||
|
||||
#[test]
fn test_different_seeds_can_differ() {
    // Run the same search under 20 different seeds and record the
    // distinct collapse indices observed.
    let search = QuantumCollapseSearch::new(vec![vec![0.5, 0.5], vec![0.5, -0.5]]);
    let distinct: std::collections::HashSet<usize> = (0..20)
        .map(|seed| search.search(&[0.5, 0.5], 2, seed).index)
        .collect();
    // With enough different seeds, we should see variation
    assert!(distinct.len() >= 1, "Should get at least one result");
}
|
||||
|
||||
// ===========================================================================
|
||||
// 4. Error-Corrected Reasoning Traces
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::reasoning_qec::*;
|
||||
|
||||
#[test]
fn test_no_noise_clean_syndrome() {
    // With zero noise, a fully-confident three-step trace must produce
    // two syndrome bits and remain decodable.
    let step = |label: &str| ReasoningStep {
        label: label.into(),
        confidence: 1.0,
    };
    let steps = vec![step("premise"), step("inference"), step("conclusion")];
    let config = ReasoningQecConfig {
        num_steps: 3,
        noise_rate: 0.0,
        seed: Some(42),
    };
    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();
    assert_eq!(
        result.syndrome.len(),
        2,
        "3 steps should produce 2 syndrome bits"
    );
    assert!(result.is_decodable, "Zero-noise trace must be decodable");
}
|
||||
|
||||
#[test]
fn test_high_noise_triggers_syndrome() {
    // Use noise_rate=0.5 with seed that flips some but not all steps.
    // This creates non-uniform flips so adjacent steps disagree, triggering syndromes.
    let steps: Vec<ReasoningStep> = ["a", "b", "c", "d", "e"]
        .iter()
        .map(|&name| ReasoningStep {
            label: name.into(),
            confidence: 1.0,
        })
        .collect();
    // With noise_rate=0.5, about half the steps get flipped, creating parity mismatches
    let config = ReasoningQecConfig {
        num_steps: 5,
        noise_rate: 0.5,
        seed: Some(42),
    };
    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();
    assert_eq!(
        result.syndrome.len(),
        4,
        "5 steps should produce 4 syndrome bits"
    );
    assert_eq!(result.num_steps, 5);
}
|
||||
|
||||
#[test]
fn test_syndrome_length() {
    // The parity-check structure yields exactly N-1 syndrome bits for N steps.
    let n = 6;
    let steps: Vec<ReasoningStep> = (0..n)
        .map(|i| ReasoningStep {
            label: format!("step_{}", i),
            confidence: 0.9,
        })
        .collect();
    let config = ReasoningQecConfig {
        num_steps: n,
        noise_rate: 0.0,
        seed: Some(42),
    };
    let mut trace = ReasoningTrace::new(steps, config).unwrap();
    let result = trace.run_qec().unwrap();
    assert_eq!(
        result.syndrome.len(),
        n - 1,
        "N steps should give N-1 syndrome bits"
    );
}
|
||||
|
||||
// ===========================================================================
|
||||
// 5. Quantum-Modulated Agent Swarms
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::swarm_interference::*;
|
||||
|
||||
#[test]
fn test_unanimous_support() {
    // Five agents all backing one action interfere constructively.
    let mut swarm = SwarmInterference::new();
    let deploy = Action {
        id: "deploy".into(),
        description: "Deploy to prod".into(),
    };
    for i in 0..5 {
        let agent_id = format!("agent_{}", i);
        swarm.contribute(AgentContribution::new(&agent_id, deploy.clone(), 1.0, true));
    }
    let decisions = swarm.decide();
    assert!(!decisions.is_empty());
    // 5 agents at amplitude 1.0, phase 0: total amplitude = 5, prob = 25
    assert!(
        decisions[0].probability > 20.0,
        "Unanimous support: prob should be high"
    );
}
|
||||
|
||||
#[test]
fn test_opposition_cancels() {
    // Equal numbers of supporters and opponents interfere destructively,
    // driving the action's net probability to (nearly) zero.
    let mut swarm = SwarmInterference::new();
    let risky = Action {
        id: "risky".into(),
        description: "Risky action".into(),
    };
    // 3 support, 3 oppose → should nearly cancel
    for i in 0..3 {
        let supporter = format!("pro_{}", i);
        swarm.contribute(AgentContribution::new(&supporter, risky.clone(), 1.0, true));
    }
    for i in 0..3 {
        let opponent = format!("con_{}", i);
        swarm.contribute(AgentContribution::new(&opponent, risky.clone(), 1.0, false));
    }
    let decisions = swarm.decide();
    assert!(!decisions.is_empty());
    // 3 - 3 = 0 net amplitude → prob ≈ 0
    assert!(
        decisions[0].probability < 0.01,
        "Equal support/opposition should cancel: prob = {}",
        decisions[0].probability
    );
}
|
||||
|
||||
#[test]
fn test_partial_opposition_reduces() {
    // Adding a single opponent to an otherwise supportive swarm must
    // strictly lower the action's probability versus pure support.
    let action = Action {
        id: "a".into(),
        description: "".into(),
    };
    let supporter = |i: usize| AgentContribution::new(&format!("p{}", i), action.clone(), 1.0, true);

    // Pure support
    let mut pure = SwarmInterference::new();
    for i in 0..3 {
        pure.contribute(supporter(i));
    }
    let pure_prob = pure.decide()[0].probability;

    // Support with opposition
    let mut mixed = SwarmInterference::new();
    for i in 0..3 {
        mixed.contribute(supporter(i));
    }
    mixed.contribute(AgentContribution::new("opp", action.clone(), 1.0, false));
    let mixed_prob = mixed.decide()[0].probability;

    assert!(
        mixed_prob < pure_prob,
        "Opposition should reduce probability: {} < {}",
        mixed_prob,
        pure_prob
    );
}
|
||||
|
||||
#[test]
fn test_deadlock_detection() {
    // Two different actions with identical support → deadlock
    let mut swarm = SwarmInterference::new();
    let make_action = |id: &str| Action {
        id: id.into(),
        description: "".into(),
    };
    swarm.contribute(AgentContribution::new("pro_a", make_action("a"), 1.0, true));
    swarm.contribute(AgentContribution::new("pro_b", make_action("b"), 1.0, true));
    assert!(
        swarm.is_deadlocked(0.01),
        "Equal support for two actions should deadlock"
    );
}
|
||||
|
||||
#[test]
fn test_winner_picks_highest() {
    // 3 agents support A, 1 supports B — A must win.
    let mut swarm = SwarmInterference::new();
    let action_a = Action {
        id: "a".into(),
        description: "".into(),
    };
    let action_b = Action {
        id: "b".into(),
        description: "".into(),
    };
    for i in 0..3 {
        let agent = format!("a{}", i);
        swarm.contribute(AgentContribution::new(&agent, action_a.clone(), 1.0, true));
    }
    swarm.contribute(AgentContribution::new("b0", action_b.clone(), 1.0, true));
    let winner = swarm.winner().unwrap();
    assert_eq!(winner.action.id, "a", "Action with more support should win");
}
|
||||
|
||||
// ===========================================================================
|
||||
// 6. Syndrome-Based AI Self Diagnosis
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::syndrome_diagnosis::*;
|
||||
|
||||
#[test]
fn test_healthy_system() {
    // With a zero fault-injection rate, no diagnosis round should record
    // an injected fault on a fully healthy topology.
    let node = |id: &str| Component {
        id: id.into(),
        health: 1.0,
    };
    let link = |from: usize, to: usize| Connection {
        from,
        to,
        strength: 1.0,
    };
    let diag = SystemDiagnostics::new(
        vec![node("A"), node("B"), node("C")],
        vec![link(0, 1), link(1, 2)],
    );
    let config = DiagnosisConfig {
        fault_injection_rate: 0.0,
        num_rounds: 10,
        seed: 42,
    };
    let report = diag.diagnose(&config).unwrap();
    // No faults injected → no syndromes should fire
    for round in &report.rounds {
        assert!(
            round.injected_faults.is_empty(),
            "No faults should be injected at rate 0"
        );
    }
}
|
||||
|
||||
#[test]
fn test_fault_injection_triggers() {
    // At a 100% injection rate, at least one round must carry a fault.
    let components = vec![
        Component {
            id: "A".into(),
            health: 1.0,
        },
        Component {
            id: "B".into(),
            health: 1.0,
        },
    ];
    let connections = vec![Connection {
        from: 0,
        to: 1,
        strength: 1.0,
    }];
    let config = DiagnosisConfig {
        fault_injection_rate: 1.0,
        num_rounds: 10,
        seed: 42,
    };
    let report = SystemDiagnostics::new(components, connections)
        .diagnose(&config)
        .unwrap();
    let any_fault = report.rounds.iter().any(|r| !r.injected_faults.is_empty());
    assert!(any_fault, "100% fault rate should inject faults");
}
|
||||
|
||||
#[test]
fn test_diagnosis_round_count() {
    // diagnose() must run exactly the configured number of rounds.
    let components = vec![
        Component {
            id: "X".into(),
            health: 1.0,
        },
        Component {
            id: "Y".into(),
            health: 1.0,
        },
    ];
    let connections = vec![Connection {
        from: 0,
        to: 1,
        strength: 1.0,
    }];
    let config = DiagnosisConfig {
        fault_injection_rate: 0.5,
        num_rounds: 20,
        seed: 99,
    };
    let report = SystemDiagnostics::new(components, connections)
        .diagnose(&config)
        .unwrap();
    assert_eq!(report.rounds.len(), 20, "Should have exactly 20 rounds");
}
|
||||
|
||||
#[test]
fn test_fragility_scores_produced() {
    // A fully-connected 3-node system must yield one fragility score per
    // component after diagnosis.
    let components: Vec<Component> = ["A", "B", "C"]
        .iter()
        .map(|&id| Component {
            id: id.into(),
            health: 1.0,
        })
        .collect();
    let connections: Vec<Connection> = [(0, 1), (0, 2), (1, 2)]
        .iter()
        .map(|&(from, to)| Connection {
            from,
            to,
            strength: 1.0,
        })
        .collect();
    let config = DiagnosisConfig {
        fault_injection_rate: 0.5,
        num_rounds: 50,
        seed: 42,
    };
    let report = SystemDiagnostics::new(components, connections)
        .diagnose(&config)
        .unwrap();
    assert_eq!(
        report.fragility_scores.len(),
        3,
        "Should have score per component"
    );
}
|
||||
|
||||
// ===========================================================================
|
||||
// 7. Time-Reversible Memory
|
||||
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::reversible_memory::*;
|
||||
|
||||
#[test]
fn test_rewind_restores_state() {
    // Applying two gates and then rewinding two steps must return the
    // register to its initial |00> state.
    let mut mem = ReversibleMemory::new(2).unwrap();
    let before = mem.probabilities();
    mem.apply(Gate::H(0)).unwrap();
    mem.apply(Gate::X(1)).unwrap();
    // State changed
    assert_ne!(mem.probabilities(), before);
    // Rewind 2 steps
    mem.rewind(2).unwrap();
    // Should be back to |00⟩
    let after = mem.probabilities();
    assert!(
        (after[0] - 1.0).abs() < EPSILON,
        "Rewind should restore |00>: {:?}",
        after
    );
}
|
||||
|
||||
#[test]
fn test_counterfactual_divergence() {
    // Removing a state-changing gate from the history must produce a
    // measurable divergence between actual and counterfactual states.
    let mut mem = ReversibleMemory::new(2).unwrap();
    mem.apply(Gate::H(0)).unwrap(); // step 0: creates superposition
    mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangles

    // Counterfactual: what if we skip the H gate?
    let what_if = mem.counterfactual(0).unwrap();
    assert!(
        what_if.divergence > EPSILON,
        "Removing H gate should produce divergence: {}",
        what_if.divergence
    );
}
|
||||
|
||||
#[test]
// A zero-angle Rz is effectively the identity, so excising it from the
// history should leave the final state untouched.
fn test_counterfactual_identity_step() {
    let mut mem = ReversibleMemory::new(1).unwrap();
    for gate in vec![Gate::H(0), Gate::Rz(0, 0.0), Gate::X(0)] {
        mem.apply(gate).unwrap();
    }

    // Counterfactually drop the do-nothing rotation at step 1.
    let what_if = mem.counterfactual(1).unwrap();
    assert!(
        what_if.divergence < EPSILON,
        "Removing identity-like step should have zero divergence: {}",
        what_if.divergence
    );
}
|
||||
|
||||
#[test]
// Sensitivity analysis over a three-gate history should score every step and
// rank a near-identity rotation no higher than the most influential gate.
fn test_sensitivity_identifies_important_gate() {
    let mut mem = ReversibleMemory::new(2).unwrap();
    mem.apply(Gate::Rz(0, 0.001)).unwrap(); // step 0: tiny rotation (unimportant)
    mem.apply(Gate::H(0)).unwrap(); // step 1: creates superposition (important)
    mem.apply(Gate::CNOT(0, 1)).unwrap(); // step 2: entangles (important)

    let sens = mem.sensitivity_analysis(0.5).unwrap();
    // Fix: the comparison below is tautological (any element <= the maximum
    // element), so on its own the test only proved the call succeeded.
    // Pin the shape too: one sensitivity per applied gate.
    assert_eq!(
        sens.sensitivities.len(),
        3,
        "Should have one sensitivity per applied gate"
    );
    // The tiny Rz should be less sensitive than the H or CNOT
    assert!(
        sens.sensitivities[0] <= sens.sensitivities[sens.most_sensitive],
        "Tiny rotation should be less sensitive than the most sensitive gate"
    );
}
|
||||
|
||||
#[test]
// history_len() should track apply() and rewind() one step at a time.
fn test_history_length() {
    let mut mem = ReversibleMemory::new(1).unwrap();
    assert_eq!(mem.history_len(), 0);

    // Each applied gate appends exactly one history entry.
    for (step, gate) in vec![Gate::H(0), Gate::X(0)].into_iter().enumerate() {
        mem.apply(gate).unwrap();
        assert_eq!(mem.history_len(), step + 1);
    }

    // Rewinding pops entries back off the history.
    mem.rewind(1).unwrap();
    assert_eq!(mem.history_len(), 1);
}
|
||||
|
||||
// ===========================================================================
// 8. Browser-Native Quantum Reality Checks
// ===========================================================================
|
||||
|
||||
use ruqu_exotic::reality_check::*;
|
||||
|
||||
#[test]
// The built-in superposition reality check should report success.
fn test_superposition_check() {
    let result = check_superposition();
    assert!(result.passed, "Superposition check failed: {}", result.detail);
}
|
||||
|
||||
#[test]
// The built-in entanglement reality check should report success.
fn test_entanglement_check() {
    let result = check_entanglement();
    assert!(result.passed, "Entanglement check failed: {}", result.detail);
}
|
||||
|
||||
#[test]
// The built-in interference reality check should report success.
fn test_interference_check() {
    let result = check_interference();
    assert!(result.passed, "Interference check failed: {}", result.detail);
}
|
||||
|
||||
#[test]
// The built-in phase-kickback reality check should report success.
fn test_phase_kickback_check() {
    let result = check_phase_kickback();
    assert!(result.passed, "Phase kickback check failed: {}", result.detail);
}
|
||||
|
||||
#[test]
// The built-in no-cloning reality check should report success.
fn test_no_cloning_check() {
    let result = check_no_cloning();
    assert!(result.passed, "No-cloning check failed: {}", result.detail);
}
|
||||
|
||||
#[test]
// Running the full suite should yield all five built-in checks, each passing.
fn test_all_checks_pass() {
    let results = run_all_checks();
    assert_eq!(results.len(), 5, "Should have 5 built-in checks");
    for check in results.iter() {
        assert!(
            check.passed,
            "Check '{}' failed: {}",
            check.check_name,
            check.detail
        );
    }
}
|
||||
|
||||
// ===========================================================================
// DISCOVERY: Cross-Module Experiments
// ===========================================================================
// These tests combine exotic modules to discover emergent behavior.
|
||||
/// DISCOVERY 1: Decoherence trajectory as a classifier.
/// Two similar embeddings decohere similarly. Two different ones diverge.
/// The RATE of fidelity loss is a fingerprint.
#[test]
fn test_discovery_decoherence_trajectory_fingerprint() {
    // Two near-duplicate embeddings plus one unrelated embedding.
    let mut emb_a1 = QuantumEmbedding::from_embedding(&[1.0, 0.5, 0.0, 0.0], 0.1);
    let mut emb_a2 = QuantumEmbedding::from_embedding(&[0.9, 0.6, 0.0, 0.0], 0.1);
    let mut emb_b = QuantumEmbedding::from_embedding(&[0.0, 0.0, 1.0, 0.5], 0.1);

    // Subject every embedding to the identical decoherence schedule.
    for emb in vec![&mut emb_a1, &mut emb_a2, &mut emb_b] {
        emb.decohere(5.0, 100);
    }

    let fid_a1 = emb_a1.fidelity();
    let fid_a2 = emb_a2.fidelity();
    let fid_b = emb_b.fidelity();

    // The near-duplicate pair should track each other; the outlier drifts.
    let diff_similar = (fid_a1 - fid_a2).abs();
    let diff_different = (fid_a1 - fid_b).abs();

    // Noise forbids a hard ordering guarantee, so report rather than assert.
    println!("DISCOVERY: Decoherence fingerprint");
    println!(" Similar pair fidelity diff: {:.6}", diff_similar);
    println!(" Different pair fidelity diff: {:.6}", diff_different);
    println!(
        " A1 fidelity: {:.6}, A2 fidelity: {:.6}, B fidelity: {:.6}",
        fid_a1, fid_a2, fid_b
    );
}
|
||||
|
||||
/// DISCOVERY 2: Interference creates NEW vectors not in original space.
/// When two concept meanings interfere with a context, the resulting
/// amplitude pattern is a vector that encodes the relationship between
/// the concepts and the context — not just a reranking.
#[test]
fn test_discovery_interference_creates_novel_representations() {
    // "spring" in uniform superposition over its three senses.
    let concept = ConceptSuperposition::uniform(
        "spring",
        vec![
            ("season".into(), vec![1.0, 0.0, 0.0, 0.0]),
            ("water_source".into(), vec![0.0, 1.0, 0.0, 0.0]),
            ("mechanical".into(), vec![0.0, 0.0, 1.0, 0.0]),
        ],
    );

    // Contexts biased toward each sense in turn.
    let ctx_weather = vec![0.9, 0.0, 0.0, 0.1];
    let ctx_geology = vec![0.1, 0.8, 0.1, 0.0];
    let ctx_engineering = vec![0.0, 0.0, 0.9, 0.1];

    let scores_weather = concept.interfere(&ctx_weather);
    let scores_geology = concept.interfere(&ctx_geology);
    let scores_engineering = concept.interfere(&ctx_engineering);

    println!("DISCOVERY: Interference resolves polysemy");
    for (ctx_name, scores) in &[
        ("weather", &scores_weather),
        ("geology", &scores_geology),
        ("engineering", &scores_engineering),
    ] {
        let top = scores
            .iter()
            .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
            .unwrap();
        println!(
            " Context '{}' → top meaning: '{}' (prob: {:.4})",
            ctx_name, top.label, top.probability
        );
    }

    // Each context must surface exactly the matching sense as the winner.
    for (scores, expected) in vec![
        (&scores_weather, "season"),
        (&scores_geology, "water_source"),
        (&scores_engineering, "mechanical"),
    ] {
        let winner = scores
            .iter()
            .max_by(|a, b| a.probability.partial_cmp(&b.probability).unwrap())
            .unwrap();
        assert_eq!(winner.label, expected);
    }
}
|
||||
|
||||
/// DISCOVERY 3: Counterfactual reveals hidden dependencies.
/// In a chain of operations, some steps are critical (removing them
/// changes everything) and some are redundant (removing them changes nothing).
/// This is impossible to know in forward-only systems.
#[test]
fn test_discovery_counterfactual_dependency_map() {
    let mut memory = ReversibleMemory::new(3).unwrap();

    // Assemble an entangled three-qubit state one step at a time.
    memory.apply(Gate::H(0)).unwrap(); // step 0: superposition on q0
    memory.apply(Gate::CNOT(0, 1)).unwrap(); // step 1: entangle q0-q1
    memory.apply(Gate::Rz(2, 0.001)).unwrap(); // step 2: tiny rotation on q2 (nearly no-op)
    memory.apply(Gate::CNOT(1, 2)).unwrap(); // step 3: propagate entanglement to q2
    memory.apply(Gate::H(2)).unwrap(); // step 4: mix q2

    println!("DISCOVERY: Counterfactual dependency map");
    for step in 0..5 {
        let cf = memory.counterfactual(step).unwrap();
        println!(" Step {} removed: divergence = {:.6}", step, cf.divergence);
    }

    // The opening H seeds all downstream structure; the tiny Rz is near-inert.
    let critical = memory.counterfactual(0).unwrap();
    let negligible = memory.counterfactual(2).unwrap();

    assert!(
        critical.divergence > negligible.divergence,
        "H gate (step 0) should be more critical than tiny Rz (step 2): {} > {}",
        critical.divergence,
        negligible.divergence
    );
}
|
||||
|
||||
/// DISCOVERY 4: Swarm interference naturally resolves what voting cannot.
/// With voting: 3 for A, 2 for B → A wins 60/40.
/// With interference: depends on agent PHASES, not just counts.
/// Confident agreement amplifies exponentially. Uncertain agents barely contribute.
#[test]
fn test_discovery_swarm_phase_matters() {
    let action = Action {
        id: "x".into(),
        description: "".into(),
    };

    // Scenario 1: three confident agents, all phase-aligned (phase 0).
    let mut coherent = SwarmInterference::new();
    (0..3).for_each(|i| {
        coherent.contribute(AgentContribution::new(
            &format!("a{}", i),
            action.clone(),
            1.0,
            true,
        ));
    });

    // Scenario 2: same head count, but one agent sits at phase π/2
    // (orthogonal — an uncertain direction rather than agreement).
    let mut skewed = SwarmInterference::new();
    skewed.contribute(AgentContribution::new("b0", action.clone(), 1.0, true));
    skewed.contribute(AgentContribution::new("b1", action.clone(), 1.0, true));
    skewed.contribute(AgentContribution::multi(
        "b2",
        vec![
            (action.clone(), Complex::new(0.0, 1.0)), // phase π/2
        ],
    ));

    let prob_aligned = coherent.decide()[0].probability;
    let prob_misaligned = skewed.decide()[0].probability;

    println!("DISCOVERY: Phase alignment matters for swarm decisions");
    println!(
        " Aligned (3 agents, same phase): prob = {:.4}",
        prob_aligned
    );
    println!(
        " Misaligned (2 same, 1 orthogonal): prob = {:.4}",
        prob_misaligned
    );

    assert!(
        prob_aligned > prob_misaligned,
        "Phase-aligned swarm should produce higher probability"
    );
}
|
||||
Reference in New Issue
Block a user